/* vmwgfx_binding.c, revision 1.2 */
      1 /*	$NetBSD: vmwgfx_binding.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $	*/
      2 
      3 /**************************************************************************
      4  *
      5  * Copyright  2015 VMware, Inc., Palo Alto, CA., USA
      6  * All Rights Reserved.
      7  *
      8  * Permission is hereby granted, free of charge, to any person obtaining a
      9  * copy of this software and associated documentation files (the
     10  * "Software"), to deal in the Software without restriction, including
     11  * without limitation the rights to use, copy, modify, merge, publish,
     12  * distribute, sub license, and/or sell copies of the Software, and to
     13  * permit persons to whom the Software is furnished to do so, subject to
     14  * the following conditions:
     15  *
     16  * The above copyright notice and this permission notice (including the
     17  * next paragraph) shall be included in all copies or substantial portions
     18  * of the Software.
     19  *
     20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     22  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     23  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
     24  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     25  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     26  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     27  *
     28  **************************************************************************/
     29 /*
     30  * This file implements the vmwgfx context binding manager,
     31  * The sole reason for having to use this code is that vmware guest
     32  * backed contexts can be swapped out to their backing mobs by the device
     33  * at any time, also swapped in at any time. At swapin time, the device
     34  * validates the context bindings to make sure they point to valid resources.
     35  * It's this outside-of-drawcall validation (that can happen at any time),
     36  * that makes this code necessary.
     37  *
     38  * We therefore need to kill any context bindings pointing to a resource
     39  * when the resource is swapped out. Furthermore, if the vmwgfx driver has
     40  * swapped out the context we can't swap it in again to kill bindings because
     41  * of backing mob reservation lockdep violations, so as part of
     42  * context swapout, also kill all bindings of a context, so that they are
     43  * already killed if a resource to which a binding points
     44  * needs to be swapped out.
     45  *
     46  * Note that a resource can be pointed to by bindings from multiple contexts,
     47  * Therefore we can't easily protect this data by a per context mutex
     48  * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
     49  * to protect all binding manager data.
     50  *
     51  * Finally, any association between a context and a global resource
     52  * (surface, shader or even DX query) is conceptually a context binding that
     53  * needs to be tracked by this code.
     54  */
     55 
     56 #include <sys/cdefs.h>
     57 __KERNEL_RCSID(0, "$NetBSD: vmwgfx_binding.c,v 1.2 2018/08/27 04:58:37 riastradh Exp $");
     58 
     59 #include "vmwgfx_drv.h"
     60 #include "vmwgfx_binding.h"
     61 #include "device_include/svga3d_reg.h"
     62 
     63 #define VMW_BINDING_RT_BIT     0
     64 #define VMW_BINDING_PS_BIT     1
     65 #define VMW_BINDING_SO_BIT     2
     66 #define VMW_BINDING_VB_BIT     3
     67 #define VMW_BINDING_NUM_BITS   4
     68 
     69 #define VMW_BINDING_PS_SR_BIT  0
     70 
     71 /**
     72  * struct vmw_ctx_binding_state - per context binding state
     73  *
     74  * @dev_priv: Pointer to device private structure.
     75  * @list: linked list of individual active bindings.
     76  * @render_targets: Render target bindings.
     77  * @texture_units: Texture units bindings.
     78  * @ds_view: Depth-stencil view binding.
     79  * @so_targets: StreamOutput target bindings.
     80  * @vertex_buffers: Vertex buffer bindings.
     81  * @index_buffer: Index buffer binding.
     82  * @per_shader: Per shader-type bindings.
     83  * @dirty: Bitmap tracking per binding-type changes that have not yet
     84  * been emitted to the device.
     85  * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
     86  * have not yet been emitted to the device.
     87  * @bind_cmd_buffer: Scratch space used to construct binding commands.
     88  * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer
     89  * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
     90  * device binding slot of the first command data entry in @bind_cmd_buffer.
     91  *
     92  * Note that this structure also provides storage space for the individual
     93  * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
     94  * for individual bindings.
     95  *
     96  */
struct vmw_ctx_binding_state {
	struct vmw_private *dev_priv;	/* Owning device instance. */
	struct list_head list;		/* All currently tracked bindings. */
	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_bindinfo_view ds_view;	/* Depth-stencil view. */
	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
	struct vmw_ctx_bindinfo_ib index_buffer;
	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];

	/* Bits indexed by the VMW_BINDING_*_BIT defines above. */
	unsigned long dirty;
	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);

	/* Scratch area for building multi-slot binding commands. */
	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
	u32 bind_cmd_count;
	u32 bind_first_slot;
};
    115 
/*
 * Forward declarations of the per-binding-type scrub functions referenced
 * from the vmw_binding_infos table below. Each either unbinds (rebind ==
 * false) or rebinds (rebind == true) a single binding on the device.
 */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
				       bool rebind);
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_binding_build_asserts(void) __attribute__ ((unused));

/* Signature shared by all scrub functions: (binding, rebind) -> errno. */
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
    132 
    133 /**
    134  * struct vmw_binding_info - Per binding type information for the binding
    135  * manager
    136  *
    137  * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
    138  * @offsets: array[shader_slot] of offsets to the array[slot]
    139  * of struct bindings for the binding type.
    140  * @scrub_func: Pointer to the scrub function for this binding type.
    141  *
    142  * Holds static information to help optimize the binding manager and avoid
    143  * an excessive amount of switch statements.
    144  */
struct vmw_binding_state {
};
struct vmw_binding_info {
	size_t size;		/* sizeof the concrete bindinfo struct. */
	const size_t *offsets;	/* Per shader-slot offsets into the cbs. */
	vmw_scrub_func scrub_func;	/* Unbind/rebind emitter. */
};
    150 
/*
 * A number of static variables that help determine the scrub func and the
 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 * Each table is indexed by shader slot; single-entry tables are for binding
 * types that are not per-shader.
 */
static const size_t vmw_binding_shader_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
};
static const size_t vmw_binding_rt_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, render_targets),
};
static const size_t vmw_binding_tex_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, texture_units),
};
static const size_t vmw_binding_cb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
};
static const size_t vmw_binding_dx_ds_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, ds_view),
};
static const size_t vmw_binding_sr_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
};
static const size_t vmw_binding_so_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, so_targets),
};
static const size_t vmw_binding_vb_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
};
static const size_t vmw_binding_ib_offsets[] = {
	offsetof(struct vmw_ctx_binding_state, index_buffer),
};
    188 
/*
 * Master per-binding-type table, indexed by enum vmw_ctx_binding_type.
 * Ties together slot size, slot location and scrub function for each type.
 */
static const struct vmw_binding_info vmw_binding_infos[] = {
	[vmw_ctx_binding_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_shader},
	[vmw_ctx_binding_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_render_target},
	[vmw_ctx_binding_tex] = {
		.size = sizeof(struct vmw_ctx_bindinfo_tex),
		.offsets = vmw_binding_tex_offsets,
		.scrub_func = vmw_binding_scrub_texture},
	[vmw_ctx_binding_cb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_cb),
		.offsets = vmw_binding_cb_offsets,
		.scrub_func = vmw_binding_scrub_cb},
	[vmw_ctx_binding_dx_shader] = {
		.size = sizeof(struct vmw_ctx_bindinfo_shader),
		.offsets = vmw_binding_shader_offsets,
		.scrub_func = vmw_binding_scrub_dx_shader},
	[vmw_ctx_binding_dx_rt] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_rt_offsets,
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_sr] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_sr_offsets,
		.scrub_func = vmw_binding_scrub_sr},
	[vmw_ctx_binding_ds] = {
		.size = sizeof(struct vmw_ctx_bindinfo_view),
		.offsets = vmw_binding_dx_ds_offsets,
		/* Depth-stencil views share the DX render-target scrub. */
		.scrub_func = vmw_binding_scrub_dx_rt},
	[vmw_ctx_binding_so] = {
		.size = sizeof(struct vmw_ctx_bindinfo_so),
		.offsets = vmw_binding_so_offsets,
		.scrub_func = vmw_binding_scrub_so},
	[vmw_ctx_binding_vb] = {
		.size = sizeof(struct vmw_ctx_bindinfo_vb),
		.offsets = vmw_binding_vb_offsets,
		.scrub_func = vmw_binding_scrub_vb},
	[vmw_ctx_binding_ib] = {
		.size = sizeof(struct vmw_ctx_bindinfo_ib),
		.offsets = vmw_binding_ib_offsets,
		.scrub_func = vmw_binding_scrub_ib},
};
    235 
    236 /**
    237  * vmw_cbs_context - Return a pointer to the context resource of a
    238  * context binding state tracker.
    239  *
    240  * @cbs: The context binding state tracker.
    241  *
    242  * Provided there are any active bindings, this function will return an
    243  * unreferenced pointer to the context resource that owns the context
    244  * binding state tracker. If there are no active bindings, this function
    245  * will return NULL. Note that the caller must somehow ensure that a reference
    246  * is held on the context resource prior to calling this function.
    247  */
    248 static const struct vmw_resource *
    249 vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
    250 {
    251 	if (list_empty(&cbs->list))
    252 		return NULL;
    253 
    254 	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
    255 				ctx_list)->ctx;
    256 }
    257 
    258 /**
    259  * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
    260  *
    261  * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
    262  * @bt: The binding type.
    263  * @shader_slot: The shader slot of the binding. If none, then set to 0.
    264  * @slot: The slot of the binding.
    265  */
    266 static struct vmw_ctx_bindinfo *
    267 vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
    268 		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
    269 {
    270 	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
    271 	size_t offset = b->offsets[shader_slot] + b->size*slot;
    272 
    273 	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
    274 }
    275 
    276 /**
    277  * vmw_binding_drop: Stop tracking a context binding
    278  *
    279  * @bi: Pointer to binding tracker storage.
    280  *
    281  * Stops tracking a context binding, and re-initializes its storage.
    282  * Typically used when the context binding is replaced with a binding to
    283  * another (or the same, for that matter) resource.
    284  */
static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
{
	list_del(&bi->ctx_list);
	/* res_list may be empty if the binding was staged but never had a
	 * resource attached (see vmw_binding_add). */
	if (!list_empty(&bi->res_list))
		list_del(&bi->res_list);
	bi->ctx = NULL;	/* NULL ctx marks the slot as free. */
}
    292 
    293 /**
    294  * vmw_binding_add: Start tracking a context binding
    295  *
    296  * @cbs: Pointer to the context binding state tracker.
    297  * @bi: Information about the binding to track.
    298  *
    299  * Starts tracking the binding in the context binding
    300  * state structure @cbs.
    301  */
    302 void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
    303 		    const struct vmw_ctx_bindinfo *bi,
    304 		    u32 shader_slot, u32 slot)
    305 {
    306 	struct vmw_ctx_bindinfo *loc =
    307 		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
    308 	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
    309 
    310 	if (loc->ctx != NULL)
    311 		vmw_binding_drop(loc);
    312 
    313 	memcpy(loc, bi, b->size);
    314 	loc->scrubbed = false;
    315 	list_add(&loc->ctx_list, &cbs->list);
    316 	INIT_LIST_HEAD(&loc->res_list);
    317 }
    318 
/**
 * vmw_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @from: Staged binding state that currently holds @bi.
 * @bi: Information about the binding to track.
 *
 * The binding is copied to the slot in @cbs at the same structure offset
 * it occupies in @from, so both trackers must share the same layout.
 */
static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
				 const struct vmw_ctx_binding_state *from,
				 const struct vmw_ctx_bindinfo *bi)
{
	/* Mirror @bi's offset within @from into @cbs. */
	size_t offset = (unsigned long)bi - (unsigned long)from;
	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
		((unsigned long) cbs + offset);

	if (loc->ctx != NULL) {
		/* Only unscrubbed bindings should be transferred over. */
		WARN_ON(bi->scrubbed);

		vmw_binding_drop(loc);
	}

	/* A NULL resource means "unbind"; only track real bindings. */
	if (bi->res != NULL) {
		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &loc->res->binding_head);
	}
}
    346 
    347 /**
    348  * vmw_binding_state_kill - Kill all bindings associated with a
    349  * struct vmw_ctx_binding state structure, and re-initialize the structure.
    350  *
    351  * @cbs: Pointer to the context binding state tracker.
    352  *
    353  * Emits commands to scrub all bindings associated with the
    354  * context binding state tracker. Then re-initializes the whole structure.
    355  */
    356 void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
    357 {
    358 	struct vmw_ctx_bindinfo *entry, *next;
    359 
    360 	vmw_binding_state_scrub(cbs);
    361 	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
    362 		vmw_binding_drop(entry);
    363 }
    364 
    365 /**
    366  * vmw_binding_state_scrub - Scrub all bindings associated with a
    367  * struct vmw_ctx_binding state structure.
    368  *
    369  * @cbs: Pointer to the context binding state tracker.
    370  *
    371  * Emits commands to scrub all bindings associated with the
    372  * context binding state tracker.
    373  */
    374 void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
    375 {
    376 	struct vmw_ctx_bindinfo *entry;
    377 
    378 	list_for_each_entry(entry, &cbs->list, ctx_list) {
    379 		if (!entry->scrubbed) {
    380 			(void) vmw_binding_infos[entry->bt].scrub_func
    381 				(entry, false);
    382 			entry->scrubbed = true;
    383 		}
    384 	}
    385 
    386 	(void) vmw_binding_emit_dirty(cbs);
    387 }
    388 
    389 /**
    390  * vmw_binding_res_list_kill - Kill all bindings on a
    391  * resource binding list
    392  *
    393  * @head: list head of resource binding list
    394  *
    395  * Kills all bindings associated with a specific resource. Typically
    396  * called before the resource is destroyed.
    397  */
    398 void vmw_binding_res_list_kill(struct list_head *head)
    399 {
    400 	struct vmw_ctx_bindinfo *entry, *next;
    401 
    402 	vmw_binding_res_list_scrub(head);
    403 	list_for_each_entry_safe(entry, next, head, res_list)
    404 		vmw_binding_drop(entry);
    405 }
    406 
    407 /**
    408  * vmw_binding_res_list_scrub - Scrub all bindings on a
    409  * resource binding list
    410  *
    411  * @head: list head of resource binding list
    412  *
    413  * Scrub all bindings associated with a specific resource. Typically
    414  * called before the resource is evicted.
    415  */
    416 void vmw_binding_res_list_scrub(struct list_head *head)
    417 {
    418 	struct vmw_ctx_bindinfo *entry;
    419 
    420 	list_for_each_entry(entry, head, res_list) {
    421 		if (!entry->scrubbed) {
    422 			(void) vmw_binding_infos[entry->bt].scrub_func
    423 				(entry, false);
    424 			entry->scrubbed = true;
    425 		}
    426 	}
    427 
    428 	list_for_each_entry(entry, head, res_list) {
    429 		struct vmw_ctx_binding_state *cbs =
    430 			vmw_context_binding_state(entry->ctx);
    431 
    432 		(void) vmw_binding_emit_dirty(cbs);
    433 	}
    434 }
    435 
    436 
/**
 * vmw_binding_state_commit - Commit staged binding info
 *
 * @to: Pointer to the persistent (context) binding state to commit to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure
 * (typically used by execbuf) to the persistent
 * structure in the context. This can be done once commands have been
 * submitted to hardware
 */
void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
			      struct vmw_ctx_binding_state *from)
{
	struct vmw_ctx_bindinfo *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
		/* Copy the entry into @to, then remove it from @from. */
		vmw_binding_transfer(to, from, entry);
		vmw_binding_drop(entry);
	}
}
    459 
    460 /**
    461  * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
    462  *
    463  * @ctx: The context resource
    464  *
    465  * Walks through the context binding list and rebinds all scrubbed
    466  * resources.
    467  */
    468 int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
    469 {
    470 	struct vmw_ctx_bindinfo *entry;
    471 	int ret;
    472 
    473 	list_for_each_entry(entry, &cbs->list, ctx_list) {
    474 		if (likely(!entry->scrubbed))
    475 			continue;
    476 
    477 		if ((entry->res == NULL || entry->res->id ==
    478 			    SVGA3D_INVALID_ID))
    479 			continue;
    480 
    481 		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
    482 		if (unlikely(ret != 0))
    483 			return ret;
    484 
    485 		entry->scrubbed = false;
    486 	}
    487 
    488 	return vmw_binding_emit_dirty(cbs);
    489 }
    490 
    491 /**
    492  * vmw_binding_scrub_shader - scrub a shader binding from a context.
    493  *
    494  * @bi: single binding information.
    495  * @rebind: Whether to issue a bind instead of scrub command.
    496  */
static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_shader *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	/* FIFO command layout: header followed by the SET_SHADER body. */
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
	/* Rebind to the resource's id, or unbind with the invalid id. */
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
    523 
    524 /**
    525  * vmw_binding_scrub_render_target - scrub a render target binding
    526  * from a context.
    527  *
    528  * @bi: single binding information.
    529  * @rebind: Whether to issue a bind instead of scrub command.
    530  */
static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_ctx_bindinfo_view *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	/* FIFO command layout: header followed by SETRENDERTARGET body. */
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = binding->slot;
	/* Rebind to the resource's id, or unbind with the invalid id. */
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
    560 
    561 /**
    562  * vmw_binding_scrub_texture - scrub a texture binding from a context.
    563  *
    564  * @bi: single binding information.
    565  * @rebind: Whether to issue a bind instead of scrub command.
    566  *
    567  * TODO: Possibly complement this function with a function that takes
    568  * a list of texture bindings and combines them to a single command.
    569  */
static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_ctx_bindinfo_tex *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	/* FIFO command layout: header plus a single texture-state entry. */
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = binding->texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	/* Rebind to the resource's id, or unbind with the invalid id. */
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
    601 
    602 /**
    603  * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
    604  *
    605  * @bi: single binding information.
    606  * @rebind: Whether to issue a bind instead of scrub command.
    607  */
static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_shader *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	/* FIFO command layout: header followed by DX SET_SHADER body. */
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShader body;
	} *cmd;

	/* DX commands are reserved against a specific context id. */
	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX shader "
			  "unbinding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
	/* Rebind to the resource's id, or unbind with the invalid id. */
	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
    632 
    633 /**
    634  * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
    635  *
    636  * @bi: single binding information.
    637  * @rebind: Whether to issue a bind instead of scrub command.
    638  */
    639 static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
    640 {
    641 	struct vmw_ctx_bindinfo_cb *binding =
    642 		container_of(bi, typeof(*binding), bi);
    643 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
    644 	struct {
    645 		SVGA3dCmdHeader header;
    646 		SVGA3dCmdDXSetSingleConstantBuffer body;
    647 	} *cmd;
    648 
    649 	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
    650 	if (unlikely(cmd == NULL)) {
    651 		DRM_ERROR("Failed reserving FIFO space for DX shader "
    652 			  "unbinding.\n");
    653 		return -ENOMEM;
    654 	}
    655 
    656 	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
    657 	cmd->header.size = sizeof(cmd->body);
    658 	cmd->body.slot = binding->slot;
    659 	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
    660 	if (rebind) {
    661 		cmd->body.offsetInBytes = binding->offset;
    662 		cmd->body.sizeInBytes = binding->size;
    663 		cmd->body.sid = bi->res->id;
    664 	} else {
    665 		cmd->body.offsetInBytes = 0;
    666 		cmd->body.sizeInBytes = 0;
    667 		cmd->body.sid = SVGA3D_INVALID_ID;
    668 	}
    669 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
    670 
    671 	return 0;
    672 }
    673 
    674 /**
    675  * vmw_collect_view_ids - Build view id data for a view binding command
    676  * without checking which bindings actually need to be emitted
    677  *
    678  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
    679  * @bi: Pointer to where the binding info array is stored in @cbs
    680  * @max_num: Maximum number of entries in the @bi array.
    681  *
    682  * Scans the @bi array for bindings and builds a buffer of view id data.
    683  * Stops at the first non-existing binding in the @bi array.
    684  * On output, @cbs->bind_cmd_count contains the number of bindings to be
    685  * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
    686  * contains the command data.
    687  */
    688 static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
    689 				 const struct vmw_ctx_bindinfo *bi,
    690 				 u32 max_num)
    691 {
    692 	const struct vmw_ctx_bindinfo_view *biv =
    693 		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
    694 	unsigned long i;
    695 
    696 	cbs->bind_cmd_count = 0;
    697 	cbs->bind_first_slot = 0;
    698 
    699 	for (i = 0; i < max_num; ++i, ++biv) {
    700 		if (!biv->bi.ctx)
    701 			break;
    702 
    703 		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
    704 			((biv->bi.scrubbed) ?
    705 			 SVGA3D_INVALID_ID : biv->bi.res->id);
    706 	}
    707 }
    708 
    709 /**
    710  * vmw_collect_dirty_view_ids - Build view id data for a view binding command
    711  *
    712  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
    713  * @bi: Pointer to where the binding info array is stored in @cbs
    714  * @dirty: Bitmap indicating which bindings need to be emitted.
    715  * @max_num: Maximum number of entries in the @bi array.
    716  *
    717  * Scans the @bi array for bindings that need to be emitted and
    718  * builds a buffer of view id data.
    719  * On output, @cbs->bind_cmd_count contains the number of bindings to be
    720  * emitted, @cbs->bind_first_slot indicates the index of the first emitted
    721  * binding, and @cbs->bind_cmd_buffer contains the command data.
    722  */
static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
				       const struct vmw_ctx_bindinfo *bi,
				       unsigned long *dirty,
				       u32 max_num)
{
	const struct vmw_ctx_bindinfo_view *biv =
		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
	unsigned long i, next_bit;

	cbs->bind_cmd_count = 0;
	/* The first dirty bit picks the first device slot to emit. */
	i = find_first_bit(dirty, max_num);
	next_bit = i;
	cbs->bind_first_slot = i;

	biv += i;
	for (; i < max_num; ++i, ++biv) {
		/* Emit every slot from the first to the last dirty one,
		 * inclusive; unbound or scrubbed slots get the invalid
		 * id. */
		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
			((!biv->bi.ctx || biv->bi.scrubbed) ?
			 SVGA3D_INVALID_ID : biv->bi.res->id);

		/* Advance to the next dirty bit; stop after the last one
		 * has been emitted. */
		if (next_bit == i) {
			next_bit = find_next_bit(dirty, max_num, i + 1);
			if (next_bit >= max_num)
				break;
		}
	}
}
    750 
/**
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot (shader-type index) whose dirty
 * shader-resource bindings should be emitted.
 */
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
			   int shader_slot)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->per_shader[shader_slot].shader_res[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetShaderResources body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	/* Gather dirty view ids into cbs->bind_cmd_buffer. */
	vmw_collect_dirty_view_ids(cbs, loc,
				   cbs->per_shader[shader_slot].dirty_sr,
				   SVGA3D_DX_MAX_SRVIEWS);
	if (cbs->bind_cmd_count == 0)
		return 0;

	/* The view id array follows the fixed-size command body. */
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX shader"
			  " resource binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
	cmd->header.size = sizeof(cmd->body) + view_id_size;
	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
	cmd->body.startView = cbs->bind_first_slot;

	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_fifo_commit(ctx->dev_priv, cmd_size);
	/* Emitted bindings are no longer dirty. */
	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}
    796 
/**
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Emits a single SVGA_3D_CMD_DX_SET_RENDERTARGETS command carrying the
 * collected render-target view ids plus the depth/stencil view id.  Unlike
 * the other emit functions this one does not early-return when
 * bind_cmd_count is zero - presumably because the depth/stencil binding
 * must still be programmed (TODO confirm).
 * Returns 0 on success or -ENOMEM if FIFO space could not be reserved.
 */
static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetRenderTargets body;
	} *cmd;
	size_t cmd_size, view_id_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
	cmd_size = sizeof(*cmd) + view_id_size;
	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX render-target"
			  " binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
	cmd->header.size = sizeof(cmd->body) + view_id_size;

	/* A scrubbed or absent depth/stencil view is sent as invalid. */
	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
	else
		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;

	/* The variable-length view id array follows the fixed-size body. */
	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);

	vmw_fifo_commit(ctx->dev_priv, cmd_size);

	return 0;

}
    837 
    838 /**
    839  * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
    840  * without checking which bindings actually need to be emitted
    841  *
    842  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
    843  * @bi: Pointer to where the binding info array is stored in @cbs
    844  * @max_num: Maximum number of entries in the @bi array.
    845  *
    846  * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
    847  * Stops at the first non-existing binding in the @bi array.
    848  * On output, @cbs->bind_cmd_count contains the number of bindings to be
    849  * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
    850  * contains the command data.
    851  */
    852 static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
    853 				   const struct vmw_ctx_bindinfo *bi,
    854 				   u32 max_num)
    855 {
    856 	const struct vmw_ctx_bindinfo_so *biso =
    857 		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
    858 	unsigned long i;
    859 	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
    860 
    861 	cbs->bind_cmd_count = 0;
    862 	cbs->bind_first_slot = 0;
    863 
    864 	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
    865 		    ++cbs->bind_cmd_count) {
    866 		if (!biso->bi.ctx)
    867 			break;
    868 
    869 		if (!biso->bi.scrubbed) {
    870 			so_buffer->sid = biso->bi.res->id;
    871 			so_buffer->offset = biso->offset;
    872 			so_buffer->sizeInBytes = biso->size;
    873 		} else {
    874 			so_buffer->sid = SVGA3D_INVALID_ID;
    875 			so_buffer->offset = 0;
    876 			so_buffer->sizeInBytes = 0;
    877 		}
    878 	}
    879 }
    880 
/**
 * vmw_emit_set_so - Issue delayed streamout binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Collects the bound streamout targets and emits a single
 * SVGA_3D_CMD_DX_SET_SOTARGETS command.  Returns 0 on success (including
 * when no targets are bound), or -ENOMEM if FIFO space could not be
 * reserved.
 */
static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
	} *cmd;
	size_t cmd_size, so_target_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
	/* No bound SO targets - no command needed. */
	if (cbs->bind_cmd_count == 0)
		return 0;

	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
	cmd_size = sizeof(*cmd) + so_target_size;
	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX SO target"
			  " binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
	cmd->header.size = sizeof(cmd->body) + so_target_size;
	/* The variable-length SVGA3dSoTarget array follows the body. */
	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);

	vmw_fifo_commit(ctx->dev_priv, cmd_size);

	return 0;

}
    918 
    919 /**
    920  * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
    921  *
    922  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
    923  *
    924  */
    925 static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
    926 {
    927 	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
    928 	u32 i;
    929 	int ret;
    930 
    931 	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
    932 		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
    933 			continue;
    934 
    935 		ret = vmw_emit_set_sr(cbs, i);
    936 		if (ret)
    937 			break;
    938 
    939 		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
    940 	}
    941 
    942 	return 0;
    943 }
    944 
    945 /**
    946  * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
    947  * SVGA3dCmdDXSetVertexBuffers command
    948  *
    949  * @cbs: Pointer to the context's struct vmw_ctx_binding_state
    950  * @bi: Pointer to where the binding info array is stored in @cbs
    951  * @dirty: Bitmap indicating which bindings need to be emitted.
    952  * @max_num: Maximum number of entries in the @bi array.
    953  *
    954  * Scans the @bi array for bindings that need to be emitted and
    955  * builds a buffer of SVGA3dVertexBuffer data.
    956  * On output, @cbs->bind_cmd_count contains the number of bindings to be
    957  * emitted, @cbs->bind_first_slot indicates the index of the first emitted
    958  * binding, and @cbs->bind_cmd_buffer contains the command data.
    959  */
    960 static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
    961 				  const struct vmw_ctx_bindinfo *bi,
    962 				  unsigned long *dirty,
    963 				  u32 max_num)
    964 {
    965 	const struct vmw_ctx_bindinfo_vb *biv =
    966 		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
    967 	unsigned long i, next_bit;
    968 	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
    969 
    970 	cbs->bind_cmd_count = 0;
    971 	i = find_first_bit(dirty, max_num);
    972 	next_bit = i;
    973 	cbs->bind_first_slot = i;
    974 
    975 	biv += i;
    976 	for (; i < max_num; ++i, ++biv, ++vbs) {
    977 		if (!biv->bi.ctx || biv->bi.scrubbed) {
    978 			vbs->sid = SVGA3D_INVALID_ID;
    979 			vbs->stride = 0;
    980 			vbs->offset = 0;
    981 		} else {
    982 			vbs->sid = biv->bi.res->id;
    983 			vbs->stride = biv->stride;
    984 			vbs->offset = biv->offset;
    985 		}
    986 		cbs->bind_cmd_count++;
    987 		if (next_bit == i) {
    988 			next_bit = find_next_bit(dirty, max_num, i + 1);
    989 			if (next_bit >= max_num)
    990 				break;
    991 		}
    992 	}
    993 }
    994 
/**
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * Collects the dirty vertex-buffer bindings and emits a single
 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command covering the dirty range.
 * Returns 0 on success (including when nothing is dirty), or -ENOMEM if
 * FIFO space could not be reserved.
 */
static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
{
	const struct vmw_ctx_bindinfo *loc =
		&cbs->vertex_buffers[0].bi;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
	} *cmd;
	size_t cmd_size, set_vb_size;
	const struct vmw_resource *ctx = vmw_cbs_context(cbs);

	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
			     SVGA3D_DX_MAX_VERTEXBUFFERS);
	/* Nothing dirty - no command needed. */
	if (cbs->bind_cmd_count == 0)
		return 0;

	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
	cmd_size = sizeof(*cmd) + set_vb_size;
	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
			  " binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
	cmd->header.size = sizeof(cmd->body) + set_vb_size;
	cmd->body.startBuffer = cbs->bind_first_slot;

	/* The variable-length SVGA3dVertexBuffer array follows the body. */
	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);

	vmw_fifo_commit(ctx->dev_priv, cmd_size);
	/* The emitted range is now clean. */
	bitmap_clear(cbs->dirty_vb,
		     cbs->bind_first_slot, cbs->bind_cmd_count);

	return 0;
}
   1038 
/**
 * vmw_binding_emit_dirty - Issue delayed binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
 * This function issues the delayed binding commands that arise from
 * previous scrub / unscrub calls. These binding commands are typically
 * commands that batch a number of bindings and therefore it makes sense
 * to delay them.
 *
 * Returns 0 on success, or the first error returned by an emit function.
 * On error the corresponding dirty bit is left set, so the failed emit is
 * retried on the next call.
 */
static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
{
	int ret = 0;
	unsigned long hit = 0;

	/* Walk every set bit in the coarse dirty bitmap. */
	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
	      < VMW_BINDING_NUM_BITS) {

		switch (hit) {
		case VMW_BINDING_RT_BIT:
			ret = vmw_emit_set_rt(cbs);
			break;
		case VMW_BINDING_PS_BIT:
			ret = vmw_binding_emit_dirty_ps(cbs);
			break;
		case VMW_BINDING_SO_BIT:
			ret = vmw_emit_set_so(cbs);
			break;
		case VMW_BINDING_VB_BIT:
			ret = vmw_emit_set_vb(cbs);
			break;
		default:
			/* Unknown dirty bit: the bitmap is corrupt. */
			BUG();
		}
		if (ret)
			return ret;

		/* Only clear the bit once the emit succeeded. */
		__clear_bit(hit, &cbs->dirty);
		hit++;
	}

	return 0;
}
   1082 
   1083 /**
   1084  * vmw_binding_scrub_sr - Schedule a dx shaderresource binding
   1085  * scrub from a context
   1086  *
   1087  * @bi: single binding information.
   1088  * @rebind: Whether to issue a bind instead of scrub command.
   1089  */
   1090 static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
   1091 {
   1092 	struct vmw_ctx_bindinfo_view *biv =
   1093 		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
   1094 	struct vmw_ctx_binding_state *cbs =
   1095 		vmw_context_binding_state(bi->ctx);
   1096 
   1097 	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
   1098 	__set_bit(VMW_BINDING_PS_SR_BIT,
   1099 		  &cbs->per_shader[biv->shader_slot].dirty);
   1100 	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
   1101 
   1102 	return 0;
   1103 }
   1104 
   1105 /**
   1106  * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
   1107  * scrub from a context
   1108  *
   1109  * @bi: single binding information.
   1110  * @rebind: Whether to issue a bind instead of scrub command.
   1111  */
   1112 static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
   1113 {
   1114 	struct vmw_ctx_binding_state *cbs =
   1115 		vmw_context_binding_state(bi->ctx);
   1116 
   1117 	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
   1118 
   1119 	return 0;
   1120 }
   1121 
   1122 /**
   1123  * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
   1124  * scrub from a context
   1125  *
   1126  * @bi: single binding information.
   1127  * @rebind: Whether to issue a bind instead of scrub command.
   1128  */
   1129 static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
   1130 {
   1131 	struct vmw_ctx_binding_state *cbs =
   1132 		vmw_context_binding_state(bi->ctx);
   1133 
   1134 	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
   1135 
   1136 	return 0;
   1137 }
   1138 
   1139 /**
   1140  * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
   1141  * scrub from a context
   1142  *
   1143  * @bi: single binding information.
   1144  * @rebind: Whether to issue a bind instead of scrub command.
   1145  */
   1146 static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
   1147 {
   1148 	struct vmw_ctx_bindinfo_vb *bivb =
   1149 		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
   1150 	struct vmw_ctx_binding_state *cbs =
   1151 		vmw_context_binding_state(bi->ctx);
   1152 
   1153 	__set_bit(bivb->slot, cbs->dirty_vb);
   1154 	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
   1155 
   1156 	return 0;
   1157 }
   1158 
/**
 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * Unlike the other DX scrub functions in this file, this one emits the
 * device command immediately instead of setting a delayed-emit dirty bit.
 * Returns 0 on success or -ENOMEM if FIFO space could not be reserved.
 */
static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_ctx_bindinfo_ib *binding =
		container_of(bi, typeof(*binding), bi);
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetIndexBuffer body;
	} *cmd;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for DX index buffer "
			  "binding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
	cmd->header.size = sizeof(cmd->body);
	if (rebind) {
		cmd->body.sid = bi->res->id;
		cmd->body.format = binding->format;
		cmd->body.offset = binding->offset;
	} else {
		/* Scrub: point the device at an invalid surface id. */
		cmd->body.sid = SVGA3D_INVALID_ID;
		cmd->body.format = 0;
		cmd->body.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
   1197 
   1198 /**
   1199  * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
   1200  * memory accounting.
   1201  *
   1202  * @dev_priv: Pointer to a device private structure.
   1203  *
   1204  * Returns a pointer to a newly allocated struct or an error pointer on error.
   1205  */
   1206 struct vmw_ctx_binding_state *
   1207 vmw_binding_state_alloc(struct vmw_private *dev_priv)
   1208 {
   1209 	struct vmw_ctx_binding_state *cbs;
   1210 	int ret;
   1211 
   1212 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
   1213 				   false, false);
   1214 	if (ret)
   1215 		return ERR_PTR(ret);
   1216 
   1217 	cbs = vzalloc(sizeof(*cbs));
   1218 	if (!cbs) {
   1219 		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
   1220 		return ERR_PTR(-ENOMEM);
   1221 	}
   1222 
   1223 	cbs->dev_priv = dev_priv;
   1224 	INIT_LIST_HEAD(&cbs->list);
   1225 
   1226 	return cbs;
   1227 }
   1228 
   1229 /**
   1230  * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
   1231  * memory accounting info.
   1232  *
   1233  * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
   1234  */
   1235 void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
   1236 {
   1237 	struct vmw_private *dev_priv = cbs->dev_priv;
   1238 
   1239 	vfree(cbs);
   1240 	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
   1241 }
   1242 
/**
 * vmw_binding_state_list - Get the binding list of a
 * struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state
 *
 * Returns the binding list which can be used to traverse through the bindings
 * and access the resource information of all bindings.
 */
struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
{
	/* Plain accessor; no locking is done here. */
	return &cbs->list;
}
   1256 
/**
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
 * Drops all bindings registered in @cbs. No device binding actions are
 * performed.
 */
void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_bindinfo *entry, *next;

	/*
	 * _safe iterator: vmw_binding_drop() presumably unlinks @entry from
	 * the list - confirm against its definition earlier in this file.
	 */
	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_binding_drop(entry);
}
   1272 
/*
 * This function is unused at run-time, and only used to hold various build
 * asserts important for code optimization assumptions.
 */
static void vmw_binding_build_asserts(void)
{
	/* The per-shader loops in this file iterate exactly three stages. */
	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
	/* uint32 (SVGA headers) and u32 (kernel) are used interchangeably. */
	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
	 * view id arrays.
	 */
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);

	/*
	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
	 */
	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
}
   1300