From d74617cb4aebe5a4cb3eeda3070053ccfc36a0ae Mon Sep 17 00:00:00 2001
From: Eric Anholt <eric@anholt.net>
Date: Tue, 25 Jul 2017 09:27:32 -0700
Subject: [PATCH 1/6] drm/vc4: Demote user-accessible DRM_ERROR paths to
 DRM_DEBUG.

Userspace shouldn't be able to spam dmesg by passing bad arguments.
This has particularly become an issues since we started using a bad
argument to set_tiling to detect if set_tiling was supported.

Signed-off-by: Eric Anholt <eric@anholt.net>
Fixes: 83753117f1de ("drm/vc4: Add get/set tiling ioctls.")
Link: https://patchwork.freedesktop.org/patch/msgid/20170725162733.28007-1-eric@anholt.net
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
 drivers/gpu/drm/vc4/vc4_bo.c               | 14 +++---
 drivers/gpu/drm/vc4/vc4_gem.c              | 10 ++--
 drivers/gpu/drm/vc4/vc4_kms.c              |  2 +-
 drivers/gpu/drm/vc4/vc4_render_cl.c        | 40 +++++++--------
 drivers/gpu/drm/vc4/vc4_validate.c         | 78 +++++++++++++++---------------
 drivers/gpu/drm/vc4/vc4_validate_shaders.c | 72 +++++++++++++--------------
 6 files changed, 108 insertions(+), 108 deletions(-)

diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 487f96412d35..ede80199001d 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -389,7 +389,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
 	struct vc4_bo *bo = to_vc4_bo(obj);
 
 	if (bo->validated_shader) {
-		DRM_ERROR("Attempting to export shader BO\n");
+		DRM_DEBUG("Attempting to export shader BO\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -410,7 +410,7 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
 	bo = to_vc4_bo(gem_obj);
 
 	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
-		DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
 		return -EINVAL;
 	}
 
@@ -435,7 +435,7 @@ int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	struct vc4_bo *bo = to_vc4_bo(obj);
 
 	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
-		DRM_ERROR("mmaping of shader BOs for writing not allowed.\n");
+		DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n");
 		return -EINVAL;
 	}
 
@@ -447,7 +447,7 @@ void *vc4_prime_vmap(struct drm_gem_object *obj)
 	struct vc4_bo *bo = to_vc4_bo(obj);
 
 	if (bo->validated_shader) {
-		DRM_ERROR("mmaping of shader BOs not allowed.\n");
+		DRM_DEBUG("mmaping of shader BOs not allowed.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -501,7 +501,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
-		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 		return -EINVAL;
 	}
 
@@ -605,7 +605,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
-		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 		return -ENOENT;
 	}
 	bo = to_vc4_bo(gem_obj);
@@ -636,7 +636,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
-		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 		return -ENOENT;
 	}
 	bo = to_vc4_bo(gem_obj);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index d5b821ad06af..a3e45e67f417 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -659,7 +659,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 		/* See comment on bo_index for why we have to check
 		 * this.
 		 */
-		DRM_ERROR("Rendering requires BOs to validate\n");
+		DRM_DEBUG("Rendering requires BOs to validate\n");
 		return -EINVAL;
 	}
 
@@ -691,7 +691,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
 						     handles[i]);
 		if (!bo) {
-			DRM_ERROR("Failed to look up GEM BO %d: %d\n",
+			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
 				  i, handles[i]);
 			ret = -EINVAL;
 			spin_unlock(&file_priv->table_lock);
@@ -729,7 +729,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	    args->shader_rec_count >= (UINT_MAX /
 					  sizeof(struct vc4_shader_state)) ||
 	    temp_size < exec_size) {
-		DRM_ERROR("overflow in exec arguments\n");
+		DRM_DEBUG("overflow in exec arguments\n");
 		ret = -EINVAL;
 		goto fail;
 	}
@@ -974,7 +974,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {
-		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 		return -EINVAL;
 	}
 	bo = to_vc4_bo(gem_obj);
@@ -1009,7 +1009,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
 	int ret = 0;
 
 	if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
-		DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
+		DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags);
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bc6ecdc6f104..b2c55eb09ca3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -204,7 +204,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
 		gem_obj = drm_gem_object_lookup(file_priv,
 						mode_cmd->handles[0]);
 		if (!gem_obj) {
-			DRM_ERROR("Failed to look up GEM BO %d\n",
+			DRM_DEBUG("Failed to look up GEM BO %d\n",
 				  mode_cmd->handles[0]);
 			return ERR_PTR(-ENOENT);
 		}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 5dc19429d4ae..da3bfd53f0bd 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -378,14 +378,14 @@ static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
 	u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);
 
 	if (surf->offset > obj->base.size) {
-		DRM_ERROR("surface offset %d > BO size %zd\n",
+		DRM_DEBUG("surface offset %d > BO size %zd\n",
 			  surf->offset, obj->base.size);
 		return -EINVAL;
 	}
 
 	if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
 	    render_tiles_stride * args->max_y_tile + args->max_x_tile) {
-		DRM_ERROR("MSAA tile %d, %d out of bounds "
+		DRM_DEBUG("MSAA tile %d, %d out of bounds "
 			  "(bo size %zd, offset %d).\n",
 			  args->max_x_tile, args->max_y_tile,
 			  obj->base.size,
@@ -401,7 +401,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
 				      struct drm_vc4_submit_rcl_surface *surf)
 {
 	if (surf->flags != 0 || surf->bits != 0) {
-		DRM_ERROR("MSAA surface had nonzero flags/bits\n");
+		DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
 		return -EINVAL;
 	}
 
@@ -415,7 +415,7 @@ static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
 	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
 
 	if (surf->offset & 0xf) {
-		DRM_ERROR("MSAA write must be 16b aligned.\n");
+		DRM_DEBUG("MSAA write must be 16b aligned.\n");
 		return -EINVAL;
 	}
 
@@ -437,7 +437,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
 	int ret;
 
 	if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
-		DRM_ERROR("Extra flags set\n");
+		DRM_DEBUG("Extra flags set\n");
 		return -EINVAL;
 	}
 
@@ -453,12 +453,12 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
 
 	if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
 		if (surf == &exec->args->zs_write) {
-			DRM_ERROR("general zs write may not be a full-res.\n");
+			DRM_DEBUG("general zs write may not be a full-res.\n");
 			return -EINVAL;
 		}
 
 		if (surf->bits != 0) {
-			DRM_ERROR("load/store general bits set with "
+			DRM_DEBUG("load/store general bits set with "
 				  "full res load/store.\n");
 			return -EINVAL;
 		}
@@ -473,19 +473,19 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
 	if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
 			   VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
 			   VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
-		DRM_ERROR("Unknown bits in load/store: 0x%04x\n",
+		DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
 			  surf->bits);
 		return -EINVAL;
 	}
 
 	if (tiling > VC4_TILING_FORMAT_LT) {
-		DRM_ERROR("Bad tiling format\n");
+		DRM_DEBUG("Bad tiling format\n");
 		return -EINVAL;
 	}
 
 	if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
 		if (format != 0) {
-			DRM_ERROR("No color format should be set for ZS\n");
+			DRM_DEBUG("No color format should be set for ZS\n");
 			return -EINVAL;
 		}
 		cpp = 4;
@@ -499,16 +499,16 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
 			cpp = 4;
 			break;
 		default:
-			DRM_ERROR("Bad tile buffer format\n");
+			DRM_DEBUG("Bad tile buffer format\n");
 			return -EINVAL;
 		}
 	} else {
-		DRM_ERROR("Bad load/store buffer %d.\n", buffer);
+		DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
 		return -EINVAL;
 	}
 
 	if (surf->offset & 0xf) {
-		DRM_ERROR("load/store buffer must be 16b aligned.\n");
+		DRM_DEBUG("load/store buffer must be 16b aligned.\n");
 		return -EINVAL;
 	}
 
@@ -533,7 +533,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
 	int cpp;
 
 	if (surf->flags != 0) {
-		DRM_ERROR("No flags supported on render config.\n");
+		DRM_DEBUG("No flags supported on render config.\n");
 		return -EINVAL;
 	}
 
@@ -541,7 +541,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
 			   VC4_RENDER_CONFIG_FORMAT_MASK |
 			   VC4_RENDER_CONFIG_MS_MODE_4X |
 			   VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
-		DRM_ERROR("Unknown bits in render config: 0x%04x\n",
+		DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
 			  surf->bits);
 		return -EINVAL;
 	}
@@ -556,7 +556,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
 	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;
 
 	if (tiling > VC4_TILING_FORMAT_LT) {
-		DRM_ERROR("Bad tiling format\n");
+		DRM_DEBUG("Bad tiling format\n");
 		return -EINVAL;
 	}
 
@@ -569,7 +569,7 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
 		cpp = 4;
 		break;
 	default:
-		DRM_ERROR("Bad tile buffer format\n");
+		DRM_DEBUG("Bad tile buffer format\n");
 		return -EINVAL;
 	}
 
@@ -590,7 +590,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
 
 	if (args->min_x_tile > args->max_x_tile ||
 	    args->min_y_tile > args->max_y_tile) {
-		DRM_ERROR("Bad render tile set (%d,%d)-(%d,%d)\n",
+		DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
 			  args->min_x_tile, args->min_y_tile,
 			  args->max_x_tile, args->max_y_tile);
 		return -EINVAL;
@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	if (has_bin &&
 	    (args->max_x_tile > exec->bin_tiles_x ||
 	     args->max_y_tile > exec->bin_tiles_y)) {
-		DRM_ERROR("Render tiles (%d,%d) outside of bin config "
+		DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
 			  "(%d,%d)\n",
 			  args->max_x_tile, args->max_y_tile,
 			  exec->bin_tiles_x, exec->bin_tiles_y);
@@ -642,7 +642,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
 	 */
 	if (!setup.color_write && !setup.zs_write &&
 	    !setup.msaa_color_write && !setup.msaa_zs_write) {
-		DRM_ERROR("RCL requires color or Z/S write\n");
+		DRM_DEBUG("RCL requires color or Z/S write\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 814b512c6b9a..2db485abb186 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
 	struct vc4_bo *bo;
 
 	if (hindex >= exec->bo_count) {
-		DRM_ERROR("BO index %d greater than BO count %d\n",
+		DRM_DEBUG("BO index %d greater than BO count %d\n",
 			  hindex, exec->bo_count);
 		return NULL;
 	}
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
 	bo = to_vc4_bo(&obj->base);
 
 	if (bo->validated_shader) {
-		DRM_ERROR("Trying to use shader BO as something other than "
+		DRM_DEBUG("Trying to use shader BO as something other than "
 			  "a shader\n");
 		return NULL;
 	}
@@ -172,7 +172,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
 	 * our math.
 	 */
 	if (width > 4096 || height > 4096) {
-		DRM_ERROR("Surface dimensions (%d,%d) too large",
+		DRM_DEBUG("Surface dimensions (%d,%d) too large",
 			  width, height);
 		return false;
 	}
@@ -191,7 +191,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
 		aligned_height = round_up(height, utile_h);
 		break;
 	default:
-		DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
+		DRM_DEBUG("buffer tiling %d unsupported\n", tiling_format);
 		return false;
 	}
 
@@ -200,7 +200,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
 
 	if (size + offset < size ||
 	    size + offset > fbo->base.size) {
-		DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
+		DRM_DEBUG("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
 			  width, height,
 			  aligned_width, aligned_height,
 			  size, offset, fbo->base.size);
@@ -214,7 +214,7 @@ static int
 validate_flush(VALIDATE_ARGS)
 {
 	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
-		DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
+		DRM_DEBUG("Bin CL must end with VC4_PACKET_FLUSH\n");
 		return -EINVAL;
 	}
 	exec->found_flush = true;
@@ -226,13 +226,13 @@ static int
 validate_start_tile_binning(VALIDATE_ARGS)
 {
 	if (exec->found_start_tile_binning_packet) {
-		DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
+		DRM_DEBUG("Duplicate VC4_PACKET_START_TILE_BINNING\n");
 		return -EINVAL;
 	}
 	exec->found_start_tile_binning_packet = true;
 
 	if (!exec->found_tile_binning_mode_config_packet) {
-		DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+		DRM_DEBUG("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
 		return -EINVAL;
 	}
 
@@ -243,7 +243,7 @@ static int
 validate_increment_semaphore(VALIDATE_ARGS)
 {
 	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
-		DRM_ERROR("Bin CL must end with "
+		DRM_DEBUG("Bin CL must end with "
 			  "VC4_PACKET_INCREMENT_SEMAPHORE\n");
 		return -EINVAL;
 	}
@@ -264,7 +264,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
 
 	/* Check overflow condition */
 	if (exec->shader_state_count == 0) {
-		DRM_ERROR("shader state must precede primitives\n");
+		DRM_DEBUG("shader state must precede primitives\n");
 		return -EINVAL;
 	}
 	shader_state = &exec->shader_state[exec->shader_state_count - 1];
@@ -281,7 +281,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
 
 	if (offset > ib->base.size ||
 	    (ib->base.size - offset) / index_size < length) {
-		DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
+		DRM_DEBUG("IB access overflow (%d + %d*%d > %zd)\n",
 			  offset, length, index_size, ib->base.size);
 		return -EINVAL;
 	}
@@ -301,13 +301,13 @@ validate_gl_array_primitive(VALIDATE_ARGS)
 
 	/* Check overflow condition */
 	if (exec->shader_state_count == 0) {
-		DRM_ERROR("shader state must precede primitives\n");
+		DRM_DEBUG("shader state must precede primitives\n");
 		return -EINVAL;
 	}
 	shader_state = &exec->shader_state[exec->shader_state_count - 1];
 
 	if (length + base_index < length) {
-		DRM_ERROR("primitive vertex count overflow\n");
+		DRM_DEBUG("primitive vertex count overflow\n");
 		return -EINVAL;
 	}
 	max_index = length + base_index - 1;
@@ -324,7 +324,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
 	uint32_t i = exec->shader_state_count++;
 
 	if (i >= exec->shader_state_size) {
-		DRM_ERROR("More requests for shader states than declared\n");
+		DRM_DEBUG("More requests for shader states than declared\n");
 		return -EINVAL;
 	}
 
@@ -332,7 +332,7 @@ validate_gl_shader_state(VALIDATE_ARGS)
 	exec->shader_state[i].max_index = 0;
 
 	if (exec->shader_state[i].addr & ~0xf) {
-		DRM_ERROR("high bits set in GL shader rec reference\n");
+		DRM_DEBUG("high bits set in GL shader rec reference\n");
 		return -EINVAL;
 	}
 
@@ -356,7 +356,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
 	int bin_slot;
 
 	if (exec->found_tile_binning_mode_config_packet) {
-		DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
+		DRM_DEBUG("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
 		return -EINVAL;
 	}
 	exec->found_tile_binning_mode_config_packet = true;
@@ -368,14 +368,14 @@ validate_tile_binning_config(VALIDATE_ARGS)
 
 	if (exec->bin_tiles_x == 0 ||
 	    exec->bin_tiles_y == 0) {
-		DRM_ERROR("Tile binning config of %dx%d too small\n",
+		DRM_DEBUG("Tile binning config of %dx%d too small\n",
 			  exec->bin_tiles_x, exec->bin_tiles_y);
 		return -EINVAL;
 	}
 
 	if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
 		     VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
-		DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
+		DRM_DEBUG("unsupported binning config flags 0x%02x\n", flags);
 		return -EINVAL;
 	}
 
@@ -493,20 +493,20 @@ vc4_validate_bin_cl(struct drm_device *dev,
 		const struct cmd_info *info;
 
 		if (cmd >= ARRAY_SIZE(cmd_info)) {
-			DRM_ERROR("0x%08x: packet %d out of bounds\n",
+			DRM_DEBUG("0x%08x: packet %d out of bounds\n",
 				  src_offset, cmd);
 			return -EINVAL;
 		}
 
 		info = &cmd_info[cmd];
 		if (!info->name) {
-			DRM_ERROR("0x%08x: packet %d invalid\n",
+			DRM_DEBUG("0x%08x: packet %d invalid\n",
 				  src_offset, cmd);
 			return -EINVAL;
 		}
 
 		if (src_offset + info->len > len) {
-			DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
+			DRM_DEBUG("0x%08x: packet %d (%s) length 0x%08x "
 				  "exceeds bounds (0x%08x)\n",
 				  src_offset, cmd, info->name, info->len,
 				  src_offset + len);
@@ -519,7 +519,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
 		if (info->func && info->func(exec,
 					     dst_pkt + 1,
 					     src_pkt + 1)) {
-			DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
+			DRM_DEBUG("0x%08x: packet %d (%s) failed to validate\n",
 				  src_offset, cmd, info->name);
 			return -EINVAL;
 		}
@@ -537,7 +537,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
 	exec->ct0ea = exec->ct0ca + dst_offset;
 
 	if (!exec->found_start_tile_binning_packet) {
-		DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
+		DRM_DEBUG("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
 		return -EINVAL;
 	}
 
@@ -549,7 +549,7 @@ vc4_validate_bin_cl(struct drm_device *dev,
 	 * semaphore increment.
 	 */
 	if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
-		DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
+		DRM_DEBUG("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
 			  "VC4_PACKET_FLUSH\n");
 		return -EINVAL;
 	}
@@ -588,11 +588,11 @@ reloc_tex(struct vc4_exec_info *exec,
 		uint32_t remaining_size = tex->base.size - p0;
 
 		if (p0 > tex->base.size - 4) {
-			DRM_ERROR("UBO offset greater than UBO size\n");
+			DRM_DEBUG("UBO offset greater than UBO size\n");
 			goto fail;
 		}
 		if (p1 > remaining_size - 4) {
-			DRM_ERROR("UBO clamp would allow reads "
+			DRM_DEBUG("UBO clamp would allow reads "
 				  "outside of UBO\n");
 			goto fail;
 		}
@@ -612,14 +612,14 @@ reloc_tex(struct vc4_exec_info *exec,
 		if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
 		    VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
 			if (cube_map_stride) {
-				DRM_ERROR("Cube map stride set twice\n");
+				DRM_DEBUG("Cube map stride set twice\n");
 				goto fail;
 			}
 
 			cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
 		}
 		if (!cube_map_stride) {
-			DRM_ERROR("Cube map stride not set\n");
+			DRM_DEBUG("Cube map stride not set\n");
 			goto fail;
 		}
 	}
@@ -660,7 +660,7 @@ reloc_tex(struct vc4_exec_info *exec,
 	case VC4_TEXTURE_TYPE_RGBA64:
 	case VC4_TEXTURE_TYPE_YUV422R:
 	default:
-		DRM_ERROR("Texture format %d unsupported\n", type);
+		DRM_DEBUG("Texture format %d unsupported\n", type);
 		goto fail;
 	}
 	utile_w = utile_width(cpp);
@@ -713,7 +713,7 @@ reloc_tex(struct vc4_exec_info *exec,
 		level_size = aligned_width * cpp * aligned_height;
 
 		if (offset < level_size) {
-			DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
+			DRM_DEBUG("Level %d (%dx%d -> %dx%d) size %db "
 				  "overflowed buffer bounds (offset %d)\n",
 				  i, level_width, level_height,
 				  aligned_width, aligned_height,
@@ -764,7 +764,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 	nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
 	if (nr_relocs * 4 > exec->shader_rec_size) {
-		DRM_ERROR("overflowed shader recs reading %d handles "
+		DRM_DEBUG("overflowed shader recs reading %d handles "
 			  "from %d bytes left\n",
 			  nr_relocs, exec->shader_rec_size);
 		return -EINVAL;
@@ -774,7 +774,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 	exec->shader_rec_size -= nr_relocs * 4;
 
 	if (packet_size > exec->shader_rec_size) {
-		DRM_ERROR("overflowed shader recs copying %db packet "
+		DRM_DEBUG("overflowed shader recs copying %db packet "
 			  "from %d bytes left\n",
 			  packet_size, exec->shader_rec_size);
 		return -EINVAL;
@@ -794,7 +794,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 	for (i = 0; i < shader_reloc_count; i++) {
 		if (src_handles[i] > exec->bo_count) {
-			DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
+			DRM_DEBUG("Shader handle %d too big\n", src_handles[i]);
 			return -EINVAL;
 		}
 
@@ -810,13 +810,13 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 	if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
 	    to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
-		DRM_ERROR("Thread mode of CL and FS do not match\n");
+		DRM_DEBUG("Thread mode of CL and FS do not match\n");
 		return -EINVAL;
 	}
 
 	if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
 	    to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
-		DRM_ERROR("cs and vs cannot be threaded\n");
+		DRM_DEBUG("cs and vs cannot be threaded\n");
 		return -EINVAL;
 	}
 
@@ -831,7 +831,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 		*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
 
 		if (src_offset != 0) {
-			DRM_ERROR("Shaders must be at offset 0 of "
+			DRM_DEBUG("Shaders must be at offset 0 of "
 				  "the BO.\n");
 			return -EINVAL;
 		}
@@ -842,7 +842,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 		if (validated_shader->uniforms_src_size >
 		    exec->uniforms_size) {
-			DRM_ERROR("Uniforms src buffer overflow\n");
+			DRM_DEBUG("Uniforms src buffer overflow\n");
 			return -EINVAL;
 		}
 
@@ -900,7 +900,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 
 		if (vbo->base.size < offset ||
 		    vbo->base.size - offset < attr_size) {
-			DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
+			DRM_DEBUG("BO offset overflow (%d + %d > %zu)\n",
 				  offset, attr_size, vbo->base.size);
 			return -EINVAL;
 		}
@@ -909,7 +909,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 			max_index = ((vbo->base.size - offset - attr_size) /
 				     stride);
 			if (state->max_index > max_index) {
-				DRM_ERROR("primitives use index %d out of "
+				DRM_DEBUG("primitives use index %d out of "
 					  "supplied %d\n",
 					  state->max_index, max_index);
 				return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 0b2df5c6efb4..d3f15bf60900 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -200,7 +200,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		uint32_t clamp_reg, clamp_offset;
 
 		if (sig == QPU_SIG_SMALL_IMM) {
-			DRM_ERROR("direct TMU read used small immediate\n");
+			DRM_DEBUG("direct TMU read used small immediate\n");
 			return false;
 		}
 
@@ -209,7 +209,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		 */
 		if (is_mul ||
 		    QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
-			DRM_ERROR("direct TMU load wasn't an add\n");
+			DRM_DEBUG("direct TMU load wasn't an add\n");
 			return false;
 		}
 
@@ -220,13 +220,13 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 		 */
 		clamp_reg = raddr_add_a_to_live_reg_index(inst);
 		if (clamp_reg == ~0) {
-			DRM_ERROR("direct TMU load wasn't clamped\n");
+			DRM_DEBUG("direct TMU load wasn't clamped\n");
 			return false;
 		}
 
 		clamp_offset = validation_state->live_min_clamp_offsets[clamp_reg];
 		if (clamp_offset == ~0) {
-			DRM_ERROR("direct TMU load wasn't clamped\n");
+			DRM_DEBUG("direct TMU load wasn't clamped\n");
 			return false;
 		}
 
@@ -238,7 +238,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 
 		if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
 		    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
-			DRM_ERROR("direct TMU load didn't add to a uniform\n");
+			DRM_DEBUG("direct TMU load didn't add to a uniform\n");
 			return false;
 		}
 
@@ -246,14 +246,14 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 	} else {
 		if (raddr_a == QPU_R_UNIF || (sig != QPU_SIG_SMALL_IMM &&
 					      raddr_b == QPU_R_UNIF)) {
-			DRM_ERROR("uniform read in the same instruction as "
+			DRM_DEBUG("uniform read in the same instruction as "
 				  "texture setup.\n");
 			return false;
 		}
 	}
 
 	if (validation_state->tmu_write_count[tmu] >= 4) {
-		DRM_ERROR("TMU%d got too many parameters before dispatch\n",
+		DRM_DEBUG("TMU%d got too many parameters before dispatch\n",
 			  tmu);
 		return false;
 	}
@@ -265,7 +265,7 @@ check_tmu_write(struct vc4_validated_shader_info *validated_shader,
 	 */
 	if (!is_direct) {
 		if (validation_state->needs_uniform_address_update) {
-			DRM_ERROR("Texturing with undefined uniform address\n");
+			DRM_DEBUG("Texturing with undefined uniform address\n");
 			return false;
 		}
 
@@ -336,35 +336,35 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
 	case QPU_SIG_LOAD_TMU1:
 		break;
 	default:
-		DRM_ERROR("uniforms address change must be "
+		DRM_DEBUG("uniforms address change must be "
 			  "normal math\n");
 		return false;
 	}
 
 	if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
-		DRM_ERROR("Uniform address reset must be an ADD.\n");
+		DRM_DEBUG("Uniform address reset must be an ADD.\n");
 		return false;
 	}
 
 	if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
-		DRM_ERROR("Uniform address reset must be unconditional.\n");
+		DRM_DEBUG("Uniform address reset must be unconditional.\n");
 		return false;
 	}
 
 	if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
 	    !(inst & QPU_PM)) {
-		DRM_ERROR("No packing allowed on uniforms reset\n");
+		DRM_DEBUG("No packing allowed on uniforms reset\n");
 		return false;
 	}
 
 	if (add_lri == -1) {
-		DRM_ERROR("First argument of uniform address write must be "
+		DRM_DEBUG("First argument of uniform address write must be "
 			  "an immediate value.\n");
 		return false;
 	}
 
 	if (validation_state->live_immediates[add_lri] != expected_offset) {
-		DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+		DRM_DEBUG("Resetting uniforms with offset %db instead of %db\n",
 			  validation_state->live_immediates[add_lri],
 			  expected_offset);
 		return false;
@@ -372,7 +372,7 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
 
 	if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
 	    !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
886e8bd
-		DRM_ERROR("Second argument of uniform address write must be "
886e8bd
+		DRM_DEBUG("Second argument of uniform address write must be "
886e8bd
 			  "a uniform.\n");
886e8bd
 		return false;
886e8bd
 	}
886e8bd
@@ -417,7 +417,7 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
886e8bd
 	switch (waddr) {
886e8bd
 	case QPU_W_UNIFORMS_ADDRESS:
886e8bd
 		if (is_b) {
886e8bd
-			DRM_ERROR("relative uniforms address change "
886e8bd
+			DRM_DEBUG("relative uniforms address change "
886e8bd
 				  "unsupported\n");
886e8bd
 			return false;
886e8bd
 		}
886e8bd
@@ -452,11 +452,11 @@ check_reg_write(struct vc4_validated_shader_info *validated_shader,
886e8bd
 		/* XXX: I haven't thought about these, so don't support them
886e8bd
 		 * for now.
886e8bd
 		 */
886e8bd
-		DRM_ERROR("Unsupported waddr %d\n", waddr);
886e8bd
+		DRM_DEBUG("Unsupported waddr %d\n", waddr);
886e8bd
 		return false;
886e8bd
 
886e8bd
 	case QPU_W_VPM_ADDR:
886e8bd
-		DRM_ERROR("General VPM DMA unsupported\n");
886e8bd
+		DRM_DEBUG("General VPM DMA unsupported\n");
886e8bd
 		return false;
886e8bd
 
886e8bd
 	case QPU_W_VPM:
886e8bd
@@ -559,7 +559,7 @@ check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
886e8bd
 	bool ok;
886e8bd
 
886e8bd
 	if (is_tmu_write(waddr_add) && is_tmu_write(waddr_mul)) {
886e8bd
-		DRM_ERROR("ADD and MUL both set up textures\n");
886e8bd
+		DRM_DEBUG("ADD and MUL both set up textures\n");
886e8bd
 		return false;
886e8bd
 	}
886e8bd
 
886e8bd
@@ -588,7 +588,7 @@ check_branch(uint64_t inst,
886e8bd
 	 * there's no need for it.
886e8bd
 	 */
886e8bd
 	if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
886e8bd
-		DRM_ERROR("branch instruction at %d wrote a register.\n",
886e8bd
+		DRM_DEBUG("branch instruction at %d wrote a register.\n",
886e8bd
 			  validation_state->ip);
886e8bd
 		return false;
886e8bd
 	}
886e8bd
@@ -614,7 +614,7 @@ check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
886e8bd
 		validated_shader->uniforms_size += 4;
886e8bd
 
886e8bd
 		if (validation_state->needs_uniform_address_update) {
886e8bd
-			DRM_ERROR("Uniform read with undefined uniform "
886e8bd
+			DRM_DEBUG("Uniform read with undefined uniform "
886e8bd
 				  "address\n");
886e8bd
 			return false;
886e8bd
 		}
886e8bd
@@ -660,19 +660,19 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
886e8bd
 			continue;
886e8bd
 
886e8bd
 		if (ip - last_branch < 4) {
886e8bd
-			DRM_ERROR("Branch at %d during delay slots\n", ip);
886e8bd
+			DRM_DEBUG("Branch at %d during delay slots\n", ip);
886e8bd
 			return false;
886e8bd
 		}
886e8bd
 		last_branch = ip;
886e8bd
 
886e8bd
 		if (inst & QPU_BRANCH_REG) {
886e8bd
-			DRM_ERROR("branching from register relative "
886e8bd
+			DRM_DEBUG("branching from register relative "
886e8bd
 				  "not supported\n");
886e8bd
 			return false;
886e8bd
 		}
886e8bd
 
886e8bd
 		if (!(inst & QPU_BRANCH_REL)) {
886e8bd
-			DRM_ERROR("relative branching required\n");
886e8bd
+			DRM_DEBUG("relative branching required\n");
886e8bd
 			return false;
886e8bd
 		}
886e8bd
 
886e8bd
@@ -682,13 +682,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
886e8bd
 		 * end of the shader object.
886e8bd
 		 */
886e8bd
 		if (branch_imm % sizeof(inst) != 0) {
886e8bd
-			DRM_ERROR("branch target not aligned\n");
886e8bd
+			DRM_DEBUG("branch target not aligned\n");
886e8bd
 			return false;
886e8bd
 		}
886e8bd
 
886e8bd
 		branch_target_ip = after_delay_ip + (branch_imm >> 3);
886e8bd
 		if (branch_target_ip >= validation_state->max_ip) {
886e8bd
-			DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
886e8bd
+			DRM_DEBUG("Branch at %d outside of shader (ip %d/%d)\n",
886e8bd
 				  ip, branch_target_ip,
886e8bd
 				  validation_state->max_ip);
886e8bd
 			return false;
886e8bd
@@ -699,7 +699,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
886e8bd
 		 * the shader.
886e8bd
 		 */
886e8bd
 		if (after_delay_ip >= validation_state->max_ip) {
886e8bd
-			DRM_ERROR("Branch at %d continues past shader end "
886e8bd
+			DRM_DEBUG("Branch at %d continues past shader end "
886e8bd
 				  "(%d/%d)\n",
886e8bd
 				  ip, after_delay_ip, validation_state->max_ip);
886e8bd
 			return false;
886e8bd
@@ -709,7 +709,7 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
886e8bd
 	}
886e8bd
 
886e8bd
 	if (max_branch_target > validation_state->max_ip - 3) {
886e8bd
-		DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
886e8bd
+		DRM_DEBUG("Branch landed after QPU_SIG_PROG_END");
886e8bd
 		return false;
886e8bd
 	}
886e8bd
 
886e8bd
@@ -750,7 +750,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
886e8bd
 		return true;
886e8bd
 
886e8bd
 	if (texturing_in_progress(validation_state)) {
886e8bd
-		DRM_ERROR("Branch target landed during TMU setup\n");
886e8bd
+		DRM_DEBUG("Branch target landed during TMU setup\n");
886e8bd
 		return false;
886e8bd
 	}
886e8bd
 
886e8bd
@@ -837,7 +837,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 		case QPU_SIG_LAST_THREAD_SWITCH:
886e8bd
 			if (!check_instruction_writes(validated_shader,
886e8bd
 						      &validation_state)) {
886e8bd
-				DRM_ERROR("Bad write at ip %d\n", ip);
886e8bd
+				DRM_DEBUG("Bad write at ip %d\n", ip);
886e8bd
 				goto fail;
886e8bd
 			}
886e8bd
 
886e8bd
@@ -855,7 +855,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 				validated_shader->is_threaded = true;
886e8bd
 
886e8bd
 				if (ip < last_thread_switch_ip + 3) {
886e8bd
-					DRM_ERROR("Thread switch too soon after "
886e8bd
+					DRM_DEBUG("Thread switch too soon after "
886e8bd
 						  "last switch at ip %d\n", ip);
886e8bd
 					goto fail;
886e8bd
 				}
886e8bd
@@ -867,7 +867,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 		case QPU_SIG_LOAD_IMM:
886e8bd
 			if (!check_instruction_writes(validated_shader,
886e8bd
 						      &validation_state)) {
886e8bd
-				DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
886e8bd
+				DRM_DEBUG("Bad LOAD_IMM write at ip %d\n", ip);
886e8bd
 				goto fail;
886e8bd
 			}
886e8bd
 			break;
886e8bd
@@ -878,14 +878,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 				goto fail;
886e8bd
 
886e8bd
 			if (ip < last_thread_switch_ip + 3) {
886e8bd
-				DRM_ERROR("Branch in thread switch at ip %d",
886e8bd
+				DRM_DEBUG("Branch in thread switch at ip %d",
886e8bd
 					  ip);
886e8bd
 				goto fail;
886e8bd
 			}
886e8bd
 
886e8bd
 			break;
886e8bd
 		default:
886e8bd
-			DRM_ERROR("Unsupported QPU signal %d at "
886e8bd
+			DRM_DEBUG("Unsupported QPU signal %d at "
886e8bd
 				  "instruction %d\n", sig, ip);
886e8bd
 			goto fail;
886e8bd
 		}
886e8bd
@@ -898,7 +898,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 	}
886e8bd
 
886e8bd
 	if (ip == validation_state.max_ip) {
886e8bd
-		DRM_ERROR("shader failed to terminate before "
886e8bd
+		DRM_DEBUG("shader failed to terminate before "
886e8bd
 			  "shader BO end at %zd\n",
886e8bd
 			  shader_obj->base.size);
886e8bd
 		goto fail;
886e8bd
@@ -907,7 +907,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
886e8bd
 	/* Might corrupt other thread */
886e8bd
 	if (validated_shader->is_threaded &&
886e8bd
 	    validation_state.all_registers_used) {
886e8bd
-		DRM_ERROR("Shader uses threading, but uses the upper "
886e8bd
+		DRM_DEBUG("Shader uses threading, but uses the upper "
886e8bd
 			  "half of the registers, too\n");
886e8bd
 		goto fail;
886e8bd
 	}
886e8bd
-- 
886e8bd
2.13.5
886e8bd
886e8bd
From 28b369f5abc790f56e668869d88f261ca7a27c55 Mon Sep 17 00:00:00 2001
886e8bd
From: Eric Anholt <eric@anholt.net>
886e8bd
Date: Tue, 8 Aug 2017 13:56:05 -0700
886e8bd
Subject: [PATCH 2/6] drm/vc4: Fix leak of HDMI EDID
886e8bd
886e8bd
We don't keep a pointer to it around anywhere, so it's our job to free
886e8bd
it.
886e8bd
886e8bd
Cc: Stefan Wahren <stefan.wahren@i2se.com>
886e8bd
Link: https://github.com/anholt/linux/issues/101
886e8bd
Fixes: c8b75bca92cb ("drm/vc4: Add KMS support for Raspberry Pi.")
886e8bd
Signed-off-by: Eric Anholt <eric@anholt.net>
886e8bd
Link: https://patchwork.freedesktop.org/patch/msgid/20170808205605.4432-1-eric@anholt.net
886e8bd
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
886e8bd
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
886e8bd
---
886e8bd
 drivers/gpu/drm/vc4/vc4_hdmi.c | 1 +
886e8bd
 1 file changed, 1 insertion(+)
886e8bd
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
886e8bd
index ed63d4e85762..f7803fd7f47c 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
886e8bd
@@ -260,6 +260,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
886e8bd
 	drm_mode_connector_update_edid_property(connector, edid);
886e8bd
 	ret = drm_add_edid_modes(connector, edid);
886e8bd
 	drm_edid_to_eld(connector, edid);
886e8bd
+	kfree(edid);
886e8bd
 
886e8bd
 	return ret;
886e8bd
 }
886e8bd
-- 
886e8bd
2.13.5
886e8bd
886e8bd
From 3b688b6d347f777a8e86165decc33198b063b8c0 Mon Sep 17 00:00:00 2001
886e8bd
From: Eric Anholt <eric@anholt.net>
886e8bd
Date: Tue, 25 Jul 2017 11:27:16 -0700
886e8bd
Subject: [PATCH 3/6] drm/vc4: Start using u64_to_user_ptr.
886e8bd
886e8bd
Chris Wilson pointed out this little cleanup in a review of new code,
886e8bd
so let's fix up the code I was copying from.
886e8bd
886e8bd
Signed-off-by: Eric Anholt <eric@anholt.net>
886e8bd
Link: https://patchwork.freedesktop.org/patch/msgid/20170725182718.31468-1-eric@anholt.net
886e8bd
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
886e8bd
---
886e8bd
 drivers/gpu/drm/vc4/vc4_gem.c | 11 +++++------
886e8bd
 1 file changed, 5 insertions(+), 6 deletions(-)
886e8bd
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
index a3e45e67f417..8b551bc630c4 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
@@ -119,7 +119,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
886e8bd
 		bo_state[i].size = vc4_bo->base.base.size;
886e8bd
 	}
886e8bd
 
886e8bd
-	if (copy_to_user((void __user *)(uintptr_t)get_state->bo,
886e8bd
+	if (copy_to_user(u64_to_user_ptr(get_state->bo),
886e8bd
 			 bo_state,
886e8bd
 			 state->bo_count * sizeof(*bo_state)))
886e8bd
 		ret = -EFAULT;
886e8bd
@@ -678,8 +678,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
886e8bd
 		goto fail;
886e8bd
 	}
886e8bd
 
886e8bd
-	if (copy_from_user(handles,
886e8bd
-			   (void __user *)(uintptr_t)args->bo_handles,
886e8bd
+	if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
886e8bd
 			   exec->bo_count * sizeof(uint32_t))) {
886e8bd
 		ret = -EFAULT;
886e8bd
 		DRM_ERROR("Failed to copy in GEM handles\n");
886e8bd
@@ -755,21 +754,21 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
886e8bd
 	exec->shader_state_size = args->shader_rec_count;
886e8bd
 
886e8bd
 	if (copy_from_user(bin,
886e8bd
-			   (void __user *)(uintptr_t)args->bin_cl,
886e8bd
+			   u64_to_user_ptr(args->bin_cl),
886e8bd
 			   args->bin_cl_size)) {
886e8bd
 		ret = -EFAULT;
886e8bd
 		goto fail;
886e8bd
 	}
886e8bd
 
886e8bd
 	if (copy_from_user(exec->shader_rec_u,
886e8bd
-			   (void __user *)(uintptr_t)args->shader_rec,
886e8bd
+			   u64_to_user_ptr(args->shader_rec),
886e8bd
 			   args->shader_rec_size)) {
886e8bd
 		ret = -EFAULT;
886e8bd
 		goto fail;
886e8bd
 	}
886e8bd
 
886e8bd
 	if (copy_from_user(exec->uniforms_u,
886e8bd
-			   (void __user *)(uintptr_t)args->uniforms,
886e8bd
+			   u64_to_user_ptr(args->uniforms),
886e8bd
 			   args->uniforms_size)) {
886e8bd
 		ret = -EFAULT;
886e8bd
 		goto fail;
886e8bd
-- 
886e8bd
2.13.5
886e8bd
886e8bd
From da81d76bce216c160d2924a52e362b160bbb6ca1 Mon Sep 17 00:00:00 2001
886e8bd
From: Eric Anholt <eric@anholt.net>
886e8bd
Date: Tue, 25 Jul 2017 11:27:17 -0700
886e8bd
Subject: [PATCH 4/6] drm/vc4: Add an ioctl for labeling GEM BOs for summary
886e8bd
 stats
886e8bd
886e8bd
This has proven immensely useful for debugging memory leaks and
886e8bd
overallocation (which is a rather serious concern on the platform,
886e8bd
given that we typically run at about 256MB of CMA out of up to 1GB
886e8bd
total memory, with framebuffers that are about 8MB each).
886e8bd
886e8bd
The state of the art without this is to dump debug logs from every GL
886e8bd
application, guess as to kernel allocations based on bo_stats, and try
886e8bd
to merge that all together into a global picture of memory allocation
886e8bd
state.  With this, you can add a couple of calls to the debug build of
886e8bd
the 3D driver and get a pretty detailed view of GPU memory usage from
886e8bd
/debug/dri/0/bo_stats (or when we debug print to dmesg on allocation
886e8bd
failure).
886e8bd
886e8bd
The Mesa side currently labels at the gallium resource level (so you
886e8bd
see that a 1920x20 pixmap has been created, presumably for the window
886e8bd
system panel), but we could extend that to be even more useful with
886e8bd
glObjectLabel() names being sent all the way down to the kernel.
886e8bd
886e8bd
(partial) example of sorted debugfs output with Mesa labeling all
886e8bd
resources:
886e8bd
886e8bd
               kernel BO cache:  16392kb BOs (3)
886e8bd
       tiling shadow 1920x1080:   8160kb BOs (1)
886e8bd
       resource 1920x1080@32/0:   8160kb BOs (1)
886e8bd
scanout resource 1920x1080@32/0:   8100kb BOs (1)
886e8bd
                        kernel:   8100kb BOs (1)
886e8bd
886e8bd
v2: Use strndup_user(), use lockdep assertion instead of just a
886e8bd
    comment, fix an array[-1] reference, extend comment about name
886e8bd
    freeing.
886e8bd
886e8bd
Signed-off-by: Eric Anholt <eric@anholt.net>
886e8bd
Link: https://patchwork.freedesktop.org/patch/msgid/20170725182718.31468-2-eric@anholt.net
886e8bd
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
886e8bd
---
886e8bd
 drivers/gpu/drm/vc4/vc4_bo.c        | 258 ++++++++++++++++++++++++++++--------
886e8bd
 drivers/gpu/drm/vc4/vc4_drv.c       |   8 +-
886e8bd
 drivers/gpu/drm/vc4/vc4_drv.h       |  39 +++++-
886e8bd
 drivers/gpu/drm/vc4/vc4_gem.c       |   2 +-
886e8bd
 drivers/gpu/drm/vc4/vc4_render_cl.c |   2 +-
886e8bd
 drivers/gpu/drm/vc4/vc4_v3d.c       |   3 +-
886e8bd
 include/uapi/drm/vc4_drm.h          |  11 ++
886e8bd
 7 files changed, 257 insertions(+), 66 deletions(-)
886e8bd
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
886e8bd
index ede80199001d..27c4a927311f 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_bo.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
886e8bd
@@ -24,21 +24,35 @@
886e8bd
 #include "vc4_drv.h"
886e8bd
 #include "uapi/drm/vc4_drm.h"
886e8bd
 
886e8bd
+static const char * const bo_type_names[] = {
886e8bd
+	"kernel",
886e8bd
+	"V3D",
886e8bd
+	"V3D shader",
886e8bd
+	"dumb",
886e8bd
+	"binner",
886e8bd
+	"RCL",
886e8bd
+	"BCL",
886e8bd
+	"kernel BO cache",
886e8bd
+};
886e8bd
+
886e8bd
+static bool is_user_label(int label)
886e8bd
+{
886e8bd
+	return label >= VC4_BO_TYPE_COUNT;
886e8bd
+}
886e8bd
+
886e8bd
 static void vc4_bo_stats_dump(struct vc4_dev *vc4)
886e8bd
 {
886e8bd
-	DRM_INFO("num bos allocated: %d\n",
886e8bd
-		 vc4->bo_stats.num_allocated);
886e8bd
-	DRM_INFO("size bos allocated: %dkb\n",
886e8bd
-		 vc4->bo_stats.size_allocated / 1024);
886e8bd
-	DRM_INFO("num bos used: %d\n",
886e8bd
-		 vc4->bo_stats.num_allocated - vc4->bo_stats.num_cached);
886e8bd
-	DRM_INFO("size bos used: %dkb\n",
886e8bd
-		 (vc4->bo_stats.size_allocated -
886e8bd
-		  vc4->bo_stats.size_cached) / 1024);
886e8bd
-	DRM_INFO("num bos cached: %d\n",
886e8bd
-		 vc4->bo_stats.num_cached);
886e8bd
-	DRM_INFO("size bos cached: %dkb\n",
886e8bd
-		 vc4->bo_stats.size_cached / 1024);
886e8bd
+	int i;
886e8bd
+
886e8bd
+	for (i = 0; i < vc4->num_labels; i++) {
886e8bd
+		if (!vc4->bo_labels[i].num_allocated)
886e8bd
+			continue;
886e8bd
+
886e8bd
+		DRM_INFO("%30s: %6dkb BOs (%d)\n",
886e8bd
+			 vc4->bo_labels[i].name,
886e8bd
+			 vc4->bo_labels[i].size_allocated / 1024,
886e8bd
+			 vc4->bo_labels[i].num_allocated);
886e8bd
+	}
886e8bd
 }
886e8bd
 
886e8bd
 #ifdef CONFIG_DEBUG_FS
886e8bd
@@ -47,30 +61,103 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
886e8bd
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
886e8bd
 	struct drm_device *dev = node->minor->dev;
886e8bd
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
-	struct vc4_bo_stats stats;
886e8bd
+	int i;
886e8bd
 
886e8bd
-	/* Take a snapshot of the current stats with the lock held. */
886e8bd
 	mutex_lock(&vc4->bo_lock);
886e8bd
-	stats = vc4->bo_stats;
886e8bd
+	for (i = 0; i < vc4->num_labels; i++) {
886e8bd
+		if (!vc4->bo_labels[i].num_allocated)
886e8bd
+			continue;
886e8bd
+
886e8bd
+		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
886e8bd
+			   vc4->bo_labels[i].name,
886e8bd
+			   vc4->bo_labels[i].size_allocated / 1024,
886e8bd
+			   vc4->bo_labels[i].num_allocated);
886e8bd
+	}
886e8bd
 	mutex_unlock(&vc4->bo_lock);
886e8bd
 
886e8bd
-	seq_printf(m, "num bos allocated: %d\n",
886e8bd
-		   stats.num_allocated);
886e8bd
-	seq_printf(m, "size bos allocated: %dkb\n",
886e8bd
-		   stats.size_allocated / 1024);
886e8bd
-	seq_printf(m, "num bos used: %d\n",
886e8bd
-		   stats.num_allocated - stats.num_cached);
886e8bd
-	seq_printf(m, "size bos used: %dkb\n",
886e8bd
-		   (stats.size_allocated - stats.size_cached) / 1024);
886e8bd
-	seq_printf(m, "num bos cached: %d\n",
886e8bd
-		   stats.num_cached);
886e8bd
-	seq_printf(m, "size bos cached: %dkb\n",
886e8bd
-		   stats.size_cached / 1024);
886e8bd
-
886e8bd
 	return 0;
886e8bd
 }
886e8bd
 #endif
886e8bd
 
886e8bd
+/* Takes ownership of *name and returns the appropriate slot for it in
886e8bd
+ * the bo_labels[] array, extending it as necessary.
886e8bd
+ *
886e8bd
+ * This is inefficient and could use a hash table instead of walking
886e8bd
+ * an array and strcmp()ing.  However, the assumption is that user
886e8bd
+ * labeling will be infrequent (scanout buffers and other long-lived
886e8bd
+ * objects, or debug driver builds), so we can live with it for now.
886e8bd
+ */
886e8bd
+static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
886e8bd
+{
886e8bd
+	int i;
886e8bd
+	int free_slot = -1;
886e8bd
+
886e8bd
+	for (i = 0; i < vc4->num_labels; i++) {
886e8bd
+		if (!vc4->bo_labels[i].name) {
886e8bd
+			free_slot = i;
886e8bd
+		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
886e8bd
+			kfree(name);
886e8bd
+			return i;
886e8bd
+		}
886e8bd
+	}
886e8bd
+
886e8bd
+	if (free_slot != -1) {
886e8bd
+		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
886e8bd
+		vc4->bo_labels[free_slot].name = name;
886e8bd
+		return free_slot;
886e8bd
+	} else {
886e8bd
+		u32 new_label_count = vc4->num_labels + 1;
886e8bd
+		struct vc4_label *new_labels =
886e8bd
+			krealloc(vc4->bo_labels,
886e8bd
+				 new_label_count * sizeof(*new_labels),
886e8bd
+				 GFP_KERNEL);
886e8bd
+
886e8bd
+		if (!new_labels) {
886e8bd
+			kfree(name);
886e8bd
+			return -1;
886e8bd
+		}
886e8bd
+
886e8bd
+		free_slot = vc4->num_labels;
886e8bd
+		vc4->bo_labels = new_labels;
886e8bd
+		vc4->num_labels = new_label_count;
886e8bd
+
886e8bd
+		vc4->bo_labels[free_slot].name = name;
886e8bd
+		vc4->bo_labels[free_slot].num_allocated = 0;
886e8bd
+		vc4->bo_labels[free_slot].size_allocated = 0;
886e8bd
+
886e8bd
+		return free_slot;
886e8bd
+	}
886e8bd
+}
886e8bd
+
886e8bd
+static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
886e8bd
+{
886e8bd
+	struct vc4_bo *bo = to_vc4_bo(gem_obj);
886e8bd
+	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);
886e8bd
+
886e8bd
+	lockdep_assert_held(&vc4->bo_lock);
886e8bd
+
886e8bd
+	if (label != -1) {
886e8bd
+		vc4->bo_labels[label].num_allocated++;
886e8bd
+		vc4->bo_labels[label].size_allocated += gem_obj->size;
886e8bd
+	}
886e8bd
+
886e8bd
+	vc4->bo_labels[bo->label].num_allocated--;
886e8bd
+	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;
886e8bd
+
886e8bd
+	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
886e8bd
+	    is_user_label(bo->label)) {
886e8bd
+		/* Free user BO label slots on last unreference.
886e8bd
+		 * Slots are just where we track the stats for a given
886e8bd
+		 * name, and once a name is unused we can reuse that
886e8bd
+		 * slot.
886e8bd
+		 */
886e8bd
+		kfree(vc4->bo_labels[bo->label].name);
886e8bd
+		vc4->bo_labels[bo->label].name = NULL;
886e8bd
+	}
886e8bd
+
886e8bd
+	bo->label = label;
886e8bd
+}
886e8bd
+
886e8bd
 static uint32_t bo_page_index(size_t size)
886e8bd
 {
886e8bd
 	return (size / PAGE_SIZE) - 1;
886e8bd
@@ -80,7 +167,8 @@ static uint32_t bo_page_index(size_t size)
886e8bd
 static void vc4_bo_destroy(struct vc4_bo *bo)
886e8bd
 {
886e8bd
 	struct drm_gem_object *obj = &bo->base.base;
886e8bd
-	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
886e8bd
+
886e8bd
+	vc4_bo_set_label(obj, -1);
886e8bd
 
886e8bd
 	if (bo->validated_shader) {
886e8bd
 		kfree(bo->validated_shader->texture_samples);
886e8bd
@@ -88,9 +176,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
886e8bd
 		bo->validated_shader = NULL;
886e8bd
 	}
886e8bd
 
886e8bd
-	vc4->bo_stats.num_allocated--;
886e8bd
-	vc4->bo_stats.size_allocated -= obj->size;
886e8bd
-
886e8bd
 	reservation_object_fini(&bo->_resv);
886e8bd
 
886e8bd
 	drm_gem_cma_free_object(obj);
886e8bd
@@ -99,12 +184,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
886e8bd
 /* Must be called with bo_lock held. */
886e8bd
 static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
886e8bd
 {
886e8bd
-	struct drm_gem_object *obj = &bo->base.base;
886e8bd
-	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);
886e8bd
-
886e8bd
-	vc4->bo_stats.num_cached--;
886e8bd
-	vc4->bo_stats.size_cached -= obj->size;
886e8bd
-
886e8bd
 	list_del(&bo->unref_head);
886e8bd
 	list_del(&bo->size_head);
886e8bd
 }
886e8bd
@@ -165,7 +244,8 @@ static void vc4_bo_cache_purge(struct drm_device *dev)
886e8bd
 }
886e8bd
 
886e8bd
 static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
886e8bd
-					    uint32_t size)
886e8bd
+					    uint32_t size,
886e8bd
+					    enum vc4_kernel_bo_type type)
886e8bd
 {
886e8bd
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
 	uint32_t page_index = bo_page_index(size);
886e8bd
@@ -186,6 +266,8 @@ static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
886e8bd
 	kref_init(&bo->base.base.refcount);
886e8bd
 
886e8bd
 out:
886e8bd
+	if (bo)
886e8bd
+		vc4_bo_set_label(&bo->base.base, type);
886e8bd
 	mutex_unlock(&vc4->bo_lock);
886e8bd
 	return bo;
886e8bd
 }
886e8bd
@@ -208,8 +290,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
886e8bd
 		return ERR_PTR(-ENOMEM);
886e8bd
 
886e8bd
 	mutex_lock(&vc4->bo_lock);
886e8bd
-	vc4->bo_stats.num_allocated++;
886e8bd
-	vc4->bo_stats.size_allocated += size;
886e8bd
+	bo->label = VC4_BO_TYPE_KERNEL;
886e8bd
+	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
886e8bd
+	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
886e8bd
 	mutex_unlock(&vc4->bo_lock);
886e8bd
 	bo->resv = &bo->_resv;
886e8bd
 	reservation_object_init(bo->resv);
886e8bd
@@ -218,7 +301,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
886e8bd
 }
886e8bd
 
886e8bd
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
886e8bd
-			     bool allow_unzeroed)
886e8bd
+			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
886e8bd
 {
886e8bd
 	size_t size = roundup(unaligned_size, PAGE_SIZE);
886e8bd
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
@@ -229,7 +312,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
886e8bd
 		return ERR_PTR(-EINVAL);
886e8bd
 
886e8bd
 	/* First, try to get a vc4_bo from the kernel BO cache. */
886e8bd
-	bo = vc4_bo_get_from_cache(dev, size);
886e8bd
+	bo = vc4_bo_get_from_cache(dev, size, type);
886e8bd
 	if (bo) {
886e8bd
 		if (!allow_unzeroed)
886e8bd
 			memset(bo->base.vaddr, 0, bo->base.base.size);
886e8bd
@@ -251,7 +334,13 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
886e8bd
 			return ERR_PTR(-ENOMEM);
886e8bd
 		}
886e8bd
 	}
886e8bd
-	return to_vc4_bo(&cma_obj->base);
886e8bd
+	bo = to_vc4_bo(&cma_obj->base);
886e8bd
+
886e8bd
+	mutex_lock(&vc4->bo_lock);
886e8bd
+	vc4_bo_set_label(&cma_obj->base, type);
886e8bd
+	mutex_unlock(&vc4->bo_lock);
886e8bd
+
886e8bd
+	return bo;
886e8bd
 }
886e8bd
 
886e8bd
 int vc4_dumb_create(struct drm_file *file_priv,
886e8bd
@@ -268,7 +357,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
886e8bd
 	if (args->size < args->pitch * args->height)
886e8bd
 		args->size = args->pitch * args->height;
886e8bd
 
886e8bd
-	bo = vc4_bo_create(dev, args->size, false);
886e8bd
+	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
886e8bd
 	if (IS_ERR(bo))
886e8bd
 		return PTR_ERR(bo);
886e8bd
 
886e8bd
@@ -348,8 +437,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
886e8bd
 	list_add(&bo->size_head, cache_list);
886e8bd
 	list_add(&bo->unref_head, &vc4->bo_cache.time_list);
886e8bd
 
886e8bd
-	vc4->bo_stats.num_cached++;
886e8bd
-	vc4->bo_stats.size_cached += gem_bo->size;
886e8bd
+	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);
886e8bd
 
886e8bd
 	vc4_bo_cache_free_old(dev);
886e8bd
 
886e8bd
@@ -483,7 +571,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
886e8bd
 	 * We can't allocate from the BO cache, because the BOs don't
886e8bd
 	 * get zeroed, and that might leak data between users.
886e8bd
 	 */
886e8bd
-	bo = vc4_bo_create(dev, args->size, false);
886e8bd
+	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
886e8bd
 	if (IS_ERR(bo))
886e8bd
 		return PTR_ERR(bo);
886e8bd
 
886e8bd
@@ -536,7 +624,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
886e8bd
 		return -EINVAL;
886e8bd
 	}
886e8bd
 
886e8bd
-	bo = vc4_bo_create(dev, args->size, true);
886e8bd
+	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
886e8bd
 	if (IS_ERR(bo))
886e8bd
 		return PTR_ERR(bo);
886e8bd
 
886e8bd
@@ -651,9 +739,24 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
886e8bd
 	return 0;
886e8bd
 }
886e8bd
 
886e8bd
-void vc4_bo_cache_init(struct drm_device *dev)
886e8bd
+int vc4_bo_cache_init(struct drm_device *dev)
886e8bd
 {
886e8bd
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
+	int i;
886e8bd
+
886e8bd
+	/* Create the initial set of BO labels that the kernel will
886e8bd
+	 * use.  This lets us avoid a bunch of string reallocation in
886e8bd
+	 * the kernel's draw and BO allocation paths.
886e8bd
+	 */
886e8bd
+	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
886e8bd
+				 GFP_KERNEL);
886e8bd
+	if (!vc4->bo_labels)
886e8bd
+		return -ENOMEM;
886e8bd
+	vc4->num_labels = VC4_BO_TYPE_COUNT;
886e8bd
+
886e8bd
+	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
886e8bd
+	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
886e8bd
+		vc4->bo_labels[i].name = bo_type_names[i];
886e8bd
 
886e8bd
 	mutex_init(&vc4->bo_lock);
886e8bd
 
886e8bd
@@ -663,19 +766,66 @@ void vc4_bo_cache_init(struct drm_device *dev)
886e8bd
 	setup_timer(&vc4->bo_cache.time_timer,
886e8bd
 		    vc4_bo_cache_time_timer,
886e8bd
 		    (unsigned long)dev);
886e8bd
+
886e8bd
+	return 0;
886e8bd
 }
886e8bd
 
886e8bd
 void vc4_bo_cache_destroy(struct drm_device *dev)
886e8bd
 {
886e8bd
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
+	int i;
886e8bd
 
886e8bd
 	del_timer(&vc4->bo_cache.time_timer);
886e8bd
 	cancel_work_sync(&vc4->bo_cache.time_work);
886e8bd
 
886e8bd
 	vc4_bo_cache_purge(dev);
886e8bd
 
886e8bd
-	if (vc4->bo_stats.num_allocated) {
886e8bd
-		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
886e8bd
-		vc4_bo_stats_dump(vc4);
886e8bd
+	for (i = 0; i < vc4->num_labels; i++) {
886e8bd
+		if (vc4->bo_labels[i].num_allocated) {
886e8bd
+			DRM_ERROR("Destroying BO cache with %d %s "
886e8bd
+				  "BOs still allocated\n",
886e8bd
+				  vc4->bo_labels[i].num_allocated,
886e8bd
+				  vc4->bo_labels[i].name);
886e8bd
+		}
886e8bd
+
886e8bd
+		if (is_user_label(i))
886e8bd
+			kfree(vc4->bo_labels[i].name);
886e8bd
 	}
886e8bd
+	kfree(vc4->bo_labels);
886e8bd
+}
886e8bd
+
886e8bd
+int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
886e8bd
+		       struct drm_file *file_priv)
886e8bd
+{
886e8bd
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
886e8bd
+	struct drm_vc4_label_bo *args = data;
886e8bd
+	char *name;
886e8bd
+	struct drm_gem_object *gem_obj;
886e8bd
+	int ret = 0, label;
886e8bd
+
886e8bd
+	if (!args->len)
886e8bd
+		return -EINVAL;
886e8bd
+
886e8bd
+	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
886e8bd
+	if (IS_ERR(name))
886e8bd
+		return PTR_ERR(name);
886e8bd
+
886e8bd
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
886e8bd
+	if (!gem_obj) {
886e8bd
+		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
886e8bd
+		kfree(name);
886e8bd
+		return -ENOENT;
886e8bd
+	}
886e8bd
+
886e8bd
+	mutex_lock(&vc4->bo_lock);
886e8bd
+	label = vc4_get_user_label(vc4, name);
886e8bd
+	if (label != -1)
886e8bd
+		vc4_bo_set_label(gem_obj, label);
886e8bd
+	else
886e8bd
+		ret = -ENOMEM;
886e8bd
+	mutex_unlock(&vc4->bo_lock);
886e8bd
+
886e8bd
+	drm_gem_object_unreference_unlocked(gem_obj);
886e8bd
+
886e8bd
+	return ret;
886e8bd
 }
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
886e8bd
index c6b487c3d2b7..75c1f50a7b5d 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_drv.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
886e8bd
@@ -140,6 +140,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
886e8bd
 	DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
886e8bd
 	DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
886e8bd
 	DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
886e8bd
+	DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
886e8bd
 };
886e8bd
 
886e8bd
 static struct drm_driver vc4_drm_driver = {
886e8bd
@@ -257,7 +258,9 @@ static int vc4_drm_bind(struct device *dev)
886e8bd
 	vc4->dev = drm;
886e8bd
 	drm->dev_private = vc4;
886e8bd
 
886e8bd
-	vc4_bo_cache_init(drm);
886e8bd
+	ret = vc4_bo_cache_init(drm);
886e8bd
+	if (ret)
886e8bd
+		goto dev_unref;
886e8bd
 
886e8bd
 	drm_mode_config_init(drm);
886e8bd
 
886e8bd
@@ -281,8 +284,9 @@ static int vc4_drm_bind(struct device *dev)
886e8bd
 	component_unbind_all(dev, drm);
886e8bd
 gem_destroy:
886e8bd
 	vc4_gem_destroy(drm);
886e8bd
-	drm_dev_unref(drm);
886e8bd
 	vc4_bo_cache_destroy(drm);
886e8bd
+dev_unref:
886e8bd
+	drm_dev_unref(drm);
886e8bd
 	return ret;
886e8bd
 }
886e8bd
 
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
886e8bd
index df22698d62ee..75d9957cb76d 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_drv.h
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
886e8bd
@@ -11,6 +11,24 @@
886e8bd
 #include <drm/drm_encoder.h>
886e8bd
 #include <drm/drm_gem_cma_helper.h>
886e8bd
 
886e8bd
+/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
886e8bd
+ * this.
886e8bd
+ */
886e8bd
+enum vc4_kernel_bo_type {
886e8bd
+	/* Any kernel allocation (gem_create_object hook) before it
886e8bd
+	 * gets another type set.
886e8bd
+	 */
886e8bd
+	VC4_BO_TYPE_KERNEL,
886e8bd
+	VC4_BO_TYPE_V3D,
886e8bd
+	VC4_BO_TYPE_V3D_SHADER,
886e8bd
+	VC4_BO_TYPE_DUMB,
886e8bd
+	VC4_BO_TYPE_BIN,
886e8bd
+	VC4_BO_TYPE_RCL,
886e8bd
+	VC4_BO_TYPE_BCL,
886e8bd
+	VC4_BO_TYPE_KERNEL_CACHE,
886e8bd
+	VC4_BO_TYPE_COUNT
886e8bd
+};
886e8bd
+
886e8bd
 struct vc4_dev {
886e8bd
 	struct drm_device *dev;
886e8bd
 
886e8bd
@@ -46,14 +64,14 @@ struct vc4_dev {
886e8bd
 		struct timer_list time_timer;
886e8bd
 	} bo_cache;
886e8bd
 
886e8bd
-	struct vc4_bo_stats {
886e8bd
+	u32 num_labels;
886e8bd
+	struct vc4_label {
886e8bd
+		const char *name;
886e8bd
 		u32 num_allocated;
886e8bd
 		u32 size_allocated;
886e8bd
-		u32 num_cached;
886e8bd
-		u32 size_cached;
886e8bd
-	} bo_stats;
886e8bd
+	} *bo_labels;
886e8bd
 
886e8bd
-	/* Protects bo_cache and the BO stats. */
886e8bd
+	/* Protects bo_cache and bo_labels. */
886e8bd
 	struct mutex bo_lock;
886e8bd
 
886e8bd
 	uint64_t dma_fence_context;
886e8bd
@@ -169,6 +187,11 @@ struct vc4_bo {
886e8bd
 	/* normally (resv == &_resv) except for imported bo's */
886e8bd
 	struct reservation_object *resv;
886e8bd
 	struct reservation_object _resv;
886e8bd
+
886e8bd
+	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
886e8bd
+	 * for user-allocated labels.
886e8bd
+	 */
886e8bd
+	int label;
886e8bd
 };
886e8bd
 
886e8bd
 static inline struct vc4_bo *
886e8bd
@@ -460,7 +483,7 @@ struct vc4_validated_shader_info {
886e8bd
 struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
886e8bd
 void vc4_free_object(struct drm_gem_object *gem_obj);
886e8bd
 struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
886e8bd
-			     bool from_cache);
886e8bd
+			     bool from_cache, enum vc4_kernel_bo_type type);
886e8bd
 int vc4_dumb_create(struct drm_file *file_priv,
886e8bd
 		    struct drm_device *dev,
886e8bd
 		    struct drm_mode_create_dumb *args);
886e8bd
@@ -478,6 +501,8 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
886e8bd
 			 struct drm_file *file_priv);
886e8bd
 int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
886e8bd
 			     struct drm_file *file_priv);
886e8bd
+int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
886e8bd
+		       struct drm_file *file_priv);
886e8bd
 int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
886e8bd
 struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
886e8bd
 int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
886e8bd
@@ -485,7 +510,7 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
886e8bd
 						 struct dma_buf_attachment *attach,
886e8bd
 						 struct sg_table *sgt);
886e8bd
 void *vc4_prime_vmap(struct drm_gem_object *obj);
886e8bd
-void vc4_bo_cache_init(struct drm_device *dev);
886e8bd
+int vc4_bo_cache_init(struct drm_device *dev);
886e8bd
 void vc4_bo_cache_destroy(struct drm_device *dev);
886e8bd
 int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
886e8bd
 
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
index 8b551bc630c4..80f1953b4938 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
@@ -774,7 +774,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
886e8bd
 		goto fail;
886e8bd
 	}
886e8bd
 
886e8bd
-	bo = vc4_bo_create(dev, exec_size, true);
886e8bd
+	bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL);
886e8bd
 	if (IS_ERR(bo)) {
886e8bd
 		DRM_ERROR("Couldn't allocate BO for binning\n");
886e8bd
 		ret = PTR_ERR(bo);
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
886e8bd
index da3bfd53f0bd..e0539731130b 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
886e8bd
@@ -320,7 +320,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
886e8bd
 
886e8bd
 	size += xtiles * ytiles * loop_body_size;
886e8bd
 
886e8bd
-	setup->rcl = &vc4_bo_create(dev, size, true)->base;
886e8bd
+	setup->rcl = &vc4_bo_create(dev, size, true, VC4_BO_TYPE_RCL)->base;
886e8bd
 	if (IS_ERR(setup->rcl))
886e8bd
 		return PTR_ERR(setup->rcl);
886e8bd
 	list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
886e8bd
index 8c723da71f66..622cd43840b8 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
886e8bd
@@ -236,7 +236,8 @@ vc4_allocate_bin_bo(struct drm_device *drm)
886e8bd
 	INIT_LIST_HEAD(&list);
886e8bd
 
886e8bd
 	while (true) {
886e8bd
-		struct vc4_bo *bo = vc4_bo_create(drm, size, true);
886e8bd
+		struct vc4_bo *bo = vc4_bo_create(drm, size, true,
886e8bd
+						  VC4_BO_TYPE_BIN);
886e8bd
 
886e8bd
 		if (IS_ERR(bo)) {
886e8bd
 			ret = PTR_ERR(bo);
886e8bd
diff --git a/include/uapi/drm/vc4_drm.h b/include/uapi/drm/vc4_drm.h
886e8bd
index 6ac4c5c014cb..551628e571f9 100644
886e8bd
--- a/include/uapi/drm/vc4_drm.h
886e8bd
+++ b/include/uapi/drm/vc4_drm.h
886e8bd
@@ -40,6 +40,7 @@ extern "C" {
886e8bd
 #define DRM_VC4_GET_PARAM                         0x07
886e8bd
 #define DRM_VC4_SET_TILING                        0x08
886e8bd
 #define DRM_VC4_GET_TILING                        0x09
886e8bd
+#define DRM_VC4_LABEL_BO                          0x0a
886e8bd
 
886e8bd
 #define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
886e8bd
 #define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
886e8bd
@@ -51,6 +52,7 @@ extern "C" {
886e8bd
 #define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
886e8bd
 #define DRM_IOCTL_VC4_SET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
886e8bd
 #define DRM_IOCTL_VC4_GET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
886e8bd
+#define DRM_IOCTL_VC4_LABEL_BO            DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
886e8bd
 
886e8bd
 struct drm_vc4_submit_rcl_surface {
886e8bd
 	__u32 hindex; /* Handle index, or ~0 if not present. */
886e8bd
@@ -311,6 +313,15 @@ struct drm_vc4_set_tiling {
886e8bd
 	__u64 modifier;
886e8bd
 };
886e8bd
 
886e8bd
+/**
886e8bd
+ * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
886e8bd
+ */
886e8bd
+struct drm_vc4_label_bo {
886e8bd
+	__u32 handle;
886e8bd
+	__u32 len;
886e8bd
+	__u64 name;
886e8bd
+};
886e8bd
+
886e8bd
 #if defined(__cplusplus)
886e8bd
 }
886e8bd
 #endif
886e8bd
-- 
886e8bd
2.13.5
886e8bd
886e8bd
From 34cbed8ed9441caa13017108dac189e09c35f9af Mon Sep 17 00:00:00 2001
886e8bd
From: Eric Anholt <eric@anholt.net>
886e8bd
Date: Wed, 2 Aug 2017 13:32:40 -0700
886e8bd
Subject: [PATCH 5/6] drm/vc4: Fix double destroy of the BO cache on teardown.
886e8bd
MIME-Version: 1.0
886e8bd
Content-Type: text/plain; charset=UTF-8
886e8bd
Content-Transfer-Encoding: 8bit
886e8bd
886e8bd
It's also destroyed from the top level vc4_drv.c initialization, which
886e8bd
is where the cache was actually initialized from.
886e8bd
886e8bd
This used to just involve duplicate del_timer() and cancel_work_sync()
886e8bd
being called, but it started causing kmalloc issues once we
886e8bd
double-freed the new BO label array.
886e8bd
886e8bd
Fixes: 1908a876f909 ("drm/vc4: Add an ioctl for labeling GEM BOs for summary stats")
886e8bd
Signed-off-by: Eric Anholt <eric@anholt.net>
886e8bd
Link: https://patchwork.freedesktop.org/patch/msgid/20170802203242.12815-1-eric@anholt.net
886e8bd
Tested-by: Noralf Trønnes <noralf@tronnes.org>
886e8bd
Acked-by: Noralf Trønnes <noralf@tronnes.org>
886e8bd
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
886e8bd
---
886e8bd
 drivers/gpu/drm/vc4/vc4_gem.c | 2 --
886e8bd
 1 file changed, 2 deletions(-)
886e8bd
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
index 80f1953b4938..624177b9cce4 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
886e8bd
@@ -1117,6 +1117,4 @@ vc4_gem_destroy(struct drm_device *dev)
886e8bd
 
886e8bd
 	if (vc4->hang_state)
886e8bd
 		vc4_free_hang_state(dev, vc4->hang_state);
886e8bd
-
886e8bd
-	vc4_bo_cache_destroy(dev);
886e8bd
 }
886e8bd
-- 
886e8bd
2.13.5
886e8bd
886e8bd
From 4f218eea5be54c8506e6db700750e8b8019dc6af Mon Sep 17 00:00:00 2001
886e8bd
From: Boris Brezillon <boris.brezillon@free-electrons.com>
886e8bd
Date: Fri, 16 Jun 2017 10:30:33 +0200
886e8bd
Subject: [PATCH 6/6] drm/vc4: Send a VBLANK event when disabling a CRTC
886e8bd
886e8bd
VBLANK events are missed when the CRTC is being disabled because the
886e8bd
driver does not wait till the end of the frame before stopping the
886e8bd
HVS and PV blocks. In this case, we should explicitly issue a VBLANK
886e8bd
event if there's one waiting.
886e8bd
886e8bd
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
886e8bd
Reviewed-by: Eric Anholt <eric@anholt.net>
886e8bd
Link: http://patchwork.freedesktop.org/patch/msgid/1497601833-24588-1-git-send-email-boris.brezillon@free-electrons.com
886e8bd
---
886e8bd
 drivers/gpu/drm/vc4/vc4_crtc.c | 13 +++++++++++++
886e8bd
 1 file changed, 13 insertions(+)
886e8bd
886e8bd
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
886e8bd
index a12cc7ea99b6..b0582ad3f459 100644
886e8bd
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
886e8bd
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
886e8bd
@@ -518,6 +518,19 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
886e8bd
 	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
886e8bd
 		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
886e8bd
 		     SCALER_DISPSTATX_EMPTY);
886e8bd
+
886e8bd
+	/*
886e8bd
+	 * Make sure we issue a vblank event after disabling the CRTC if
886e8bd
+	 * someone was waiting for it.
886e8bd
+	 */
886e8bd
+	if (crtc->state->event) {
886e8bd
+		unsigned long flags;
886e8bd
+
886e8bd
+		spin_lock_irqsave(&dev->event_lock, flags);
886e8bd
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
886e8bd
+		crtc->state->event = NULL;
886e8bd
+		spin_unlock_irqrestore(&dev->event_lock, flags);
886e8bd
+	}
886e8bd
 }
886e8bd
 
886e8bd
 static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
886e8bd
-- 
886e8bd
2.13.5
886e8bd