From: Dave Airlie
Date: Fri, 11 Jun 2021 03:34:42 +0000 (+1000)
Subject: Merge tag 'drm-intel-gt-next-2021-06-10' of git://anongit.freedesktop.org/drm/drm...
X-Git-Url: https://git.baikalelectronics.ru/sdk/?a=commitdiff_plain;h=2a7005c8a3982ba27fab237d85c27da446484e9c;p=kernel.git

Merge tag 'drm-intel-gt-next-2021-06-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:
- Disable mmap ioctl for gen12+ (excl. TGL-LP)
- Start enabling HuC loading by default for upcoming Gen12+ platforms (excludes TGL and RKL)

Core Changes:
- Backmerge of drm-next

Driver Changes:
- Revert "i915: use io_mapping_map_user" (Eero, Matt A)
- Initialize the TTM device and memory managers (Thomas)
- Major rework to the GuC submission backend to prepare for enabling on new platforms (Michal Wa., Daniele, Matt B, Rodrigo)
- Fix i915_sg_page_sizes to record dma segments rather than physical pages (Thomas)
- Locking rework to prep for TTM conversion (Thomas)
- Replace IS_GEN and friends with GRAPHICS_VER (Lucas)
- Use DEVICE_ATTR_RO macro (Yue)
- Static code checker fixes (Zhihao)

Signed-off-by: Dave Airlie
From: Joonas Lahtinen
Link: https://patchwork.freedesktop.org/patch/msgid/YMHeDxg9VLiFtyn3@jlahtine-mobl.ger.corp.intel.com
---

2a7005c8a3982ba27fab237d85c27da446484e9c
diff --cc drivers/gpu/drm/i915/intel_memory_region.c
index d98e8b81d322e,4092cc9876796..e6024eb7cca4b
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@@ -28,6 -28,11 +28,11 @@@ static const struct 
  	},
  };
  
+ struct intel_region_reserve {
+ 	struct list_head link;
 -	void *node;
++	struct ttm_resource *res;
+ };
+ 
  struct intel_memory_region *
  intel_memory_region_lookup(struct drm_i915_private *i915,
  			   u16 class, u16 instance)
@@@ -58,146 -63,61 +63,61 @@@ intel_memory_region_by_type(struct drm_
  	return NULL;
  }
  
- static u64
- intel_memory_region_free_pages(struct intel_memory_region *mem,
- 			       struct list_head *blocks)
+ /**
+  * intel_memory_region_unreserve - Unreserve all previously reserved
+  * ranges
+  * @mem: The region containing the reserved ranges.
+  */
+ void intel_memory_region_unreserve(struct intel_memory_region *mem)
  {
- 	struct i915_buddy_block *block, *on;
- 	u64 size = 0;
+ 	struct intel_region_reserve *reserve, *next;
  
- 	list_for_each_entry_safe(block, on, blocks, link) {
- 		size += i915_buddy_block_size(&mem->mm, block);
- 		i915_buddy_free(&mem->mm, block);
- 	}
- 	INIT_LIST_HEAD(blocks);
+ 	if (!mem->priv_ops || !mem->priv_ops->free)
+ 		return;
  
- 	return size;
- }
- 
- void
- __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
- 				      struct list_head *blocks)
- {
  	mutex_lock(&mem->mm_lock);
- 	mem->avail += intel_memory_region_free_pages(mem, blocks);
- 	mutex_unlock(&mem->mm_lock);
- }
- 
- void
- __intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
- {
- 	struct list_head blocks;
- 
- 	INIT_LIST_HEAD(&blocks);
- 	list_add(&block->link, &blocks);
- 	__intel_memory_region_put_pages_buddy(block->private, &blocks);
- }
- 
- int
- __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
- 				      resource_size_t size,
- 				      unsigned int flags,
- 				      struct list_head *blocks)
- {
- 	unsigned int min_order = 0;
- 	unsigned long n_pages;
- 
- 	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
- 	GEM_BUG_ON(!list_empty(blocks));
- 
- 	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
- 		min_order = ilog2(mem->min_page_size) -
- 			    ilog2(mem->mm.chunk_size);
- 	}
- 
- 	if (flags & I915_ALLOC_CONTIGUOUS) {
- 		size = roundup_pow_of_two(size);
- 		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+ 	list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
+ 		list_del(&reserve->link);
 -		mem->priv_ops->free(mem, reserve->node);
++		mem->priv_ops->free(mem, reserve->res);
+ 		kfree(reserve);
  	}
- 
- 	if (size > mem->mm.size)
- 		return -E2BIG;
- 
- 	n_pages = size >> ilog2(mem->mm.chunk_size);
- 
- 	mutex_lock(&mem->mm_lock);
- 
- 	do {
- 		struct i915_buddy_block *block;
- 		unsigned int order;
- 
- 		order = fls(n_pages) - 1;
- 		GEM_BUG_ON(order > mem->mm.max_order);
- 		GEM_BUG_ON(order < min_order);
- 
- 		do {
- 			block = i915_buddy_alloc(&mem->mm, order);
- 			if (!IS_ERR(block))
- 				break;
- 
- 			if (order-- == min_order)
- 				goto err_free_blocks;
- 		} while (1);
- 
- 		n_pages -= BIT(order);
- 
- 		block->private = mem;
- 		list_add_tail(&block->link, blocks);
- 
- 		if (!n_pages)
- 			break;
- 	} while (1);
- 
- 	mem->avail -= size;
  	mutex_unlock(&mem->mm_lock);
- 	return 0;
- 
- err_free_blocks:
- 	intel_memory_region_free_pages(mem, blocks);
- 	mutex_unlock(&mem->mm_lock);
- 	return -ENXIO;
  }
  
- struct i915_buddy_block *
- __intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
- 				      resource_size_t size,
- 				      unsigned int flags)
+ /**
+  * intel_memory_region_reserve - Reserve a memory range
+  * @mem: The region for which we want to reserve a range.
+  * @offset: Start of the range to reserve.
+  * @size: The size of the range to reserve.
+  *
+  * Return: 0 on success, negative error code on failure.
+  */
+ int intel_memory_region_reserve(struct intel_memory_region *mem,
+ 				resource_size_t offset,
+ 				resource_size_t size)
  {
- 	struct i915_buddy_block *block;
- 	LIST_HEAD(blocks);
  	int ret;
+ 	struct intel_region_reserve *reserve;
  
- 	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
- 	if (ret)
- 		return ERR_PTR(ret);
+ 	if (!mem->priv_ops || !mem->priv_ops->reserve)
+ 		return -EINVAL;
  
- 	block = list_first_entry(&blocks, typeof(*block), link);
- 	list_del_init(&block->link);
- 	return block;
- }
+ 	reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
+ 	if (!reserve)
+ 		return -ENOMEM;
  
- int intel_memory_region_init_buddy(struct intel_memory_region *mem)
- {
- 	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
- 			       PAGE_SIZE);
- }
- 
- void intel_memory_region_release_buddy(struct intel_memory_region *mem)
- {
- 	i915_buddy_free_list(&mem->mm, &mem->reserved);
- 	i915_buddy_fini(&mem->mm);
- }
- 
- int intel_memory_region_reserve(struct intel_memory_region *mem,
- 				u64 offset, u64 size)
- {
- 	int ret;
 -	reserve->node = mem->priv_ops->reserve(mem, offset, size);
 -	if (IS_ERR(reserve->node)) {
 -		ret = PTR_ERR(reserve->node);
++	reserve->res = mem->priv_ops->reserve(mem, offset, size);
++	if (IS_ERR(reserve->res)) {
++		ret = PTR_ERR(reserve->res);
+ 		kfree(reserve);
+ 		return ret;
+ 	}
  
  	mutex_lock(&mem->mm_lock);
- 	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+ 	list_add_tail(&reserve->link, &mem->reserved);
  	mutex_unlock(&mem->mm_lock);
  
- 	return ret;
+ 	return 0;
  }
  
  struct intel_memory_region *
diff --cc drivers/gpu/drm/i915/intel_memory_region.h
index d24ce5a0b30b3,e69cde13daf29..1f7dac63abb7f
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@@ -19,6 -17,6 +17,7 @@@ struct drm_i915_private
  struct drm_i915_gem_object;
  struct intel_memory_region;
  struct sg_table;
++struct ttm_resource;
  
  enum intel_memory_type {
  	INTEL_MEMORY_SYSTEM = I915_MEMORY_CLASS_SYSTEM,
@@@ -59,6 -58,14 +59,14 @@@ struct intel_memory_region_ops
  			   unsigned int flags);
  };
  
+ struct intel_memory_region_private_ops {
 -	void *(*reserve)(struct intel_memory_region *mem,
 -			 resource_size_t offset,
 -			 resource_size_t size);
++	struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
++					resource_size_t offset,
++					resource_size_t size);
+ 	void (*free)(struct intel_memory_region *mem,
 -		     void *node);
++		     struct ttm_resource *res);
+ };
+ 
  struct intel_memory_region {
  	struct drm_i915_private *i915;
diff --cc drivers/gpu/drm/i915/intel_region_ttm.c
index 0000000000000,c8ac118c21f6a..82a6727ede465
mode 000000,100644..100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@@ -1,0 -1,220 +1,226 @@@
+ // SPDX-License-Identifier: MIT
+ /*
+  * Copyright © 2021 Intel Corporation
+  */
+ #include
+ #include
++#include
+ 
+ #include "i915_drv.h"
+ #include "i915_scatterlist.h"
+ 
+ #include "intel_region_ttm.h"
+ 
+ /**
+  * DOC: TTM support structure
+  *
+  * The code in this file deals with setting up memory managers for TTM
+  * LMEM and MOCK regions and converting the output from
+  * the managers to struct sg_table, basically providing the mapping from
+  * i915 GEM regions to TTM memory types and resource managers.
+  */
+ 
+ /* A zero-initialized driver for now. We don't have a TTM backend yet. */
+ static struct ttm_device_funcs i915_ttm_bo_driver;
+ 
+ /**
+  * intel_region_ttm_device_init - Initialize a TTM device
+  * @dev_priv: Pointer to an i915 device private structure.
+  *
+  * Return: 0 on success, negative error code on failure.
+  */
+ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
+ {
+ 	struct drm_device *drm = &dev_priv->drm;
+ 
+ 	return ttm_device_init(&dev_priv->bdev, &i915_ttm_bo_driver,
+ 			       drm->dev, drm->anon_inode->i_mapping,
+ 			       drm->vma_offset_manager, false, false);
+ }
+ 
+ /**
+  * intel_region_ttm_device_fini - Finalize a TTM device
+  * @dev_priv: Pointer to an i915 device private structure.
+  */
+ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
+ {
+ 	ttm_device_fini(&dev_priv->bdev);
+ }
+ 
+ /*
+  * Map the i915 memory regions to TTM memory types. We use the
+  * driver-private types for now, reserving TTM_PL_VRAM for stolen
+  * memory and TTM_PL_TT for GGTT use if we decide to implement this.
+  */
+ static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+ {
+ 	int type;
+ 
+ 	GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
+ 		   mem->type != INTEL_MEMORY_MOCK);
+ 
+ 	type = mem->instance + TTM_PL_PRIV;
+ 	GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
+ 
+ 	return type;
+ }
+ 
 -static void *intel_region_ttm_node_reserve(struct intel_memory_region *mem,
 -					   resource_size_t offset,
 -					   resource_size_t size)
++static struct ttm_resource *
++intel_region_ttm_node_reserve(struct intel_memory_region *mem,
++			      resource_size_t offset,
++			      resource_size_t size)
+ {
+ 	struct ttm_resource_manager *man = mem->region_private;
+ 	struct ttm_place place = {};
 -	struct ttm_resource res = {};
+ 	struct ttm_buffer_object mock_bo = {};
++	struct ttm_resource *res;
+ 	int ret;
+ 
+ 	/*
+ 	 * Having to use a mock_bo is unfortunate but stems from some
+ 	 * drivers having private managers that insist on knowing what the
+ 	 * allocated memory is intended for, using it to send private
+ 	 * data to the manager. Also recently the bo has been used to send
+ 	 * alignment info to the manager. Assume that apart from the latter,
+ 	 * none of the managers we use will ever access the buffer object
+ 	 * members, hoping we can pass the alignment info in the
+ 	 * struct ttm_place in the future.
+ 	 */
+ 
+ 	place.fpfn = offset >> PAGE_SHIFT;
+ 	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
 -	res.num_pages = size >> PAGE_SHIFT;
++	mock_bo.base.size = size;
+ 	ret = man->func->alloc(man, &mock_bo, &place, &res);
+ 	if (ret == -ENOSPC)
+ 		ret = -ENXIO;
+ 
 -	return ret ? ERR_PTR(ret) : res.mm_node;
++	return ret ? ERR_PTR(ret) : res;
+ }
+ 
+ /**
+  * intel_region_ttm_node_free - Free a node allocated from a resource manager
+  * @mem: The region the node was allocated from.
+  * @node: The opaque node representing an allocation.
+  */
+ void intel_region_ttm_node_free(struct intel_memory_region *mem,
 -				void *node)
++				struct ttm_resource *res)
+ {
+ 	struct ttm_resource_manager *man = mem->region_private;
 -	struct ttm_resource res = {};
+ 
 -	res.mm_node = node;
 -	man->func->free(man, &res);
++	man->func->free(man, res);
+ }
+ 
+ static const struct intel_memory_region_private_ops priv_ops = {
+ 	.reserve = intel_region_ttm_node_reserve,
+ 	.free = intel_region_ttm_node_free,
+ };
+ 
+ int intel_region_ttm_init(struct intel_memory_region *mem)
+ {
+ 	struct ttm_device *bdev = &mem->i915->bdev;
+ 	int mem_type = intel_region_to_ttm_type(mem);
+ 	int ret;
+ 
+ 	ret = ttm_range_man_init(bdev, mem_type, false,
+ 				 resource_size(&mem->region) >> PAGE_SHIFT);
+ 	if (ret)
+ 		return ret;
+ 
+ 	mem->chunk_size = PAGE_SIZE;
+ 	mem->max_order =
+ 		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
+ 	mem->is_range_manager = true;
+ 	mem->priv_ops = &priv_ops;
+ 	mem->region_private = ttm_manager_type(bdev, mem_type);
+ 
+ 	return 0;
+ }
+ 
+ /**
+  * intel_region_ttm_fini - Finalize a TTM region.
+  * @mem: The memory region
+  *
+  * This function takes down the TTM resource manager associated with the
+  * memory region, and if it was registered with the TTM device,
+  * removes that registration.
+  */
+ void intel_region_ttm_fini(struct intel_memory_region *mem)
+ {
+ 	int ret;
+ 
+ 	ret = ttm_range_man_fini(&mem->i915->bdev,
+ 				 intel_region_to_ttm_type(mem));
+ 	GEM_WARN_ON(ret);
+ 	mem->region_private = NULL;
+ }
+ 
+ /**
+  * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+  * to an sg_table.
+  * @mem: The memory region.
+  * @node: The resource manager node obtained from the TTM resource manager.
+  *
+  * The gem backends typically use sg-tables for operations on the underlying
+  * io_memory. So provide a way for the backends to translate the
+  * nodes they are handed from TTM to sg-tables.
+  *
+  * Return: A malloced sg_table on success, an error pointer on failure.
+  */
+ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
 -					     void *node)
++					     struct ttm_resource *res)
+ {
 -	return i915_sg_from_mm_node(node, mem->region.start);
++	struct ttm_range_mgr_node *range_node =
++		container_of(res, typeof(*range_node), base);
++
++	GEM_WARN_ON(!mem->is_range_manager);
++	return i915_sg_from_mm_node(&range_node->mm_nodes[0],
++				    mem->region.start);
+ }
+ 
+ /**
+  * intel_region_ttm_node_alloc - Allocate memory resources from a region
+  * @mem: The memory region.
+  * @size: The requested size in bytes
+  * @flags: Allocation flags
+  *
+  * This functionality is provided only for callers that need to allocate
+  * memory from standalone TTM range managers, without the TTM eviction
+  * functionality. Don't use if you are not completely sure that's the
+  * case. The returned opaque node can be converted to an sg_table using
+  * intel_region_ttm_node_to_st(), and can be freed using
+  * intel_region_ttm_node_free().
+  *
+  * Return: A valid pointer on success, an error pointer on failure.
+  */
 -void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
 -				  resource_size_t size,
 -				  unsigned int flags)
++struct ttm_resource *
++intel_region_ttm_node_alloc(struct intel_memory_region *mem,
++			    resource_size_t size,
++			    unsigned int flags)
+ {
+ 	struct ttm_resource_manager *man = mem->region_private;
+ 	struct ttm_place place = {};
 -	struct ttm_resource res = {};
+ 	struct ttm_buffer_object mock_bo = {};
++	struct ttm_resource *res;
+ 	int ret;
+ 
+ 	/*
+ 	 * We ignore the flags for now since we're using the range
+ 	 * manager and contiguous and min page size would be fulfilled
+ 	 * by default if size is min page size aligned.
+ 	 */
 -	res.num_pages = size >> PAGE_SHIFT;
++	mock_bo.base.size = size;
+ 
+ 	if (mem->is_range_manager) {
+ 		if (size >= SZ_1G)
+ 			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
+ 		else if (size >= SZ_2M)
+ 			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
+ 		else if (size >= SZ_64K)
+ 			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
+ 	}
+ 
+ 	ret = man->func->alloc(man, &mock_bo, &place, &res);
+ 	if (ret == -ENOSPC)
+ 		ret = -ENXIO;
 -	return ret ? ERR_PTR(ret) : res.mm_node;
++	return ret ? ERR_PTR(ret) : res;
+ }
diff --cc drivers/gpu/drm/i915/intel_region_ttm.h
index 0000000000000,1c82c6c3429d3..11b0574ab7914
mode 000000,100644..100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@@ -1,0 -1,32 +1,34 @@@
+ /* SPDX-License-Identifier: MIT */
+ /*
+  * Copyright © 2021 Intel Corporation
+  */
+ #ifndef _INTEL_REGION_TTM_H_
+ #define _INTEL_REGION_TTM_H_
+ 
+ #include
+ 
+ #include "i915_selftest.h"
+ 
+ struct drm_i915_private;
+ struct intel_memory_region;
++struct ttm_resource;
+ 
+ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv);
+ 
+ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv);
+ 
+ int intel_region_ttm_init(struct intel_memory_region *mem);
+ 
+ void intel_region_ttm_fini(struct intel_memory_region *mem);
+ 
+ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
 -					     void *node);
++					     struct ttm_resource *res);
+ 
 -void *intel_region_ttm_node_alloc(struct intel_memory_region *mem,
 -				  resource_size_t size,
 -				  unsigned int flags);
++struct ttm_resource *
++intel_region_ttm_node_alloc(struct intel_memory_region *mem,
++			    resource_size_t size,
++			    unsigned int flags);
+ 
+ void intel_region_ttm_node_free(struct intel_memory_region *mem,
 -				void *node);
++				struct ttm_resource *node);
+ #endif /* _INTEL_REGION_TTM_H_ */
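The hunks above reduce the region-facing reservation API to intel_memory_region_reserve() and intel_memory_region_unreserve(), with the actual carving done by a TTM range manager behind priv_ops. A minimal sketch of that calling convention follows; it uses only the two signatures visible in this diff, while the function name, offsets and sizes are made up for illustration and the header include assumes these prototypes are exposed by intel_memory_region.h, so none of it is part of the patch itself.

/* Hypothetical call site, not part of the merge. */
#include <linux/sizes.h>

#include "intel_memory_region.h"

static int example_reserve_firmware_ranges(struct intel_memory_region *mem)
{
	int err;

	/* Carve a range out of the region so the manager never hands it out. */
	err = intel_memory_region_reserve(mem, 0, SZ_1M);
	if (err)
		return err;

	/* A second, disjoint range; the offset and size are illustrative only. */
	err = intel_memory_region_reserve(mem, SZ_64M, SZ_4M);
	if (err) {
		/* Drops every range reserved so far on this region. */
		intel_memory_region_unreserve(mem);
		return err;
	}

	return 0;
}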