/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <[email protected]>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
				 struct amdgpu_bo_list **result,
				 int *id)
{
	int r;

	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!*result)
		return -ENOMEM;

	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, *result,
		      1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&fpriv->bo_list_lock);
		kfree(*result);
		return r;
	}
	*id = r;

	/* Holding bo_list_lock across idr_alloc() and mutex_init() keeps
	 * concurrent amdgpu_bo_list_get() callers from locking the list
	 * mutex before it is initialized.
	 */
	mutex_init(&(*result)->lock);
	(*result)->num_entries = 0;
	(*result)->array = NULL;

	/* Return with the list mutex held; the caller releases it via
	 * amdgpu_bo_list_put().
	 */
	mutex_lock(&(*result)->lock);
	mutex_unlock(&fpriv->bo_list_lock);

	return 0;
}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	if (list) {
		/* Another user may still hold a reference to this list;
		 * take and release its lock so that use finishes before
		 * we free it.
		 */
		mutex_lock(&list->lock);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			/* Userptr BOs are filled in from the back of the
			 * array, all other BOs from the front.
			 */
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Entries were filled in from both ends of the array, so a single
	 * "while (i--)" walk from the failure point would miss the userptr
	 * entries at the back and leak their BO references. Unref both
	 * populated regions instead; untouched slots are zeroed above, and
	 * amdgpu_bo_unref() ignores NULL pointers.
	 */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return r;
}
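
/*
 * Resulting array layout, illustrated for num_entries = 6 with two userptr
 * BOs (a sketch of the fill logic above, for illustration only):
 *
 *	index:    0      1      2      3      4      5
 *	        [ gem ][ gem ][ gem ][ gem ][uptr ][uptr ]
 *	                                    ^
 *	                              last_entry == first_userptr
 *
 * GEM BOs grow from the front (last_entry), userptr BOs from the back
 * (first_userptr); storing first_userptr in the list lets callers treat
 * the tail of the array as the set of userptr BOs that need page handling.
 */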

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is a bucket sort with O(n) time complexity: an entry with
	 * priority "i" is added to bucket[i], and the buckets are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		list_add_tail(&list->array[i].tv.head,
			      &bucket[priority]);
		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}

void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	drm_free_large(list->array);
	kfree(list);
}
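
/*
 * A minimal sketch of the expected get/use/put pattern, mirroring how the
 * CS path consumes a BO list ("fpriv" and "handle" are placeholders, the
 * snippet is not part of this file):
 *
 *	struct amdgpu_bo_list *list;
 *	struct list_head validated;
 *
 *	INIT_LIST_HEAD(&validated);
 *	list = amdgpu_bo_list_get(fpriv, handle);
 *	if (list) {
 *		amdgpu_bo_list_get_list(list, &validated);
 *		// ... validate and use the BOs on "validated" ...
 *		amdgpu_bo_list_put(list);
 *	}
 *
 * amdgpu_bo_list_get() returns with the list mutex held, so the list
 * cannot be updated or destroyed until amdgpu_bo_list_put() is called.
 */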

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void __user *)(uintptr_t)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the BO info array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		/* The userspace entry size differs from ours; copy only
		 * the common prefix of each entry, the rest stays zeroed.
		 */
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r) {
			/* Don't leak the freshly created, but unusable,
			 * list handle back to userspace.
			 */
			amdgpu_bo_list_destroy(fpriv, handle);
			goto error_free;
		}

		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}
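
/*
 * Userspace view, a minimal sketch (assumptions: "fd" is an open render
 * node, "bo0"/"bo1" are existing GEM handles, and libdrm's
 * drmCommandWriteRead() is used to issue the ioctl; illustrative client
 * code, not part of this file):
 *
 *	struct drm_amdgpu_bo_list_entry entries[2] = {
 *		{ .bo_handle = bo0, .bo_priority = 0 },
 *		{ .bo_handle = bo1, .bo_priority = 8 },
 *	};
 *	union drm_amdgpu_bo_list args = {
 *		.in = {
 *			.operation = AMDGPU_BO_LIST_OP_CREATE,
 *			.bo_number = 2,
 *			.bo_info_size = sizeof(entries[0]),
 *			.bo_info_ptr = (uintptr_t)entries,
 *		},
 *	};
 *	uint32_t list_handle;
 *
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_BO_LIST, &args,
 *				sizeof(args)) == 0)
 *		list_handle = args.out.list_handle;
 *
 * Passing bo_info_size explicitly is what lets older and newer clients
 * interoperate with the kernel: when the sizes differ, the ioctl above
 * copies only the common prefix of each entry and zero-fills the rest.
 */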