radeon_cs_gem.c revision 22944501
/*
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_int.h"
#include "radeon_bo_int.h"
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "radeon_drm.h"

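/* GEM flavour of the CS manager: wraps the generic radeon_cs_manager and
 * caches the PCI device id reported by the kernel (used by cs_gem_print). */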
struct radeon_cs_manager_gem {
    struct radeon_cs_manager base;
    uint32_t                 device_id;
};

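/* Packed relocation entry, one per buffer object, laid out as passed to the
 * kernel in the RADEON_CHUNK_ID_RELOCS chunk below. RELOC_SIZE is its size
 * in dwords (4). */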
#pragma pack(1)
struct cs_reloc_gem {
    uint32_t    handle;
    uint32_t    read_domain;
    uint32_t    write_domain;
    uint32_t    flags;
};

#pragma pack()
#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))

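/* Per-CS state: the generic radeon_cs_int base, the DRM_RADEON_CS ioctl
 * arguments, the two chunks (IB packets and relocations) and the
 * bookkeeping arrays for the relocated buffer objects. */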
struct cs_gem {
    struct radeon_cs_int        base;
    struct drm_radeon_cs        cs;
    struct drm_radeon_cs_chunk  chunks[2];
    unsigned                    nrelocs;
    uint32_t                    *relocs;
    struct radeon_bo_int        **relocs_bo;
};

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cs_id_source = 0;

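/* CS ids are single bits taken from cs_id_source, so at most 32 command
 * streams can hold a non-zero id at a time. Each bo keeps a bitfield of the
 * CS ids it is referenced from (radeon_gem_get_reloc_in_cs()), which
 * cs_gem_write_reloc() uses as a quick "definitely not in this cs" test.
 * A cs whose id is 0 never passes that test, so its relocations are simply
 * never merged. */
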
/**
 * Return the lowest clear bit of n as a mask.
 * The result is undefined if called with ~0 (no clear bit).
 */
static uint32_t get_first_zero(const uint32_t n)
{
    /* __builtin_ctz returns the number of trailing zero bits */
    return 1 << __builtin_ctz(~n);
}

/**
 * Returns a free id for a cs.
 * If there is no free id, zero is returned.
 **/
static uint32_t generate_id(void)
{
    uint32_t r = 0;
    pthread_mutex_lock( &id_mutex );
    /* check for free ids */
    if (cs_id_source != ~r) {
        /* find first zero bit */
        r = get_first_zero(cs_id_source);

        /* set id as reserved */
        cs_id_source |= r;
    }
    pthread_mutex_unlock( &id_mutex );
    return r;
}

/**
 * Free the id for later reuse
 **/
static void free_id(uint32_t id)
{
    pthread_mutex_lock( &id_mutex );

    cs_id_source &= ~id;

    pthread_mutex_unlock( &id_mutex );
}

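/**
 * Allocate a new GEM command stream.
 *
 * The IB buffer is always allocated at the full 64KB (16384 dwords) and
 * requests for more than that are rejected. Initial room is made for
 * 4096 / 16 = 256 relocation entries; cs_gem_write_reloc() grows this on
 * demand.
 */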
static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
                                           uint32_t ndw)
{
    struct cs_gem *csg;

    /* max cmd buffer size is 64KB */
    if (ndw > (64 * 1024 / 4)) {
        return NULL;
    }
    csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
    if (csg == NULL) {
        return NULL;
    }
    csg->base.csm = csm;
    csg->base.ndw = 64 * 1024 / 4;
    csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
    if (csg->base.packets == NULL) {
        free(csg);
        return NULL;
    }
    csg->base.relocs_total_size = 0;
    csg->base.crelocs = 0;
    csg->base.id = generate_id();
    csg->nrelocs = 4096 / (4 * 4);
    csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
                                                    csg->nrelocs*sizeof(void*));
    if (csg->relocs_bo == NULL) {
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
    if (csg->relocs == NULL) {
        free(csg->relocs_bo);
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csg->chunks[0].length_dw = 0;
    csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
    csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csg->chunks[1].length_dw = 0;
    csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    return (struct radeon_cs_int*)csg;
}

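/**
 * Add a relocation for a buffer object to the command stream.
 *
 * Exactly one of read_domain/write_domain must be set and neither may be
 * the CPU domain. If the per-bo bitfield says the bo may already be in this
 * cs, the existing reloc table is scanned and the domains are merged;
 * otherwise a new entry is appended and the bo is referenced. In both cases
 * a relocation packet (a PACKET3 NOP header, 0xc0001000, followed by the
 * reloc index) is written into the IB.
 */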
static int cs_gem_write_reloc(struct radeon_cs_int *cs,
                              struct radeon_bo *bo,
                              uint32_t read_domain,
                              uint32_t write_domain,
                              uint32_t flags)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct cs_reloc_gem *reloc;
    uint32_t idx;
    unsigned i;

    assert(boi->space_accounted);

    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or the write domain but
         * not in both at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    /* use the bit field hash to determine if this bo is
     * for sure not already in this cs */
    if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
        /* check if bo is already referenced.
         * Scanning from end to begin reduces cycles with mesa because
         * it often relocates the same shared dma bo again. */
        for(i = cs->crelocs; i != 0;) {
            --i;
            idx = i * RELOC_SIZE;
            reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
            if (reloc->handle == bo->handle) {
                /* The bo is already relocated. The argument check above
                 * guarantees that exactly one of read_domain/write_domain
                 * is set, so we only need to reconcile this request with
                 * the domains recorded in the existing relocation.
                 */
                /* the DDX expects to read and write from the same pixmap */
                if (write_domain && (reloc->read_domain & write_domain)) {
                    reloc->read_domain = 0;
                    reloc->write_domain = write_domain;
                } else if (read_domain & reloc->write_domain) {
                    reloc->read_domain = 0;
                } else {
                    if (write_domain != reloc->write_domain)
                        return -EINVAL;
                    if (read_domain != reloc->read_domain)
                        return -EINVAL;
                }

                reloc->read_domain |= read_domain;
                reloc->write_domain |= write_domain;
                /* update flags */
                reloc->flags |= (flags & reloc->flags);
                /* write relocation packet */
                radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
                radeon_cs_write_dword((struct radeon_cs *)cs, idx);
                return 0;
            }
        }
    }
    /* new relocation */
    if (csg->base.crelocs >= csg->nrelocs) {
        /* allocate more memory (TODO: should maybe use a slab allocator) */
        uint32_t *tmp, size;
        size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
        tmp = (uint32_t*)realloc(csg->relocs_bo, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        csg->relocs_bo = (struct radeon_bo_int **)tmp;
        size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
        tmp = (uint32_t*)realloc(csg->relocs, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        cs->relocs = csg->relocs = tmp;
        csg->nrelocs += 1;
        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    }
    csg->relocs_bo[csg->base.crelocs] = boi;
    idx = (csg->base.crelocs++) * RELOC_SIZE;
    reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
    reloc->handle = bo->handle;
    reloc->read_domain = read_domain;
    reloc->write_domain = write_domain;
    reloc->flags = flags;
    csg->chunks[1].length_dw += RELOC_SIZE;
    radeon_bo_ref(bo);
    /* the bo might be referenced from another context so we have to use
     * atomic operations */
    atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
    cs->relocs_total_size += boi->size;
    radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
    radeon_cs_write_dword((struct radeon_cs *)cs, idx);
    return 0;
}

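/**
 * Start a section of ndw dwords.
 *
 * The file/function/line of the caller are recorded so that unbalanced or
 * overflowing sections can be reported. The packet buffer is grown (rounded
 * up to a multiple of 1024 dwords) if the section would not fit.
 */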
static int cs_gem_begin(struct radeon_cs_int *cs,
                        uint32_t ndw,
                        const char *file,
                        const char *func,
                        int line)
{
    if (cs->section_ndw) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section_ndw = ndw;
    cs->section_cdw = 0;
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

    if (cs->cdw + ndw > cs->ndw) {
        uint32_t tmp, *ptr;

        /* round up the required size to a multiple of 1024 dwords */
        tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
    }
    return 0;
}

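/**
 * End the current section, checking that exactly the number of dwords
 * declared in cs_gem_begin() was actually written.
 */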
static int cs_gem_end(struct radeon_cs_int *cs,
                      const char *file,
                      const char *func,
                      int line)
{
    if (!cs->section_ndw) {
        fprintf(stderr, "CS has no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    if (cs->section_ndw != cs->section_cdw) {
        fprintf(stderr, "CS section size mismatch, start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);

        /* We must reset the section even when there is an error. */
        cs->section_ndw = 0;
        return -EPIPE;
    }
    cs->section_ndw = 0;
    return 0;
}

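/**
 * Submit the command stream through the DRM_RADEON_CS ioctl, then drop the
 * reference and the "in cs" bit of every relocated bo and reset the
 * manager's space accounting. The ioctl return value is passed back to the
 * caller.
 */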
static int cs_gem_emit(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    uint64_t chunk_array[2];
    unsigned i;
    int r;

    csg->chunks[0].length_dw = cs->cdw;

    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];

    csg->cs.num_chunks = 2;
    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;

    r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                            &csg->cs, sizeof(struct drm_radeon_cs));
    for (i = 0; i < csg->base.crelocs; i++) {
        csg->relocs_bo[i]->space_accounted = 0;
        /* the bo might be referenced from another context so we have to
         * use atomic operations */
        atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
        radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
        csg->relocs_bo[i] = NULL;
    }

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;
    return r;
}

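/**
 * Destroy the command stream and release its id and buffers.
 */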
static int cs_gem_destroy(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;

    free_id(cs->id);
    free(csg->relocs_bo);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}

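/**
 * Reset the command stream for reuse: unreference all relocated bos and
 * clear the packet/reloc counters without freeing the buffers themselves.
 */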
static int cs_gem_erase(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    unsigned i;

    if (csg->relocs_bo) {
        for (i = 0; i < csg->base.crelocs; i++) {
            if (csg->relocs_bo[i]) {
                /* the bo might be referenced from another context so we
                 * have to use atomic operations */
                atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
                radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
                csg->relocs_bo[i] = NULL;
            }
        }
    }
    cs->relocs_total_size = 0;
    cs->cdw = 0;
    cs->section_ndw = 0;
    cs->crelocs = 0;
    csg->chunks[0].length_dw = 0;
    csg->chunks[1].length_dw = 0;
    return 0;
}

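/**
 * Flushing based on the total size of relocated buffers is currently
 * disabled; the old heuristic is kept in the commented-out expression.
 */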
static int cs_gem_need_flush(struct radeon_cs_int *cs)
{
    return 0; //(cs->relocs_total_size > (32*1024*1024));
}

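/**
 * Dump the command stream: the vendor/device id pair followed by every
 * packet dword in hexadecimal.
 */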
static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
{
    struct radeon_cs_manager_gem *csm;
    unsigned int i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
    for (i = 0; i < cs->cdw; i++) {
        fprintf(file, "0x%08X\n", cs->packets[i]);
    }
}

static struct radeon_cs_funcs radeon_cs_gem_funcs = {
    cs_gem_create,
    cs_gem_write_reloc,
    cs_gem_begin,
    cs_gem_end,
    cs_gem_emit,
    cs_gem_destroy,
    cs_gem_erase,
    cs_gem_need_flush,
    cs_gem_print,
};

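/**
 * Query the PCI device id through the DRM_RADEON_INFO ioctl.
 */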
static int radeon_get_device_id(int fd, uint32_t *device_id)
{
    struct drm_radeon_info info;
    int r;

    *device_id = 0;
    info.request = RADEON_INFO_DEVICE_ID;
    info.value = (uintptr_t)device_id;
    r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
                            sizeof(struct drm_radeon_info));
    return r;
}

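/**
 * Create a GEM CS manager for the given DRM file descriptor and cache the
 * device id for later use by cs_gem_print().
 */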
struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
    struct radeon_cs_manager_gem *csm;

    csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &radeon_cs_gem_funcs;
    csm->base.fd = fd;
    radeon_get_device_id(fd, &csm->device_id);
    return &csm->base;
}

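/**
 * Destroy a manager previously created by radeon_cs_manager_gem_ctor().
 */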
450
451void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
452{
453    free(csm);
454}
455
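
/*
 * Rough usage sketch (comment only). It assumes the public wrappers
 * declared in radeon_cs.h (radeon_cs_create, radeon_cs_begin/end,
 * radeon_cs_write_dword, radeon_cs_write_reloc, radeon_cs_emit,
 * radeon_cs_destroy); exact prototypes and the buffer space-accounting
 * calls (cs_gem_write_reloc asserts bo->space_accounted) should be checked
 * against that header rather than taken from this sketch.
 *
 *   struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(fd);
 *   struct radeon_cs *cs = radeon_cs_create(csm, 1024);
 *
 *   radeon_cs_begin(cs, 2 + 2, __FILE__, __func__, __LINE__);
 *   radeon_cs_write_dword(cs, some_packet_header);
 *   radeon_cs_write_dword(cs, some_payload);
 *   radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_GTT, 0, 0);
 *   radeon_cs_end(cs, __FILE__, __func__, __LINE__);
 *
 *   radeon_cs_emit(cs);
 *   radeon_cs_destroy(cs);
 *   radeon_cs_manager_gem_dtor(csm);
 *
 * Note that each radeon_cs_write_reloc() also consumes two dwords of the
 * current section, as cs_gem_write_reloc() above emits a two-dword
 * relocation packet.
 */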