commit 7b6ae28f7f
Author: Ziyang Zhou
Date: Wed May 4 15:25:00 2022 +0800

1145 changed files with 270040 additions and 0 deletions

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,111 @@
/*
* Copyright (C) 2010-2011 Chia-I Wu <olvaffe@gmail.com>
* Copyright (C) 2010-2011 LunarG Inc.
* Copyright (C) 2016 Linaro, Ltd., Rob Herring <robh@kernel.org>
* Copyright (C) 2018 Collabora, Robert Foss <robert.foss@collabora.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __ANDROID_GRALLOC_HANDLE_H__
#define __ANDROID_GRALLOC_HANDLE_H__
#include <cutils/native_handle.h>
#include <stdint.h>
/* support users of drm_gralloc/gbm_gralloc */
#define gralloc_gbm_handle_t gralloc_handle_t
#define gralloc_drm_handle_t gralloc_handle_t
struct gralloc_handle_t {
native_handle_t base;
/* dma-buf file descriptor
* Must be located first, since native_handle_t is allocated
* using native_handle_create(), which allocates space for
* sizeof(native_handle_t) + sizeof(int) * (numFds + numInts),
* where numFds = GRALLOC_HANDLE_NUM_FDS (the number of FDs)
* and numInts = GRALLOC_HANDLE_NUM_INTS (the space needed for
* the remainder of this struct). The FDs are expected to come
* first, immediately following native_handle_t.
*/
int prime_fd;
/* api variables */
uint32_t magic; /* differentiate between allocator impls */
uint32_t version; /* api version */
uint32_t width; /* width of buffer in pixels */
uint32_t height; /* height of buffer in pixels */
uint32_t format; /* pixel format (Android) */
uint32_t usage; /* android libhardware usage flags */
uint32_t stride; /* the stride in bytes */
int data_owner; /* owner of data (for validation) */
uint64_t modifier __attribute__((aligned(8))); /* buffer modifiers */
union {
void *data; /* pointer to struct gralloc_gbm_bo_t */
uint64_t reserved;
} __attribute__((aligned(8)));
};
#define GRALLOC_HANDLE_VERSION 4
#define GRALLOC_HANDLE_MAGIC 0x60585350
#define GRALLOC_HANDLE_NUM_FDS 1
#define GRALLOC_HANDLE_NUM_INTS ( \
((sizeof(struct gralloc_handle_t) - sizeof(native_handle_t))/sizeof(int)) \
- GRALLOC_HANDLE_NUM_FDS)
static inline struct gralloc_handle_t *gralloc_handle(buffer_handle_t handle)
{
return (struct gralloc_handle_t *)handle;
}
/**
* Create a buffer handle.
*/
static inline native_handle_t *gralloc_handle_create(int32_t width,
int32_t height,
int32_t hal_format,
int32_t usage)
{
struct gralloc_handle_t *handle;
native_handle_t *nhandle = native_handle_create(GRALLOC_HANDLE_NUM_FDS,
GRALLOC_HANDLE_NUM_INTS);
if (!nhandle)
return NULL;
handle = gralloc_handle(nhandle);
handle->magic = GRALLOC_HANDLE_MAGIC;
handle->version = GRALLOC_HANDLE_VERSION;
handle->width = width;
handle->height = height;
handle->format = hal_format;
handle->usage = usage;
handle->prime_fd = -1;
return nhandle;
}
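/*
 * Usage sketch (an editorial addition, not part of the upstream header):
 * create a handle, attach a dma-buf fd, and release it again.
 * native_handle_delete() comes from <cutils/native_handle.h>; the dmabuf_fd
 * value and the use of 1 (HAL_PIXEL_FORMAT_RGBA_8888) are assumptions made
 * for the example only.
 */
static inline void gralloc_handle_example(int dmabuf_fd)
{
	/* 1 == HAL_PIXEL_FORMAT_RGBA_8888 in the Android HAL */
	native_handle_t *nhandle = gralloc_handle_create(1920, 1080, 1, 0);

	if (!nhandle)
		return;
	gralloc_handle(nhandle)->prime_fd = dmabuf_fd;
	/* ... hand the handle to a gralloc/gbm consumer here ... */
	native_handle_delete(nhandle);
}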
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,92 @@
/**
* \file drm_sarea.h
* \brief SAREA definitions
*
* \author Michel Dänzer <michel@daenzer.net>
*/
/*
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _DRM_SAREA_H_
#define _DRM_SAREA_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* The SAREA needs to be at least a page */
#if defined(__alpha__)
#define SAREA_MAX 0x2000U
#elif defined(__mips__)
#define SAREA_MAX 0x4000U
#elif defined(__ia64__)
#define SAREA_MAX 0x10000U /* 64kB */
#else
/* Intel 830M driver needs at least 8k SAREA */
#define SAREA_MAX 0x2000U
#endif
/** Maximum number of drawables in the SAREA */
#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
};
/** SAREA frame */
struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
};
/** SAREA */
struct drm_sarea {
/** first thing is always the DRM locking structure */
struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
struct drm_hw_lock drawable_lock;
struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
};
typedef struct drm_sarea_drawable drm_sarea_drawable_t;
typedef struct drm_sarea_frame drm_sarea_frame_t;
typedef struct drm_sarea drm_sarea_t;
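/*
 * Illustrative compile-time check (an editorial addition, assuming a C11
 * toolchain): the shared area laid out above must fit inside the SAREA_MAX
 * mapping chosen for this architecture.
 */
_Static_assert(sizeof(struct drm_sarea) <= SAREA_MAX,
	       "struct drm_sarea must fit within SAREA_MAX");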
#if defined(__cplusplus)
}
#endif
#endif /* _DRM_SAREA_H_ */

File diff suppressed because it is too large

View File

@@ -0,0 +1,153 @@
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/** @file intel_aub.h
*
* The AUB file is a file format used by Intel's internal simulation
* and other validation tools. It can be used at various levels by a
* driver to input state to the simulated hardware or a replaying
* debugger.
*
* We choose to dump AUB files using the trace block format for ease
* of implementation -- dump out the blocks of memory as plain blobs
* and insert ring commands to execute the batchbuffer blob.
*/
#ifndef _INTEL_AUB_H
#define _INTEL_AUB_H

#include <stdint.h>
#define AUB_MI_NOOP (0)
#define AUB_MI_BATCH_BUFFER_START (0x31 << 23)
#define AUB_PIPE_CONTROL (0x7a000002)
/* DW0: instruction type. */
#define CMD_AUB (7 << 29)
#define CMD_AUB_HEADER (CMD_AUB | (1 << 23) | (0x05 << 16))
/* DW1 */
# define AUB_HEADER_MAJOR_SHIFT 24
# define AUB_HEADER_MINOR_SHIFT 16
#define CMD_AUB_TRACE_HEADER_BLOCK (CMD_AUB | (1 << 23) | (0x41 << 16))
#define CMD_AUB_DUMP_BMP (CMD_AUB | (1 << 23) | (0x9e << 16))
/* DW1 */
#define AUB_TRACE_OPERATION_MASK 0x000000ff
#define AUB_TRACE_OP_COMMENT 0x00000000
#define AUB_TRACE_OP_DATA_WRITE 0x00000001
#define AUB_TRACE_OP_COMMAND_WRITE 0x00000002
#define AUB_TRACE_OP_MMIO_WRITE 0x00000003
// operation = TRACE_DATA_WRITE, Type
#define AUB_TRACE_TYPE_MASK 0x0000ff00
#define AUB_TRACE_TYPE_NOTYPE (0 << 8)
#define AUB_TRACE_TYPE_BATCH (1 << 8)
#define AUB_TRACE_TYPE_VERTEX_BUFFER (5 << 8)
#define AUB_TRACE_TYPE_2D_MAP (6 << 8)
#define AUB_TRACE_TYPE_CUBE_MAP (7 << 8)
#define AUB_TRACE_TYPE_VOLUME_MAP (9 << 8)
#define AUB_TRACE_TYPE_1D_MAP (10 << 8)
#define AUB_TRACE_TYPE_CONSTANT_BUFFER (11 << 8)
#define AUB_TRACE_TYPE_CONSTANT_URB (12 << 8)
#define AUB_TRACE_TYPE_INDEX_BUFFER (13 << 8)
#define AUB_TRACE_TYPE_GENERAL (14 << 8)
#define AUB_TRACE_TYPE_SURFACE (15 << 8)
// operation = TRACE_COMMAND_WRITE, Type =
#define AUB_TRACE_TYPE_RING_HWB (1 << 8)
#define AUB_TRACE_TYPE_RING_PRB0 (2 << 8)
#define AUB_TRACE_TYPE_RING_PRB1 (3 << 8)
#define AUB_TRACE_TYPE_RING_PRB2 (4 << 8)
// Address space
#define AUB_TRACE_ADDRESS_SPACE_MASK 0x00ff0000
#define AUB_TRACE_MEMTYPE_GTT (0 << 16)
#define AUB_TRACE_MEMTYPE_LOCAL (1 << 16)
#define AUB_TRACE_MEMTYPE_NONLOCAL (2 << 16)
#define AUB_TRACE_MEMTYPE_PCI (3 << 16)
#define AUB_TRACE_MEMTYPE_GTT_ENTRY (4 << 16)
/* DW2 */
/**
* aub_state_struct_type enum values are encoded with the top 16 bits
* representing the type to be delivered to the .aub file, and the bottom 16
* bits representing the subtype. This macro performs the encoding.
*/
#define ENCODE_SS_TYPE(type, subtype) (((type) << 16) | (subtype))
enum aub_state_struct_type {
AUB_TRACE_VS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1),
AUB_TRACE_GS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 2),
AUB_TRACE_CLIP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 3),
AUB_TRACE_SF_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 4),
AUB_TRACE_WM_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 5),
AUB_TRACE_CC_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 6),
AUB_TRACE_CLIP_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 7),
AUB_TRACE_SF_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 8),
AUB_TRACE_CC_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x9),
AUB_TRACE_SAMPLER_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xa),
AUB_TRACE_KERNEL_INSTRUCTIONS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xb),
AUB_TRACE_SCRATCH_SPACE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xc),
AUB_TRACE_SAMPLER_DEFAULT_COLOR = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xd),
AUB_TRACE_SCISSOR_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x15),
AUB_TRACE_BLEND_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x16),
AUB_TRACE_DEPTH_STENCIL_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x17),
AUB_TRACE_VERTEX_BUFFER = ENCODE_SS_TYPE(AUB_TRACE_TYPE_VERTEX_BUFFER, 0),
AUB_TRACE_BINDING_TABLE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x100),
AUB_TRACE_SURFACE_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200),
AUB_TRACE_VS_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 0),
AUB_TRACE_WM_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 1),
};
#undef ENCODE_SS_TYPE
/**
* Decode an aub_state_struct_type value to determine the type that should be
* stored in the .aub file.
*/
static inline uint32_t AUB_TRACE_TYPE(enum aub_state_struct_type ss_type)
{
return (ss_type & 0xFFFF0000) >> 16;
}
/**
* Decode a state_struct_type value to determine the subtype that should be
* stored in the .aub file.
*/
static inline uint32_t AUB_TRACE_SUBTYPE(enum aub_state_struct_type ss_type)
{
return ss_type & 0xFFFF;
}
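/*
 * Worked example (an editorial addition): AUB_TRACE_SURFACE_STATE was
 * encoded as ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200), so the two
 * decoders above recover both halves again.
 */
static inline void aub_ss_type_example(void)
{
	enum aub_state_struct_type ss = AUB_TRACE_SURFACE_STATE;
	uint32_t type = AUB_TRACE_TYPE(ss);       /* 0x0f00 == AUB_TRACE_TYPE_SURFACE */
	uint32_t subtype = AUB_TRACE_SUBTYPE(ss); /* 0x0200 */

	(void)type;
	(void)subtype;
}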
/* DW3: address */
/* DW4: len */
#endif /* _INTEL_AUB_H */

View File

@@ -0,0 +1,341 @@
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
/**
* @file intel_bufmgr.h
*
* Public definitions of Intel-specific bufmgr functions.
*/
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
#include <stdio.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif
struct drm_clip_rect;
typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
typedef struct _drm_intel_context drm_intel_context;
typedef struct _drm_intel_bo drm_intel_bo;
struct _drm_intel_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
/**
* Alignment requirement for object
*
* Used for GTT mapping & pinning the object.
*/
unsigned long align;
/**
* Deprecated field containing (possibly the low 32-bits of) the last
* seen virtual card address. Use offset64 instead.
*/
unsigned long offset;
/**
* Virtual address for accessing the buffer data. Only valid while
* mapped.
*/
#ifdef __cplusplus
void *virt;
#else
void *virtual;
#endif
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
/**
* MM-specific handle for accessing object
*/
int handle;
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
* entries when calling drm_intel_bo_emit_reloc()
*/
uint64_t offset64;
};
enum aub_dump_bmp_format {
AUB_DUMP_BMP_FORMAT_8BIT = 1,
AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
};
typedef struct _drm_intel_aub_annotation {
uint32_t type;
uint32_t subtype;
uint32_t ending_offset;
} drm_intel_aub_annotation;
#define BO_ALLOC_FOR_RENDER (1<<0)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
const char *name,
void *addr, uint32_t tiling_mode,
uint32_t stride, unsigned long size,
unsigned long flags);
drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
int drm_intel_bo_unmap(drm_intel_bo *bo);
int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
unsigned int flags);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
int drm_intel_bo_unpin(drm_intel_bo *bo);
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);
int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable);
int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
int drm_intel_bo_is_reusable(drm_intel_bo *bo);
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
/* drm_intel_bufmgr_gem.c */
drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
int limit);
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
#define HAVE_DRM_INTEL_GEM_BO_DISABLE_IMPLICIT_SYNC 1
int drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr);
void drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo);
void drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo);
void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo);
int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename);
void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset);
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count);
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_context_get_id(drm_intel_context *ctx,
uint32_t *ctx_id);
void drm_intel_gem_context_destroy(drm_intel_context *ctx);
int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags);
int drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
drm_intel_context *ctx,
int used,
int in_fence,
int *out_fence,
unsigned int flags);
int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
int prime_fd, int size);
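/*
 * Usage sketch (an editorial addition, not part of the upstream API):
 * the typical allocate/upload/release cycle against the GEM buffer
 * manager. The fd is assumed to be an already-open DRM device node;
 * error handling is minimal for brevity.
 */
static inline int drm_intel_example_upload(int fd, const void *data,
					   unsigned long size)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_bo *bo;
	int ret = -1;

	if (!bufmgr)
		return -1;
	bo = drm_intel_bo_alloc(bufmgr, "example upload", size, 4096);
	if (bo) {
		/* copies 'data' into the bo without an explicit map/unmap */
		ret = drm_intel_bo_subdata(bo, 0, size, data);
		drm_intel_bo_unreference(bo);
	}
	drm_intel_bufmgr_destroy(bufmgr);
	return ret;
}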
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec) (drm_intel_bo *bo,
unsigned int used,
void *priv),
void *priv);
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
unsigned int (*emit) (void *priv),
void (*wait) (unsigned int fence,
void *priv),
void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
unsigned long size, void *virt);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb) (drm_intel_bo
* bo,
void *ptr),
void *ptr);
void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset,
int count);
void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end);
void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail);
void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
void drm_intel_decode(struct drm_intel_decode *ctx);
int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
int drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
uint32_t *pending);
int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
int drm_intel_get_pooled_eu(int fd);
int drm_intel_get_min_eu_in_pool(int fd);
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/
#define dri_bo drm_intel_bo
#define dri_bufmgr drm_intel_bufmgr
#define dri_bo_alloc drm_intel_bo_alloc
#define dri_bo_reference drm_intel_bo_reference
#define dri_bo_unreference drm_intel_bo_unreference
#define dri_bo_map drm_intel_bo_map
#define dri_bo_unmap drm_intel_bo_unmap
#define dri_bo_subdata drm_intel_bo_subdata
#define dri_bo_get_subdata drm_intel_bo_get_subdata
#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
#define dri_bo_exec drm_intel_bo_exec
#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
reloc_offset, target_bo) \
drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
target_bo, target_offset, \
read, write);
#define dri_bo_pin drm_intel_bo_pin
#define dri_bo_unpin drm_intel_bo_unpin
#define dri_bo_get_tiling drm_intel_bo_get_tiling
#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
#define dri_bo_flink drm_intel_bo_flink
#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
/** @} */
#if defined(__cplusplus)
}
#endif
#endif /* INTEL_BUFMGR_H */

View File

@@ -0,0 +1,44 @@
/*
* Copyright © 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Ben Widawsky <ben@bwidawsk.net>
*
*/
#ifndef INTEL_DEBUG_H
#define INTEL_DEBUG_H
#include <stdint.h>
#define SHADER_DEBUG_SOCKET "/var/run/gen_debug"
#define DEBUG_HANDSHAKE_VERSION 0x3
#define DEBUG_HANDSHAKE_ACK "okay"
/* First byte must always be the 1 byte version */
struct intel_debug_handshake {
uint32_t version;
int flink_handle;
uint32_t per_thread_scratch;
} __attribute__((packed));
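/*
 * Illustrative check (an editorial addition, assuming a C11 toolchain and
 * the usual ILP32/LP64 ABIs): with __attribute__((packed)) the handshake
 * is exactly 12 bytes on the wire (two 32-bit words plus one 32-bit int).
 */
_Static_assert(sizeof(struct intel_debug_handshake) == 12,
	       "unexpected debug handshake wire size");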
#endif

View File

@@ -0,0 +1,256 @@
/* mach64_drm.h -- Public header for the mach64 driver -*- linux-c -*-
* Created: Thu Nov 30 20:04:32 2000 by gareth@valinux.com
*/
/*
* Copyright 2000 Gareth Hughes
* Copyright 2002 Frank C. Earl
* Copyright 2002-2003 Leif Delgass
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Frank C. Earl <fearl@airmail.net>
* Leif Delgass <ldelgass@retinalburn.net>
*/
#ifndef __MACH64_DRM_H__
#define __MACH64_DRM_H__
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_sarea.h)
*/
#ifndef __MACH64_SAREA_DEFINES__
#define __MACH64_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
* GH: We're going to be pedantic about this. We want the card to do as
* little as possible, so let's avoid having it fetch a whole bunch of
* register values that don't change all that often, if at all.
*/
#define MACH64_UPLOAD_DST_OFF_PITCH 0x0001
#define MACH64_UPLOAD_Z_OFF_PITCH 0x0002
#define MACH64_UPLOAD_Z_ALPHA_CNTL 0x0004
#define MACH64_UPLOAD_SCALE_3D_CNTL 0x0008
#define MACH64_UPLOAD_DP_FOG_CLR 0x0010
#define MACH64_UPLOAD_DP_WRITE_MASK 0x0020
#define MACH64_UPLOAD_DP_PIX_WIDTH 0x0040
#define MACH64_UPLOAD_SETUP_CNTL 0x0080
#define MACH64_UPLOAD_MISC 0x0100
#define MACH64_UPLOAD_TEXTURE 0x0200
#define MACH64_UPLOAD_TEX0IMAGE 0x0400
#define MACH64_UPLOAD_TEX1IMAGE 0x0800
#define MACH64_UPLOAD_CLIPRECTS 0x1000 /* handled client-side */
#define MACH64_UPLOAD_CONTEXT 0x00ff
#define MACH64_UPLOAD_ALL 0x1fff
/* DMA buffer size
*/
#define MACH64_BUFFER_SIZE 16384
/* Max number of swaps allowed on the ring
* before the client must wait
*/
#define MACH64_MAX_QUEUED_FRAMES 3U
/* Byte offsets for host blit buffer data
*/
#define MACH64_HOSTDATA_BLIT_OFFSET 104
/* Keep these small for testing.
*/
#define MACH64_NR_SAREA_CLIPRECTS 8
#define MACH64_CARD_HEAP 0
#define MACH64_AGP_HEAP 1
#define MACH64_NR_TEX_HEAPS 2
#define MACH64_NR_TEX_REGIONS 64
#define MACH64_LOG_TEX_GRANULARITY 16
#define MACH64_TEX_MAXLEVELS 1
#define MACH64_NR_CONTEXT_REGS 15
#define MACH64_NR_TEXTURE_REGS 4
#endif /* __MACH64_SAREA_DEFINES__ */
typedef struct {
unsigned int dst_off_pitch;
unsigned int z_off_pitch;
unsigned int z_cntl;
unsigned int alpha_tst_cntl;
unsigned int scale_3d_cntl;
unsigned int sc_left_right;
unsigned int sc_top_bottom;
unsigned int dp_fog_clr;
unsigned int dp_write_mask;
unsigned int dp_pix_width;
unsigned int dp_mix;
unsigned int dp_src;
unsigned int clr_cmp_cntl;
unsigned int gui_traj_cntl;
unsigned int setup_cntl;
unsigned int tex_size_pitch;
unsigned int tex_cntl;
unsigned int secondary_tex_off;
unsigned int tex_offset;
} drm_mach64_context_regs_t;
typedef struct drm_mach64_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mach64_context_regs_t context_state;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int frames_queued;
/* Texture memory LRU.
*/
struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
1];
unsigned int tex_age[MACH64_NR_TEX_HEAPS];
int ctx_owner;
} drm_mach64_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mach64_common.h)
*/
/* Mach64 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_MACH64_INIT 0x00
#define DRM_MACH64_IDLE 0x01
#define DRM_MACH64_RESET 0x02
#define DRM_MACH64_SWAP 0x03
#define DRM_MACH64_CLEAR 0x04
#define DRM_MACH64_VERTEX 0x05
#define DRM_MACH64_BLIT 0x06
#define DRM_MACH64_FLUSH 0x07
#define DRM_MACH64_GETPARAM 0x08
#define DRM_IOCTL_MACH64_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_INIT, drm_mach64_init_t)
#define DRM_IOCTL_MACH64_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_IDLE )
#define DRM_IOCTL_MACH64_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_RESET )
#define DRM_IOCTL_MACH64_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_SWAP )
#define DRM_IOCTL_MACH64_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_CLEAR, drm_mach64_clear_t)
#define DRM_IOCTL_MACH64_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_VERTEX, drm_mach64_vertex_t)
#define DRM_IOCTL_MACH64_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MACH64_BLIT, drm_mach64_blit_t)
#define DRM_IOCTL_MACH64_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_MACH64_FLUSH )
#define DRM_IOCTL_MACH64_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_MACH64_GETPARAM, drm_mach64_getparam_t)
/* Buffer flags for clears
*/
#define MACH64_FRONT 0x1
#define MACH64_BACK 0x2
#define MACH64_DEPTH 0x4
/* Primitive types for vertex buffers
*/
#define MACH64_PRIM_POINTS 0x00000000
#define MACH64_PRIM_LINES 0x00000001
#define MACH64_PRIM_LINE_LOOP 0x00000002
#define MACH64_PRIM_LINE_STRIP 0x00000003
#define MACH64_PRIM_TRIANGLES 0x00000004
#define MACH64_PRIM_TRIANGLE_STRIP 0x00000005
#define MACH64_PRIM_TRIANGLE_FAN 0x00000006
#define MACH64_PRIM_QUADS 0x00000007
#define MACH64_PRIM_QUAD_STRIP 0x00000008
#define MACH64_PRIM_POLYGON 0x00000009
typedef enum _drm_mach64_dma_mode_t {
MACH64_MODE_DMA_ASYNC,
MACH64_MODE_DMA_SYNC,
MACH64_MODE_MMIO
} drm_mach64_dma_mode_t;
typedef struct drm_mach64_init {
enum {
DRM_MACH64_INIT_DMA = 0x01,
DRM_MACH64_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
int is_pci;
drm_mach64_dma_mode_t dma_mode;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
} drm_mach64_init_t;
typedef struct drm_mach64_clear {
unsigned int flags;
int x, y, w, h;
unsigned int clear_color;
unsigned int clear_depth;
} drm_mach64_clear_t;
typedef struct drm_mach64_vertex {
int prim;
void *buf; /* Address of vertex buffer */
unsigned long used; /* Number of bytes in buffer */
int discard; /* Client finished with buffer? */
} drm_mach64_vertex_t;
typedef struct drm_mach64_blit {
void *buf;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_mach64_blit_t;
typedef struct drm_mach64_getparam {
enum {
MACH64_PARAM_FRAMES_QUEUED = 0x01,
MACH64_PARAM_IRQ_NR = 0x02
} param;
void *value;
} drm_mach64_getparam_t;
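/*
 * Usage sketch (an editorial addition): query how many frames are queued
 * via DRM_IOCTL_MACH64_GETPARAM. Assumes <sys/ioctl.h> has been included
 * and fd is an open DRM device node.
 */
static inline int mach64_frames_queued(int fd)
{
	drm_mach64_getparam_t gp;
	int frames = 0;

	gp.param = MACH64_PARAM_FRAMES_QUEUED;
	gp.value = &frames;
	if (ioctl(fd, DRM_IOCTL_MACH64_GETPARAM, &gp) != 0)
		return -1;
	return frames;
}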
#endif

View File

@@ -0,0 +1,427 @@
/* mga_drm.h -- Public header for the Matrox g200/g400 driver -*- linux-c -*-
* Created: Tue Jan 25 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jeff Hartmann <jhartmann@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*
* Rewritten by:
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
#ifndef __MGA_SAREA_DEFINES__
#define __MGA_SAREA_DEFINES__
/* WARP pipe flags
*/
#define MGA_F 0x1 /* fog */
#define MGA_A 0x2 /* alpha */
#define MGA_S 0x4 /* specular */
#define MGA_T2 0x8 /* multitexture */
#define MGA_WARP_TGZ 0
#define MGA_WARP_TGZF (MGA_F)
#define MGA_WARP_TGZA (MGA_A)
#define MGA_WARP_TGZAF (MGA_F|MGA_A)
#define MGA_WARP_TGZS (MGA_S)
#define MGA_WARP_TGZSF (MGA_S|MGA_F)
#define MGA_WARP_TGZSA (MGA_S|MGA_A)
#define MGA_WARP_TGZSAF (MGA_S|MGA_F|MGA_A)
#define MGA_WARP_T2GZ (MGA_T2)
#define MGA_WARP_T2GZF (MGA_T2|MGA_F)
#define MGA_WARP_T2GZA (MGA_T2|MGA_A)
#define MGA_WARP_T2GZAF (MGA_T2|MGA_A|MGA_F)
#define MGA_WARP_T2GZS (MGA_T2|MGA_S)
#define MGA_WARP_T2GZSF (MGA_T2|MGA_S|MGA_F)
#define MGA_WARP_T2GZSA (MGA_T2|MGA_S|MGA_A)
#define MGA_WARP_T2GZSAF (MGA_T2|MGA_S|MGA_F|MGA_A)
#define MGA_MAX_G200_PIPES 8 /* no multitex */
#define MGA_MAX_G400_PIPES 16
#define MGA_MAX_WARP_PIPES MGA_MAX_G400_PIPES
#define MGA_WARP_UCODE_SIZE 32768 /* in bytes */
#define MGA_CARD_TYPE_G200 1
#define MGA_CARD_TYPE_G400 2
#define MGA_CARD_TYPE_G450 3 /* not currently used */
#define MGA_CARD_TYPE_G550 4
#define MGA_FRONT 0x1
#define MGA_BACK 0x2
#define MGA_DEPTH 0x4
/* What needs to be changed for the current vertex dma buffer?
*/
#define MGA_UPLOAD_CONTEXT 0x1
#define MGA_UPLOAD_TEX0 0x2
#define MGA_UPLOAD_TEX1 0x4
#define MGA_UPLOAD_PIPE 0x8
#define MGA_UPLOAD_TEX0IMAGE 0x10 /* handled client-side */
#define MGA_UPLOAD_TEX1IMAGE 0x20 /* handled client-side */
#define MGA_UPLOAD_2D 0x40
#define MGA_WAIT_AGE 0x80 /* handled client-side */
#define MGA_UPLOAD_CLIPRECTS 0x100 /* handled client-side */
#if 0
#define MGA_DMA_FLUSH 0x200 /* set when someone gets the lock
quiescent */
#endif
/* 128 buffers of 64k each, 8 megs total.
 */
#define MGA_BUFFER_SIZE (1 << 16)
#define MGA_NUM_BUFFERS 128
/* Keep these small for testing.
*/
#define MGA_NR_SAREA_CLIPRECTS 8
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define MGA_CARD_HEAP 0
#define MGA_AGP_HEAP 1
#define MGA_NR_TEX_HEAPS 2
#define MGA_NR_TEX_REGIONS 16
#define MGA_LOG_MIN_TEX_REGION_SIZE 16
#define DRM_MGA_IDLE_RETRY 2048
#endif /* __MGA_SAREA_DEFINES__ */
/* Setup registers for 3D context
*/
typedef struct {
unsigned int dstorg;
unsigned int maccess;
unsigned int plnwt;
unsigned int dwgctl;
unsigned int alphactrl;
unsigned int fogcolor;
unsigned int wflag;
unsigned int tdualstage0;
unsigned int tdualstage1;
unsigned int fcol;
unsigned int stencil;
unsigned int stencilctl;
} drm_mga_context_regs_t;
/* Setup registers for 2D, X server
*/
typedef struct {
unsigned int pitch;
} drm_mga_server_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int texctl;
unsigned int texctl2;
unsigned int texfilter;
unsigned int texbordercol;
unsigned int texorg;
unsigned int texwidth;
unsigned int texheight;
unsigned int texorg1;
unsigned int texorg2;
unsigned int texorg3;
unsigned int texorg4;
} drm_mga_texture_regs_t;
/* General aging mechanism
*/
typedef struct {
unsigned int head; /* Position of head pointer */
unsigned int wrap; /* Primary DMA wrap count */
} drm_mga_age_t;
typedef struct _drm_mga_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex dma buffer.
*/
drm_mga_context_regs_t context_state;
drm_mga_server_regs_t server_state;
drm_mga_texture_regs_t tex_state[2];
unsigned int warp_pipe;
unsigned int dirty;
unsigned int vertsize;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
* client fills in the req_* fields, the server fills in the
* exported_ fields and puts the cliprects into boxes, above.
*
* The client clears the exported_drawable field before
* clobbering the boxes data.
*/
unsigned int req_drawable; /* the X drawable id */
unsigned int req_draw_buffer; /* MGA_FRONT or MGA_BACK */
unsigned int exported_drawable;
unsigned int exported_index;
unsigned int exported_stamp;
unsigned int exported_buffers;
unsigned int exported_nfront;
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
unsigned int status[4];
unsigned int last_wrap;
drm_mga_age_t last_frame;
unsigned int last_enqueue; /* last time a buffer was enqueued */
unsigned int last_dispatch; /* age of the most recently dispatched buffer */
unsigned int last_quiescent; /* */
/* LRU lists for texture memory in agp space and on the card.
*/
struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_mga_sarea_t;
/* MGA specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_MGA_INIT 0x00
#define DRM_MGA_FLUSH 0x01
#define DRM_MGA_RESET 0x02
#define DRM_MGA_SWAP 0x03
#define DRM_MGA_CLEAR 0x04
#define DRM_MGA_VERTEX 0x05
#define DRM_MGA_INDICES 0x06
#define DRM_MGA_ILOAD 0x07
#define DRM_MGA_BLIT 0x08
#define DRM_MGA_GETPARAM 0x09
/* 3.2:
* ioctls for operating on fences.
*/
#define DRM_MGA_SET_FENCE 0x0a
#define DRM_MGA_WAIT_FENCE 0x0b
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
#define DRM_IOCTL_MGA_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
#define DRM_IOCTL_MGA_RESET DRM_IO( DRM_COMMAND_BASE + DRM_MGA_RESET)
#define DRM_IOCTL_MGA_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_MGA_SWAP)
#define DRM_IOCTL_MGA_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
#define DRM_IOCTL_MGA_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_VERTEX, drm_mga_vertex_t)
#define DRM_IOCTL_MGA_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INDICES, drm_mga_indices_t)
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
typedef struct _drm_mga_warp_index {
int installed;
unsigned long phys_addr;
int size;
} drm_mga_warp_index_t;
typedef struct drm_mga_init {
enum {
MGA_INIT_DMA = 0x01,
MGA_CLEANUP_DMA = 0x02
} func;
unsigned long sarea_priv_offset;
int chipset;
int sgram;
unsigned int maccess;
unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_cpp;
unsigned int depth_offset, depth_pitch;
unsigned int texture_offset[MGA_NR_TEX_HEAPS];
unsigned int texture_size[MGA_NR_TEX_HEAPS];
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long status_offset;
unsigned long warp_offset;
unsigned long primary_offset;
unsigned long buffers_offset;
} drm_mga_init_t;
typedef struct drm_mga_dma_bootstrap {
/**
* \name AGP texture region
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
* be filled in with the actual AGP texture settings.
*
* \warning
* If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
* is zero, it means that PCI memory (most likely through the use of
* an IOMMU) is being used for "AGP" textures.
*/
/*@{ */
unsigned long texture_handle; /**< Handle used to map AGP textures. */
__u32 texture_size; /**< Size of the AGP texture region. */
/*@} */
/**
* Requested size of the primary DMA region.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual size of the primary DMA region that was
* allocated.
*/
__u32 primary_size;
/**
* Requested number of secondary DMA buffers.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual number of secondary DMA buffers
* allocated. Particularly when PCI DMA is used, this may be
* (substantially) less than the number requested.
*/
__u32 secondary_bin_count;
/**
* Requested size of each secondary DMA buffer.
*
* While the kernel \b is free to reduce
* drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
* to reduce drm_mga_dma_bootstrap::secondary_bin_size.
*/
__u32 secondary_bin_size;
/**
* Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
* \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported. If this value is
* zero, it means that PCI DMA should be used, even if AGP is
* possible.
*
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.
*/
__u32 agp_mode;
/**
* Desired AGP GART size, measured in megabytes.
*/
__u8 agp_size;
} drm_mga_dma_bootstrap_t;
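/*
 * Usage sketch (an editorial addition): fill in a bootstrap request before
 * DRM_IOCTL_MGA_DMA_BOOTSTRAP. All sizes here are arbitrary example values;
 * per the doc comments above, the kernel may scale the secondary buffer
 * count down but never the per-buffer size.
 */
static inline void mga_dma_bootstrap_example(drm_mga_dma_bootstrap_t *dma_bs)
{
	dma_bs->primary_size = 1024 * 1024;  /* request 1 MB of primary DMA */
	dma_bs->secondary_bin_count = 16;
	dma_bs->secondary_bin_size = 64 * 1024;
	dma_bs->agp_mode = 0;                /* 0 forces PCI DMA even if AGP works */
	dma_bs->agp_size = 32;               /* desired GART size, in MB */
	/* ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, dma_bs) fills in the results */
}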
typedef struct drm_mga_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_mga_clear_t;
typedef struct drm_mga_vertex {
int idx; /* buffer to queue */
int used; /* bytes in use */
int discard; /* client finished with buffer? */
} drm_mga_vertex_t;
typedef struct drm_mga_indices {
int idx; /* buffer to queue */
unsigned int start;
unsigned int end;
int discard; /* client finished with buffer? */
} drm_mga_indices_t;
typedef struct drm_mga_iload {
int idx;
unsigned int dstorg;
unsigned int length;
} drm_mga_iload_t;
typedef struct _drm_mga_blit {
unsigned int planemask;
unsigned int srcorg;
unsigned int dstorg;
int src_pitch, dst_pitch;
int delta_sx, delta_sy;
int delta_dx, delta_dy;
int height, ydir; /* flip image vertically */
int source_pitch, dest_pitch;
} drm_mga_blit_t;
/* 3.1: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define MGA_PARAM_IRQ_NR 1
/* 3.2: Query the actual card type. The DDX only distinguishes between
* G200 chips and non-G200 chips, which it calls G400. It turns out that
* there are some very subtle differences between the G4x0 chips and the G550
* chips. Using this parameter query, a client-side driver can detect the
* difference between a G4x0 and a G550.
*/
#define MGA_PARAM_CARD_TYPE 2
typedef struct drm_mga_getparam {
int param;
void *value;
} drm_mga_getparam_t;
#if defined(__cplusplus)
}
#endif
#endif

View File

@@ -0,0 +1,308 @@
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __MSM_DRM_H__
#define __MSM_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints:
* 1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
* user/kernel compatibility
* 2) Keep fields aligned to their size
* 3) Because of how drm_ioctl() works, we can add new fields at
* the end of an ioctl if some care is taken: drm_ioctl() will
* zero out the new fields at the tail of the ioctl, so a zero
* value should have a backwards compatible meaning. And for
* output params, userspace won't see the newly added output
* fields, so those must be safe for older userspace to ignore.
*/
#define MSM_PIPE_NONE 0x00
#define MSM_PIPE_2D0 0x01
#define MSM_PIPE_2D1 0x02
#define MSM_PIPE_3D0 0x10
/* The pipe-id just uses the lower bits, so can be OR'd with flags in
* the upper 16 bits (which could be extended further, if needed, maybe
* we extend/overload the pipe-id some day to deal with multiple rings,
* but even then I don't think we need the full lower 16 bits).
*/
#define MSM_PIPE_ID_MASK 0xffff
#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
/* timeouts are specified in clock-monotonic absolute times (to simplify
* restarting interrupted ioctls). The following struct is logically the
* same as 'struct timespec' but 32/64b ABI safe.
*/
struct drm_msm_timespec {
__s64 tv_sec; /* seconds */
__s64 tv_nsec; /* nanoseconds */
};
#define MSM_PARAM_GPU_ID 0x01
#define MSM_PARAM_GMEM_SIZE 0x02
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
#define MSM_PARAM_NR_RINGS 0x07
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
__u32 param; /* in, MSM_PARAM_x */
__u64 value; /* out (get_param) or in (set_param) */
};
/*
* GEM buffers:
*/
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
#define MSM_BO_WC 0x00020000
#define MSM_BO_UNCACHED 0x00040000
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
MSM_BO_CACHED | \
MSM_BO_WC | \
MSM_BO_UNCACHED)
struct drm_msm_gem_new {
__u64 size; /* in */
__u32 flags; /* in, mask of MSM_BO_x */
__u32 handle; /* out */
};
#define MSM_INFO_IOVA 0x01
#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
struct drm_msm_gem_info {
__u32 handle; /* in */
__u32 flags; /* in - combination of MSM_INFO_* flags */
__u64 offset; /* out, mmap() offset or iova */
};
#define MSM_PREP_READ 0x01
#define MSM_PREP_WRITE 0x02
#define MSM_PREP_NOSYNC 0x04
#define MSM_PREP_FLAGS (MSM_PREP_READ | MSM_PREP_WRITE | MSM_PREP_NOSYNC)
struct drm_msm_gem_cpu_prep {
__u32 handle; /* in */
__u32 op; /* in, mask of MSM_PREP_x */
struct drm_msm_timespec timeout; /* in */
};
struct drm_msm_gem_cpu_fini {
__u32 handle; /* in */
};
/*
* Cmdstream Submission:
*/
/* The value written into the cmdstream is logically:
*
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
*
* When we have GPUs with >32-bit pointers, it should be possible to deal
* with this by emitting two reloc entries with appropriate shift
* values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
*
* NOTE that reloc's must be sorted by order of increasing submit_offset,
* otherwise EINVAL.
*/
struct drm_msm_gem_submit_reloc {
__u32 submit_offset; /* in, offset from submit_bo */
__u32 or; /* in, value OR'd with result */
__s32 shift; /* in, amount of left shift (can be negative) */
__u32 reloc_idx; /* in, index of reloc_bo buffer */
__u64 reloc_offset; /* in, offset from start of reloc_bo */
};
/* submit-types:
* BUF - this cmd buffer is executed normally.
* IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
* processed normally, but the kernel does not setup an IB to
* this buffer in the first-level ringbuffer
* CTX_RESTORE_BUF - only executed if there has been a GPU context
* switch since the last SUBMIT ioctl
*/
#define MSM_SUBMIT_CMD_BUF 0x0001
#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
struct drm_msm_gem_submit_cmd {
__u32 type; /* in, one of MSM_SUBMIT_CMD_x */
__u32 submit_idx; /* in, index of submit_bo cmdstream buffer */
__u32 submit_offset; /* in, offset into submit_bo */
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
__u64 relocs; /* in, ptr to array of submit_reloc's */
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
* cmdstream buffer(s) themselves or reloc entries) has one (and only
* one) entry in the submit->bos[] table.
*
* As an optimization, the current buffer (gpu virtual address) can be
* passed back through the 'presumed' field. If on a subsequent reloc,
* userspace passes back a 'presumed' address that is still valid,
* then patching the cmdstream for this entry is skipped. This can
* avoid kernel needing to map/access the cmdstream bo in the common
* case.
*/
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
__u32 handle; /* in, GEM handle */
__u64 presumed; /* in/out, presumed buffer address */
};
/* Valid submit ioctl flags: */
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
MSM_SUBMIT_SUDO | \
0)
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
__u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
};
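/*
 * Usage sketch (an editorial addition): a single-BO, single-cmd submission.
 * The GEM handle and cmdstream size are assumed to come from an earlier
 * DRM_MSM_GEM_NEW; the ioctl call itself is left to the caller.
 */
static inline void msm_submit_example(struct drm_msm_gem_submit *req,
				      struct drm_msm_gem_submit_bo *bo,
				      struct drm_msm_gem_submit_cmd *cmd,
				      __u32 handle, __u32 cmd_size)
{
	bo->flags = MSM_SUBMIT_BO_READ;  /* the GPU only reads the cmdstream bo */
	bo->handle = handle;
	bo->presumed = 0;                /* no presumed address on first use */

	cmd->type = MSM_SUBMIT_CMD_BUF;
	cmd->submit_idx = 0;             /* index into the bos[] table above */
	cmd->submit_offset = 0;
	cmd->size = cmd_size;
	cmd->pad = 0;
	cmd->nr_relocs = 0;
	cmd->relocs = 0;

	req->flags = MSM_PIPE_3D0;
	req->nr_bos = 1;
	req->bos = (__u64)(unsigned long)bo;
	req->nr_cmds = 1;
	req->cmds = (__u64)(unsigned long)cmd;
	req->fence_fd = -1;              /* no input fence */
	req->queueid = 0;                /* default submitqueue */
}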
/* The normal way to synchronize with the GPU is just to CPU_PREP on
* a buffer if you need to access it from the CPU (other cmdstream
* submission from same or other contexts, PAGE_FLIP ioctl, etc, all
* handle the required synchronization under the hood). This ioctl
* mainly just exists as a way to implement the gallium pipe_fence
* APIs without requiring a dummy bo to synchronize on.
*/
struct drm_msm_wait_fence {
__u32 fence; /* in */
__u32 pad;
struct drm_msm_timespec timeout; /* in */
__u32 queueid; /* in, submitqueue id */
};
/* madvise provides a way to tell the kernel that a buffer's contents
* can be discarded under memory pressure, which is useful for a userspace
* bo cache where we want to optimistically hold on to the buffer allocation
* and potential mmap, but allow the pages to be discarded under memory
* pressure.
*
* Typical usage would involve madvise(DONTNEED) when buffer enters BO
* cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
* In the WILLNEED case, 'retained' indicates to userspace whether the
* backing pages still exist.
*/
#define MSM_MADV_WILLNEED 0 /* backing pages are needed, status returned in 'retained' */
#define MSM_MADV_DONTNEED 1 /* backing pages not needed */
#define __MSM_MADV_PURGED 2 /* internal state */
struct drm_msm_gem_madvise {
__u32 handle; /* in, GEM handle */
__u32 madv; /* in, MSM_MADV_x */
__u32 retained; /* out, whether backing store still exists */
};
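/*
 * Usage sketch (an editorial addition): the DONTNEED/WILLNEED round trip
 * described above. After a WILLNEED call, retained == 0 means the pages
 * were purged and the buffer contents must be reinitialized.
 */
static inline void msm_madvise_example(struct drm_msm_gem_madvise *req,
				       __u32 handle, int entering_cache)
{
	req->handle = handle;
	req->madv = entering_cache ? MSM_MADV_DONTNEED : MSM_MADV_WILLNEED;
	req->retained = 0;
	/* then: ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, req) and check retained */
}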
/*
* Draw queues allow the user to set specific submission parameters. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
* backwards compatibility as a "default" submitqueue.
*/
#define MSM_SUBMITQUEUE_FLAGS (0)
struct drm_msm_submitqueue {
__u32 flags; /* in, MSM_SUBMITQUEUE_x */
__u32 prio; /* in, Priority level */
__u32 id; /* out, identifier */
};
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
*/
#define DRM_MSM_GEM_NEW 0x02
#define DRM_MSM_GEM_INFO 0x03
#define DRM_MSM_GEM_CPU_PREP 0x04
#define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
#define DRM_MSM_GEM_MADVISE 0x08
/* placeholder:
#define DRM_MSM_GEM_SVM_NEW 0x09
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#if defined(__cplusplus)
}
#endif
#endif /* __MSM_DRM_H__ */


@@ -0,0 +1,276 @@
#ifndef __NOUVEAU_H__
#define __NOUVEAU_H__
#include <stdint.h>
#include <stdbool.h>
/* Supported class information, provided by the kernel */
struct nouveau_sclass {
int32_t oclass;
int minver;
int maxver;
};
/* Client-provided array describing class versions that are desired.
*
* These are used to match against the kernel's list of supported classes.
*/
struct nouveau_mclass {
int32_t oclass;
int version;
void *data;
};
struct nouveau_object {
struct nouveau_object *parent;
uint64_t handle;
uint32_t oclass;
uint32_t length; /* deprecated */
void *data; /* deprecated */
};
int nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
uint32_t oclass, void *data, uint32_t length,
struct nouveau_object **);
void nouveau_object_del(struct nouveau_object **);
int nouveau_object_mthd(struct nouveau_object *, uint32_t mthd,
void *data, uint32_t size);
int nouveau_object_sclass_get(struct nouveau_object *,
struct nouveau_sclass **);
void nouveau_object_sclass_put(struct nouveau_sclass **);
int nouveau_object_mclass(struct nouveau_object *,
const struct nouveau_mclass *);
struct nouveau_drm {
struct nouveau_object client;
int fd;
uint32_t version;
bool nvif;
};
static inline struct nouveau_drm *
nouveau_drm(struct nouveau_object *obj)
{
while (obj && obj->parent)
obj = obj->parent;
return (struct nouveau_drm *)obj;
}
int nouveau_drm_new(int fd, struct nouveau_drm **);
void nouveau_drm_del(struct nouveau_drm **);
struct nouveau_device {
struct nouveau_object object;
int fd; /* deprecated */
uint32_t lib_version; /* deprecated */
uint32_t drm_version; /* deprecated */
uint32_t chipset;
uint64_t vram_size;
uint64_t gart_size;
uint64_t vram_limit;
uint64_t gart_limit;
};
int nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
void *data, uint32_t size, struct nouveau_device **);
void nouveau_device_del(struct nouveau_device **);
int nouveau_getparam(struct nouveau_device *, uint64_t param, uint64_t *value);
int nouveau_setparam(struct nouveau_device *, uint64_t param, uint64_t value);
/* deprecated */
int nouveau_device_wrap(int fd, int close, struct nouveau_device **);
int nouveau_device_open(const char *busid, struct nouveau_device **);
struct nouveau_client {
struct nouveau_device *device;
int id;
};
int nouveau_client_new(struct nouveau_device *, struct nouveau_client **);
void nouveau_client_del(struct nouveau_client **);
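/* Example (sketch, mirroring typical Mesa usage): opening the device via
 * the NVIF path.  NV_DEVICE and struct nv_device_v0 come from the nvif
 * class headers; error handling is elided.
 */
#if 0
	struct nouveau_drm *drm;
	struct nouveau_device *dev;
	struct nouveau_client *client;

	nouveau_drm_new(fd, &drm);
	nouveau_device_new(&drm->client, NV_DEVICE,
			   &(struct nv_device_v0) { .device = ~0ULL },
			   sizeof(struct nv_device_v0), &dev);
	nouveau_client_new(dev, &client);
#endif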
union nouveau_bo_config {
struct {
#define NV04_BO_16BPP 0x00000001
#define NV04_BO_32BPP 0x00000002
#define NV04_BO_ZETA 0x00000004
uint32_t surf_flags;
uint32_t surf_pitch;
} nv04;
struct {
uint32_t memtype;
uint32_t tile_mode;
} nv50;
struct {
uint32_t memtype;
uint32_t tile_mode;
} nvc0;
uint32_t data[8];
};
#define NOUVEAU_BO_VRAM 0x00000001
#define NOUVEAU_BO_GART 0x00000002
#define NOUVEAU_BO_APER (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)
#define NOUVEAU_BO_RD 0x00000100
#define NOUVEAU_BO_WR 0x00000200
#define NOUVEAU_BO_RDWR (NOUVEAU_BO_RD | NOUVEAU_BO_WR)
#define NOUVEAU_BO_NOBLOCK 0x00000400
#define NOUVEAU_BO_LOW 0x00001000
#define NOUVEAU_BO_HIGH 0x00002000
#define NOUVEAU_BO_OR 0x00004000
#define NOUVEAU_BO_MAP 0x80000000
#define NOUVEAU_BO_CONTIG 0x40000000
#define NOUVEAU_BO_NOSNOOP 0x20000000
#define NOUVEAU_BO_COHERENT 0x10000000
struct nouveau_bo {
struct nouveau_device *device;
uint32_t handle;
uint64_t size;
uint32_t flags;
uint64_t offset;
void *map;
union nouveau_bo_config config;
};
int nouveau_bo_new(struct nouveau_device *, uint32_t flags, uint32_t align,
uint64_t size, union nouveau_bo_config *,
struct nouveau_bo **);
int nouveau_bo_wrap(struct nouveau_device *, uint32_t handle,
struct nouveau_bo **);
int nouveau_bo_name_ref(struct nouveau_device *v, uint32_t name,
struct nouveau_bo **);
int nouveau_bo_name_get(struct nouveau_bo *, uint32_t *name);
void nouveau_bo_ref(struct nouveau_bo *, struct nouveau_bo **);
int nouveau_bo_map(struct nouveau_bo *, uint32_t access,
struct nouveau_client *);
int nouveau_bo_wait(struct nouveau_bo *, uint32_t access,
struct nouveau_client *);
int nouveau_bo_prime_handle_ref(struct nouveau_device *, int prime_fd,
struct nouveau_bo **);
int nouveau_bo_set_prime(struct nouveau_bo *, int *prime_fd);
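/* Example (sketch): allocating a small mappable GART buffer and clearing
 * it through its CPU mapping; error handling is elided.
 */
#if 0
	struct nouveau_bo *bo = NULL;
	nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0, 4096, NULL, &bo);
	nouveau_bo_map(bo, NOUVEAU_BO_WR, client);
	memset(bo->map, 0, bo->size);
	nouveau_bo_ref(NULL, &bo);	/* drop the reference */
#endif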
struct nouveau_list {
struct nouveau_list *prev;
struct nouveau_list *next;
};
struct nouveau_bufref {
struct nouveau_list thead;
struct nouveau_bo *bo;
uint32_t packet;
uint32_t flags;
uint32_t data;
uint32_t vor;
uint32_t tor;
uint32_t priv_data;
void *priv;
};
struct nouveau_bufctx {
struct nouveau_client *client;
struct nouveau_list head;
struct nouveau_list pending;
struct nouveau_list current;
int relocs;
};
int nouveau_bufctx_new(struct nouveau_client *, int bins,
struct nouveau_bufctx **);
void nouveau_bufctx_del(struct nouveau_bufctx **);
struct nouveau_bufref *
nouveau_bufctx_refn(struct nouveau_bufctx *, int bin,
struct nouveau_bo *, uint32_t flags);
struct nouveau_bufref *
nouveau_bufctx_mthd(struct nouveau_bufctx *, int bin, uint32_t packet,
struct nouveau_bo *, uint64_t data, uint32_t flags,
uint32_t vor, uint32_t tor);
void nouveau_bufctx_reset(struct nouveau_bufctx *, int bin);
struct nouveau_pushbuf_krec;
struct nouveau_pushbuf {
struct nouveau_client *client;
struct nouveau_object *channel;
struct nouveau_bufctx *bufctx;
void (*kick_notify)(struct nouveau_pushbuf *);
void *user_priv;
uint32_t rsvd_kick;
uint32_t flags;
uint32_t *cur;
uint32_t *end;
};
struct nouveau_pushbuf_refn {
struct nouveau_bo *bo;
uint32_t flags;
};
int nouveau_pushbuf_new(struct nouveau_client *, struct nouveau_object *chan,
int nr, uint32_t size, bool immediate,
struct nouveau_pushbuf **);
void nouveau_pushbuf_del(struct nouveau_pushbuf **);
int nouveau_pushbuf_space(struct nouveau_pushbuf *, uint32_t dwords,
uint32_t relocs, uint32_t pushes);
void nouveau_pushbuf_data(struct nouveau_pushbuf *, struct nouveau_bo *,
uint64_t offset, uint64_t length);
int nouveau_pushbuf_refn(struct nouveau_pushbuf *,
struct nouveau_pushbuf_refn *, int nr);
/* Emits a reloc into the push buffer at the current position; you *must*
 * have previously added the referenced buffer to a buffer context, and
 * validated it against the current push buffer.
 */
void nouveau_pushbuf_reloc(struct nouveau_pushbuf *, struct nouveau_bo *,
uint32_t data, uint32_t flags,
uint32_t vor, uint32_t tor);
int nouveau_pushbuf_validate(struct nouveau_pushbuf *);
uint32_t nouveau_pushbuf_refd(struct nouveau_pushbuf *, struct nouveau_bo *);
int nouveau_pushbuf_kick(struct nouveau_pushbuf *, struct nouveau_object *chan);
struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *, struct nouveau_bufctx *);
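/* Example (sketch): the bufctx/validate/kick flow implied by the comment
 * on nouveau_pushbuf_reloc() above.  `chan` is assumed to be a FIFO
 * channel object; method emission helpers are left out.
 */
#if 0
	struct nouveau_pushbuf *push;
	struct nouveau_bufctx *bufctx;

	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
	nouveau_bufctx_new(client, 1, &bufctx);
	nouveau_bufctx_refn(bufctx, 0, bo, NOUVEAU_BO_GART | NOUVEAU_BO_RD);
	nouveau_pushbuf_bufctx(push, bufctx);
	nouveau_pushbuf_validate(push);		/* required before reloc */
	/* ... emit methods, nouveau_pushbuf_reloc(), ... */
	nouveau_pushbuf_kick(push, push->channel);
#endif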
#define NOUVEAU_DEVICE_CLASS 0x80000000
#define NOUVEAU_FIFO_CHANNEL_CLASS 0x80000001
#define NOUVEAU_NOTIFIER_CLASS 0x80000002
struct nouveau_fifo {
struct nouveau_object *object;
uint32_t channel;
uint32_t pushbuf;
uint64_t unused1[3];
};
struct nv04_fifo {
struct nouveau_fifo base;
uint32_t vram;
uint32_t gart;
uint32_t notify;
};
struct nvc0_fifo {
struct nouveau_fifo base;
uint32_t notify;
};
#define NVE0_FIFO_ENGINE_GR 0x00000001
#define NVE0_FIFO_ENGINE_VP 0x00000002
#define NVE0_FIFO_ENGINE_PPP 0x00000004
#define NVE0_FIFO_ENGINE_BSP 0x00000008
#define NVE0_FIFO_ENGINE_CE0 0x00000010
#define NVE0_FIFO_ENGINE_CE1 0x00000020
#define NVE0_FIFO_ENGINE_ENC 0x00000040
struct nve0_fifo {
struct {
struct nouveau_fifo base;
uint32_t notify;
};
uint32_t engine;
};
struct nv04_notify {
struct nouveau_object *object;
uint32_t offset;
uint32_t length;
};
#endif


@@ -0,0 +1,45 @@
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
struct nv_device_v0 {
__u8 version;
__u8 pad01[7];
__u64 device; /* device identifier, ~0 for client default */
};
#define NV_DEVICE_V0_INFO 0x00
#define NV_DEVICE_V0_TIME 0x01
struct nv_device_info_v0 {
__u8 version;
#define NV_DEVICE_INFO_V0_IGP 0x00
#define NV_DEVICE_INFO_V0_PCI 0x01
#define NV_DEVICE_INFO_V0_AGP 0x02
#define NV_DEVICE_INFO_V0_PCIE 0x03
#define NV_DEVICE_INFO_V0_SOC 0x04
__u8 platform;
__u16 chipset; /* from NV_PMC_BOOT_0 */
__u8 revision; /* from NV_PMC_BOOT_0 */
#define NV_DEVICE_INFO_V0_TNT 0x01
#define NV_DEVICE_INFO_V0_CELSIUS 0x02
#define NV_DEVICE_INFO_V0_KELVIN 0x03
#define NV_DEVICE_INFO_V0_RANKINE 0x04
#define NV_DEVICE_INFO_V0_CURIE 0x05
#define NV_DEVICE_INFO_V0_TESLA 0x06
#define NV_DEVICE_INFO_V0_FERMI 0x07
#define NV_DEVICE_INFO_V0_KEPLER 0x08
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
__u8 family;
__u8 pad06[2];
__u64 ram_size;
__u64 ram_user;
char chip[16];
char name[64];
};
struct nv_device_time_v0 {
__u8 version;
__u8 pad01[7];
__u64 time;
};
#endif


@@ -0,0 +1,44 @@
#ifndef __NVIF_CL9097_H__
#define __NVIF_CL9097_H__
#define FERMI_A_ZBC_COLOR 0x00
#define FERMI_A_ZBC_DEPTH 0x01
struct fermi_a_zbc_color_v0 {
__u8 version;
#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
__u8 format;
__u8 index;
__u8 pad03[5];
__u32 ds[4];
__u32 l2[4];
};
struct fermi_a_zbc_depth_v0 {
__u8 version;
#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
__u8 format;
__u8 index;
__u8 pad03[5];
__u32 ds;
__u32 l2;
};
#endif


@@ -0,0 +1,141 @@
#ifndef __NVIF_CLASS_H__
#define __NVIF_CLASS_H__
/* these class numbers are made up by us, and not nvidia-assigned */
#define NVIF_CLASS_CONTROL /* if0001.h */ -1
#define NVIF_CLASS_PERFMON /* if0002.h */ -2
#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
/* the below match nvidia-assigned (either in hw, or sw) class numbers */
#define NV_DEVICE /* cl0080.h */ 0x00000080
#define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
#define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
#define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
#define FERMI_TWOD_A 0x0000902d
#define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
#define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
#define KEPLER_INLINE_TO_MEMORY_B 0x0000a140
#define NV04_DISP /* cl0046.h */ 0x00000046
#define NV03_CHANNEL_DMA /* cl506b.h */ 0x0000006b
#define NV10_CHANNEL_DMA /* cl506b.h */ 0x0000006e
#define NV17_CHANNEL_DMA /* cl506b.h */ 0x0000176e
#define NV40_CHANNEL_DMA /* cl506b.h */ 0x0000406e
#define NV50_CHANNEL_DMA /* cl506e.h */ 0x0000506e
#define G82_CHANNEL_DMA /* cl826e.h */ 0x0000826e
#define NV50_CHANNEL_GPFIFO /* cl506f.h */ 0x0000506f
#define G82_CHANNEL_GPFIFO /* cl826f.h */ 0x0000826f
#define FERMI_CHANNEL_GPFIFO /* cl906f.h */ 0x0000906f
#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
#define GT200_DISP /* cl5070.h */ 0x00008370
#define GT214_DISP /* cl5070.h */ 0x00008570
#define GT206_DISP /* cl5070.h */ 0x00008870
#define GF110_DISP /* cl5070.h */ 0x00009070
#define GK104_DISP /* cl5070.h */ 0x00009170
#define GK110_DISP /* cl5070.h */ 0x00009270
#define GM107_DISP /* cl5070.h */ 0x00009470
#define GM204_DISP /* cl5070.h */ 0x00009570
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
#define NV74_VP2 0x00007476
#define NV50_DISP_CURSOR /* cl507a.h */ 0x0000507a
#define G82_DISP_CURSOR /* cl507a.h */ 0x0000827a
#define GT214_DISP_CURSOR /* cl507a.h */ 0x0000857a
#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
#define GT214_DISP_OVERLAY /* cl507b.h */ 0x0000857b
#define GF110_DISP_OVERLAY /* cl507b.h */ 0x0000907b
#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
#define GT200_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000837c
#define GT214_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000857c
#define GF110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000907c
#define GK104_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000917c
#define GK110_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000927c
#define NV50_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000507d
#define G82_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000827d
#define GT200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000837d
#define GT214_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000857d
#define GT206_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000887d
#define GF110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000907d
#define GK104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000917d
#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
#define GM204_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
#define GT200_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000837e
#define GT214_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000857e
#define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
#define FERMI_A /* cl9097.h */ 0x00009097
#define FERMI_B /* cl9097.h */ 0x00009197
#define FERMI_C /* cl9097.h */ 0x00009297
#define KEPLER_A /* cl9097.h */ 0x0000a097
#define KEPLER_B /* cl9097.h */ 0x0000a197
#define KEPLER_C /* cl9097.h */ 0x0000a297
#define MAXWELL_A /* cl9097.h */ 0x0000b097
#define MAXWELL_B /* cl9097.h */ 0x0000b197
#define NV74_BSP 0x000074b0
#define GT212_MSVLD 0x000085b1
#define IGT21A_MSVLD 0x000086b1
#define G98_MSVLD 0x000088b1
#define GF100_MSVLD 0x000090b1
#define GK104_MSVLD 0x000095b1
#define GT212_MSPDEC 0x000085b2
#define G98_MSPDEC 0x000088b2
#define GF100_MSPDEC 0x000090b2
#define GK104_MSPDEC 0x000095b2
#define GT212_MSPPP 0x000085b3
#define G98_MSPPP 0x000088b3
#define GF100_MSPPP 0x000090b3
#define G98_SEC 0x000088b4
#define GT212_DMA 0x000085b5
#define FERMI_DMA 0x000090b5
#define KEPLER_DMA_COPY_A 0x0000a0b5
#define MAXWELL_DMA_COPY_A 0x0000b0b5
#define FERMI_DECOMPRESS 0x000090b8
#define FERMI_COMPUTE_A 0x000090c0
#define FERMI_COMPUTE_B 0x000091c0
#define KEPLER_COMPUTE_A 0x0000a0c0
#define KEPLER_COMPUTE_B 0x0000a1c0
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
#define NV74_CIPHER 0x000074c1
#endif


@@ -0,0 +1,38 @@
#ifndef __NVIF_IF0002_H__
#define __NVIF_IF0002_H__
#define NVIF_PERFMON_V0_QUERY_DOMAIN 0x00
#define NVIF_PERFMON_V0_QUERY_SIGNAL 0x01
#define NVIF_PERFMON_V0_QUERY_SOURCE 0x02
struct nvif_perfmon_query_domain_v0 {
__u8 version;
__u8 id;
__u8 counter_nr;
__u8 iter;
__u16 signal_nr;
__u8 pad05[2];
char name[64];
};
struct nvif_perfmon_query_signal_v0 {
__u8 version;
__u8 domain;
__u16 iter;
__u8 signal;
__u8 source_nr;
__u8 pad05[2];
char name[64];
};
struct nvif_perfmon_query_source_v0 {
__u8 version;
__u8 domain;
__u8 signal;
__u8 iter;
__u8 pad04[4];
__u32 source;
__u32 mask;
char name[64];
};
#endif


@@ -0,0 +1,33 @@
#ifndef __NVIF_IF0003_H__
#define __NVIF_IF0003_H__
struct nvif_perfdom_v0 {
__u8 version;
__u8 domain;
__u8 mode;
__u8 pad03[1];
struct {
__u8 signal[4];
__u64 source[4][8];
__u16 logic_op;
} ctr[4];
};
#define NVIF_PERFDOM_V0_INIT 0x00
#define NVIF_PERFDOM_V0_SAMPLE 0x01
#define NVIF_PERFDOM_V0_READ 0x02
struct nvif_perfdom_init {
};
struct nvif_perfdom_sample {
};
struct nvif_perfdom_read_v0 {
__u8 version;
__u8 pad01[7];
__u32 ctr[4];
__u32 clk;
__u8 pad04[4];
};
#endif


@@ -0,0 +1,132 @@
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
#define NVIF_VERSION_LATEST 0x0000000000000000ULL
struct nvif_ioctl_v0 {
__u8 version;
#define NVIF_IOCTL_V0_NOP 0x00
#define NVIF_IOCTL_V0_SCLASS 0x01
#define NVIF_IOCTL_V0_NEW 0x02
#define NVIF_IOCTL_V0_DEL 0x03
#define NVIF_IOCTL_V0_MTHD 0x04
#define NVIF_IOCTL_V0_RD 0x05
#define NVIF_IOCTL_V0_WR 0x06
#define NVIF_IOCTL_V0_MAP 0x07
#define NVIF_IOCTL_V0_UNMAP 0x08
#define NVIF_IOCTL_V0_NTFY_NEW 0x09
#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
#define NVIF_IOCTL_V0_NTFY_GET 0x0b
#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
__u8 type;
__u8 pad02[4];
#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
#define NVIF_IOCTL_V0_OWNER_ANY 0xff
__u8 owner;
#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
__u8 route;
__u64 token;
__u64 object;
__u8 data[]; /* ioctl data (below) */
};
struct nvif_ioctl_nop_v0 {
__u64 version;
};
struct nvif_ioctl_sclass_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 count;
__u8 pad02[6];
struct nvif_ioctl_sclass_oclass_v0 {
__s32 oclass;
__s16 minver;
__s16 maxver;
} oclass[];
};
struct nvif_ioctl_new_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 pad01[6];
__u8 route;
__u64 token;
__u64 object;
__u32 handle;
__s32 oclass;
__u8 data[]; /* class data (class.h) */
};
struct nvif_ioctl_del {
};
struct nvif_ioctl_rd_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 size;
__u8 pad02[2];
__u32 data;
__u64 addr;
};
struct nvif_ioctl_wr_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 size;
__u8 pad02[2];
__u32 data;
__u64 addr;
};
struct nvif_ioctl_map_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 pad01[3];
__u32 length;
__u64 handle;
};
struct nvif_ioctl_unmap {
};
struct nvif_ioctl_ntfy_new_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 event;
__u8 index;
__u8 pad03[5];
__u8 data[]; /* event request data (event.h) */
};
struct nvif_ioctl_ntfy_del_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 index;
__u8 pad02[6];
};
struct nvif_ioctl_ntfy_get_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 index;
__u8 pad02[6];
};
struct nvif_ioctl_ntfy_put_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 index;
__u8 pad02[6];
};
struct nvif_ioctl_mthd_v0 {
/* nvif_ioctl ... */
__u8 version;
__u8 method;
__u8 pad02[6];
__u8 data[]; /* method data (class.h) */
};
#endif


@@ -0,0 +1,28 @@
#ifndef __NVIF_UNPACK_H__
#define __NVIF_UNPACK_H__
#define nvif_unvers(r,d,s,m) ({ \
void **_data = (d); __u32 *_size = (s); int _ret = (r); \
if (_ret == -ENOSYS && *_size == sizeof(m)) { \
*_data = NULL; \
*_size = _ret = 0; \
} \
_ret; \
})
#define nvif_unpack(r,d,s,m,vl,vh,x) ({ \
void **_data = (d); __u32 *_size = (s); \
int _ret = (r), _vl = (vl), _vh = (vh); \
if (_ret == -ENOSYS && *_size >= sizeof(m) && \
(m).version >= _vl && (m).version <= _vh) { \
*_data = (__u8 *)*_data + sizeof(m); \
*_size = *_size - sizeof(m); \
if (_ret = 0, !(x)) { \
_ret = *_size ? -E2BIG : 0; \
*_data = NULL; \
*_size = 0; \
} \
} \
_ret; \
})
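/* Example (sketch, mirroring kernel-side users of these macros):
 * consuming a versioned argument struct inside a method handler, where
 * `data` and `size` describe the incoming ioctl payload.
 */
#if 0
	union {
		struct nv_device_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		/* args->v0 is validated; data/size now cover any trailing payload */
	}
#endif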
#endif


@@ -0,0 +1,256 @@
/*
* Copyright 2005 Stephane Marchesin.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
int channel;
uint32_t pushbuf_domains;
/* Notifier memory */
uint32_t notifier_handle;
/* DRM-enforced subchannel assignments */
struct {
uint32_t handle;
uint32_t grclass;
} subchan[8];
uint32_t nr_subchan;
};
struct drm_nouveau_channel_free {
int channel;
};
struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
int class;
};
struct drm_nouveau_notifierobj_alloc {
uint32_t channel;
uint32_t handle;
uint32_t size;
uint32_t offset;
};
struct drm_nouveau_gpuobj_free {
int channel;
uint32_t handle;
};
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
};
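/* Example (sketch): querying the chipset id through the (deprecated)
 * getparam interface with libdrm's drmCommandWriteRead() helper;
 * DRM_NOUVEAU_GETPARAM is defined further below.
 */
#if 0
	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
	if (!drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp))) {
		/* gp.value now holds the chipset id */
	}
#endif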
struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
};
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
#define NOUVEAU_GEM_TILE_ZETA 0x00000004
#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
struct drm_nouveau_gem_info {
__u32 handle;
__u32 domain;
__u64 size;
__u64 offset;
__u64 map_handle;
__u32 tile_mode;
__u32 tile_flags;
};
struct drm_nouveau_gem_new {
struct drm_nouveau_gem_info info;
__u32 channel_hint;
__u32 align;
};
#define NOUVEAU_GEM_MAX_BUFFERS 1024
struct drm_nouveau_gem_pushbuf_bo_presumed {
__u32 valid;
__u32 domain;
__u64 offset;
};
struct drm_nouveau_gem_pushbuf_bo {
__u64 user_priv;
__u32 handle;
__u32 read_domains;
__u32 write_domains;
__u32 valid_domains;
struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
};
#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR (1 << 2)
#define NOUVEAU_GEM_MAX_RELOCS 1024
struct drm_nouveau_gem_pushbuf_reloc {
__u32 reloc_bo_index;
__u32 reloc_bo_offset;
__u32 bo_index;
__u32 flags;
__u32 data;
__u32 vor;
__u32 tor;
};
#define NOUVEAU_GEM_MAX_PUSH 512
struct drm_nouveau_gem_pushbuf_push {
__u32 bo_index;
__u32 pad;
__u64 offset;
__u64 length;
};
struct drm_nouveau_gem_pushbuf {
__u32 channel;
__u32 nr_buffers;
__u64 buffers;
__u32 nr_relocs;
__u32 nr_push;
__u64 relocs;
__u64 push;
__u32 suffix0;
__u32 suffix1;
#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
__u64 vram_available;
__u64 gart_available;
};
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
struct drm_nouveau_gem_cpu_prep {
__u32 handle;
__u32 flags;
};
struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
struct drm_nouveau_svm_init {
__u64 unmanaged_addr;
__u64 unmanaged_size;
};
struct drm_nouveau_svm_bind {
__u64 header;
__u64 va_start;
__u64 va_end;
__u64 npages;
__u64 stride;
__u64 result;
__u64 reserved0;
__u64 reserved1;
};
#define NOUVEAU_SVM_BIND_COMMAND_SHIFT 0
#define NOUVEAU_SVM_BIND_COMMAND_BITS 8
#define NOUVEAU_SVM_BIND_COMMAND_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT 8
#define NOUVEAU_SVM_BIND_PRIORITY_BITS 8
#define NOUVEAU_SVM_BIND_PRIORITY_MASK ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_TARGET_SHIFT 16
#define NOUVEAU_SVM_BIND_TARGET_BITS 32
#define NOUVEAU_SVM_BIND_TARGET_MASK 0xffffffff
/*
 * Below is used to validate the ioctl argument; userspace can also use it
 * to make sure that no bits are set beyond known fields for a given
 * kernel version.
 */
#define NOUVEAU_SVM_BIND_VALID_BITS 48
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
/*
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migration to target memory.
 * result: number of pages successfully migrated to the target memory.
 */
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
/*
 * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
 */
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
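/* Example (sketch): packing a migrate-to-VRAM request into the
 * drm_nouveau_svm_bind header field using the shifts above.
 */
#if 0
	bind.header = ((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
		       << NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
		      ((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
		       << NOUVEAU_SVM_BIND_TARGET_SHIFT);
#endif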
#if defined(__cplusplus)
}
#endif
#endif /* __NOUVEAU_DRM_H__ */


@@ -0,0 +1,158 @@
/*
* Copyright 2013 Red Hat
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef QXL_DRM_H
#define QXL_DRM_H
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 *
 * Do not use pointers; use __u64 instead for 32-bit / 64-bit user/kernel
 * compatibility.  Keep fields aligned to their size.
 */
#define QXL_GEM_DOMAIN_CPU 0
#define QXL_GEM_DOMAIN_VRAM 1
#define QXL_GEM_DOMAIN_SURFACE 2
#define DRM_QXL_ALLOC 0x00
#define DRM_QXL_MAP 0x01
#define DRM_QXL_EXECBUFFER 0x02
#define DRM_QXL_UPDATE_AREA 0x03
#define DRM_QXL_GETPARAM 0x04
#define DRM_QXL_CLIENTCAP 0x05
#define DRM_QXL_ALLOC_SURF 0x06
struct drm_qxl_alloc {
__u32 size;
__u32 handle; /* 0 is an invalid handle */
};
struct drm_qxl_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
__u32 pad;
};
/*
 * dest is the bo we are writing the relocation into;
 * src is the bo we are relocating.
 * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr +
 * src_offset)
 */
#define QXL_RELOC_TYPE_BO 1
#define QXL_RELOC_TYPE_SURF 2
struct drm_qxl_reloc {
__u64 src_offset; /* offset into src_handle or src buffer */
__u64 dst_offset; /* offset in dest handle */
__u32 src_handle; /* dest handle to compute address from */
__u32 dst_handle; /* 0 if to command buffer */
__u32 reloc_type;
__u32 pad;
};
struct drm_qxl_command {
__u64 command; /* void* */
__u64 relocs; /* struct drm_qxl_reloc* */
__u32 type;
__u32 command_size;
__u32 relocs_num;
__u32 pad;
};
struct drm_qxl_execbuffer {
__u32 flags; /* for future use */
__u32 commands_num;
__u64 commands; /* struct drm_qxl_command* */
};
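/* Example (sketch): one command with a single buffer relocation, per the
 * reloc comment above.  Handles, offsets, and the command type are
 * assumed to come from earlier DRM_QXL_ALLOC calls and driver state;
 * DRM_IOCTL_QXL_EXECBUFFER is defined further below.
 */
#if 0
	struct drm_qxl_reloc reloc = {
		.src_offset = src_off,
		.dst_offset = dst_off,
		.src_handle = src_bo,
		.dst_handle = 0,		/* into the command buffer */
		.reloc_type = QXL_RELOC_TYPE_BO,
	};
	struct drm_qxl_command cmd = {
		.command      = (__u64)(uintptr_t)cmd_data,
		.relocs       = (__u64)(uintptr_t)&reloc,
		.type         = cmd_type,
		.command_size = cmd_size,
		.relocs_num   = 1,
	};
	struct drm_qxl_execbuffer eb = {
		.commands_num = 1,
		.commands     = (__u64)(uintptr_t)&cmd,
	};
	drmIoctl(fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);
#endif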
struct drm_qxl_update_area {
__u32 handle;
__u32 top;
__u32 left;
__u32 bottom;
__u32 right;
__u32 pad;
};
#define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */
#define QXL_PARAM_MAX_RELOCS 2
struct drm_qxl_getparam {
__u64 param;
__u64 value;
};
/* these are one-bit values */
struct drm_qxl_clientcap {
__u32 index;
__u32 pad;
};
struct drm_qxl_alloc_surf {
__u32 format;
__u32 width;
__u32 height;
__s32 stride;
__u32 handle;
__u32 pad;
};
#define DRM_IOCTL_QXL_ALLOC \
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc)
#define DRM_IOCTL_QXL_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map)
#define DRM_IOCTL_QXL_EXECBUFFER \
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\
struct drm_qxl_execbuffer)
#define DRM_IOCTL_QXL_UPDATE_AREA \
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\
struct drm_qxl_update_area)
#define DRM_IOCTL_QXL_GETPARAM \
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\
struct drm_qxl_getparam)
#define DRM_IOCTL_QXL_CLIENTCAP \
DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\
struct drm_qxl_clientcap)
#define DRM_IOCTL_QXL_ALLOC_SURF \
DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\
struct drm_qxl_alloc_surf)
#if defined(__cplusplus)
}
#endif
#endif


@@ -0,0 +1,336 @@
/* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
* Kevin E. Martin <martin@valinux.com>
*/
#ifndef __R128_DRM_H__
#define __R128_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (r128_sarea.h)
*/
#ifndef __R128_SAREA_DEFINES__
#define __R128_SAREA_DEFINES__
/* What needs to be changed for the current vertex buffer?
*/
#define R128_UPLOAD_CONTEXT 0x001
#define R128_UPLOAD_SETUP 0x002
#define R128_UPLOAD_TEX0 0x004
#define R128_UPLOAD_TEX1 0x008
#define R128_UPLOAD_TEX0IMAGES 0x010
#define R128_UPLOAD_TEX1IMAGES 0x020
#define R128_UPLOAD_CORE 0x040
#define R128_UPLOAD_MASKS 0x080
#define R128_UPLOAD_WINDOW 0x100
#define R128_UPLOAD_CLIPRECTS 0x200 /* handled client-side */
#define R128_REQUIRE_QUIESCENCE 0x400
#define R128_UPLOAD_ALL 0x7ff
#define R128_FRONT 0x1
#define R128_BACK 0x2
#define R128_DEPTH 0x4
/* Primitive types
*/
#define R128_POINTS 0x1
#define R128_LINES 0x2
#define R128_LINE_STRIP 0x3
#define R128_TRIANGLES 0x4
#define R128_TRIANGLE_FAN 0x5
#define R128_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
*/
#define R128_BUFFER_SIZE 16384
/* Byte offsets for indirect buffer data
*/
#define R128_INDEX_PRIM_OFFSET 20
#define R128_HOSTDATA_BLIT_OFFSET 32
/* Keep these small for testing.
*/
#define R128_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/AGP). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define R128_LOCAL_TEX_HEAP 0
#define R128_AGP_TEX_HEAP 1
#define R128_NR_TEX_HEAPS 2
#define R128_NR_TEX_REGIONS 64
#define R128_LOG_TEX_GRANULARITY 16
#define R128_NR_CONTEXT_REGS 12
#define R128_MAX_TEXTURE_LEVELS 11
#define R128_MAX_TEXTURE_UNITS 2
#endif /* __R128_SAREA_DEFINES__ */
typedef struct {
/* Context state - can be written in one large chunk */
unsigned int dst_pitch_offset_c;
unsigned int dp_gui_master_cntl_c;
unsigned int sc_top_left_c;
unsigned int sc_bottom_right_c;
unsigned int z_offset_c;
unsigned int z_pitch_c;
unsigned int z_sten_cntl_c;
unsigned int tex_cntl_c;
unsigned int misc_3d_state_cntl_reg;
unsigned int texture_clr_cmp_clr_c;
unsigned int texture_clr_cmp_msk_c;
unsigned int fog_color_c;
/* Texture state */
unsigned int tex_size_pitch_c;
unsigned int constant_color_c;
/* Setup state */
unsigned int pm4_vc_fpu_setup;
unsigned int setup_cntl;
/* Mask state */
unsigned int dp_write_mask;
unsigned int sten_ref_mask_c;
unsigned int plane_3d_mask_c;
/* Window state */
unsigned int window_xy_offset;
/* Core state */
unsigned int scale_3d_cntl;
} drm_r128_context_regs_t;
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int tex_cntl;
unsigned int tex_combine_cntl;
unsigned int tex_size_pitch;
unsigned int tex_offset[R128_MAX_TEXTURE_LEVELS];
unsigned int tex_border_color;
} drm_r128_texture_regs_t;
typedef struct drm_r128_sarea {
/* The channel for communication of state information to the kernel
* on firing a vertex buffer.
*/
drm_r128_context_regs_t context_state;
drm_r128_texture_regs_t tex_state[R128_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
} drm_r128_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmR128.h)
*/
/* Rage 128 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_R128_INIT 0x00
#define DRM_R128_CCE_START 0x01
#define DRM_R128_CCE_STOP 0x02
#define DRM_R128_CCE_RESET 0x03
#define DRM_R128_CCE_IDLE 0x04
/* 0x05 not used */
#define DRM_R128_RESET 0x06
#define DRM_R128_SWAP 0x07
#define DRM_R128_CLEAR 0x08
#define DRM_R128_VERTEX 0x09
#define DRM_R128_INDICES 0x0a
#define DRM_R128_BLIT 0x0b
#define DRM_R128_DEPTH 0x0c
#define DRM_R128_STIPPLE 0x0d
/* 0x0e not used */
#define DRM_R128_INDIRECT 0x0f
#define DRM_R128_FULLSCREEN 0x10
#define DRM_R128_CLEAR2 0x11
#define DRM_R128_GETPARAM 0x12
#define DRM_R128_FLIP 0x13
#define DRM_IOCTL_R128_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INIT, drm_r128_init_t)
#define DRM_IOCTL_R128_CCE_START DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_START)
#define DRM_IOCTL_R128_CCE_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CCE_STOP, drm_r128_cce_stop_t)
#define DRM_IOCTL_R128_CCE_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_RESET)
#define DRM_IOCTL_R128_CCE_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_R128_CCE_IDLE)
/* 0x05 not used */
#define DRM_IOCTL_R128_RESET DRM_IO( DRM_COMMAND_BASE + DRM_R128_RESET)
#define DRM_IOCTL_R128_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_R128_SWAP)
#define DRM_IOCTL_R128_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR, drm_r128_clear_t)
#define DRM_IOCTL_R128_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_R128_VERTEX, drm_r128_vertex_t)
#define DRM_IOCTL_R128_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_R128_INDICES, drm_r128_indices_t)
#define DRM_IOCTL_R128_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_R128_BLIT, drm_r128_blit_t)
#define DRM_IOCTL_R128_DEPTH DRM_IOW( DRM_COMMAND_BASE + DRM_R128_DEPTH, drm_r128_depth_t)
#define DRM_IOCTL_R128_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_R128_STIPPLE, drm_r128_stipple_t)
/* 0x0e not used */
#define DRM_IOCTL_R128_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2 DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
#define DRM_IOCTL_R128_GETPARAM DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
#define DRM_IOCTL_R128_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_R128_FLIP)
typedef struct drm_r128_init {
enum {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
unsigned long sarea_priv_offset;
int is_pci;
int cce_mode;
int cce_secure;
int ring_size;
int usec_timeout;
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
int flush;
int idle;
} drm_r128_cce_stop_t;
typedef struct drm_r128_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask;
} drm_r128_clear_t;
typedef struct drm_r128_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_r128_vertex_t;
typedef struct drm_r128_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_r128_indices_t;
typedef struct drm_r128_blit {
int idx;
int pitch;
int offset;
int format;
unsigned short x, y;
unsigned short width, height;
} drm_r128_blit_t;
typedef struct drm_r128_depth {
enum {
R128_WRITE_SPAN = 0x01,
R128_WRITE_PIXELS = 0x02,
R128_READ_SPAN = 0x03,
R128_READ_PIXELS = 0x04
} func;
int n;
int *x;
int *y;
unsigned int *buffer;
unsigned char *mask;
} drm_r128_depth_t;
typedef struct drm_r128_stipple {
unsigned int *mask;
} drm_r128_stipple_t;
typedef struct drm_r128_indirect {
int idx;
int start;
int end;
int discard;
} drm_r128_indirect_t;
typedef struct drm_r128_fullscreen {
enum {
R128_INIT_FULLSCREEN = 0x01,
R128_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_r128_fullscreen_t;
/* 2.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define R128_PARAM_IRQ_NR 1
typedef struct drm_r128_getparam {
int param;
void *value;
} drm_r128_getparam_t;
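/* Example (sketch): fetching the interrupt number through getparam with
 * libdrm's drmCommandWriteRead() helper.
 */
#if 0
	int irq = 0;
	drm_r128_getparam_t gp = { .param = R128_PARAM_IRQ_NR, .value = &irq };
	drmCommandWriteRead(fd, DRM_R128_GETPARAM, &gp, sizeof(gp));
#endif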
#if defined(__cplusplus)
}
#endif
#endif


@@ -0,0 +1,487 @@
CHIPSET(0x9400, R600_9400, R600)
CHIPSET(0x9401, R600_9401, R600)
CHIPSET(0x9402, R600_9402, R600)
CHIPSET(0x9403, R600_9403, R600)
CHIPSET(0x9405, R600_9405, R600)
CHIPSET(0x940A, R600_940A, R600)
CHIPSET(0x940B, R600_940B, R600)
CHIPSET(0x940F, R600_940F, R600)
CHIPSET(0x94C0, RV610_94C0, RV610)
CHIPSET(0x94C1, RV610_94C1, RV610)
CHIPSET(0x94C3, RV610_94C3, RV610)
CHIPSET(0x94C4, RV610_94C4, RV610)
CHIPSET(0x94C5, RV610_94C5, RV610)
CHIPSET(0x94C6, RV610_94C6, RV610)
CHIPSET(0x94C7, RV610_94C7, RV610)
CHIPSET(0x94C8, RV610_94C8, RV610)
CHIPSET(0x94C9, RV610_94C9, RV610)
CHIPSET(0x94CB, RV610_94CB, RV610)
CHIPSET(0x94CC, RV610_94CC, RV610)
CHIPSET(0x94CD, RV610_94CD, RV610)
CHIPSET(0x9580, RV630_9580, RV630)
CHIPSET(0x9581, RV630_9581, RV630)
CHIPSET(0x9583, RV630_9583, RV630)
CHIPSET(0x9586, RV630_9586, RV630)
CHIPSET(0x9587, RV630_9587, RV630)
CHIPSET(0x9588, RV630_9588, RV630)
CHIPSET(0x9589, RV630_9589, RV630)
CHIPSET(0x958A, RV630_958A, RV630)
CHIPSET(0x958B, RV630_958B, RV630)
CHIPSET(0x958C, RV630_958C, RV630)
CHIPSET(0x958D, RV630_958D, RV630)
CHIPSET(0x958E, RV630_958E, RV630)
CHIPSET(0x958F, RV630_958F, RV630)
CHIPSET(0x9500, RV670_9500, RV670)
CHIPSET(0x9501, RV670_9501, RV670)
CHIPSET(0x9504, RV670_9504, RV670)
CHIPSET(0x9505, RV670_9505, RV670)
CHIPSET(0x9506, RV670_9506, RV670)
CHIPSET(0x9507, RV670_9507, RV670)
CHIPSET(0x9508, RV670_9508, RV670)
CHIPSET(0x9509, RV670_9509, RV670)
CHIPSET(0x950F, RV670_950F, RV670)
CHIPSET(0x9511, RV670_9511, RV670)
CHIPSET(0x9515, RV670_9515, RV670)
CHIPSET(0x9517, RV670_9517, RV670)
CHIPSET(0x9519, RV670_9519, RV670)
CHIPSET(0x95C0, RV620_95C0, RV620)
CHIPSET(0x95C2, RV620_95C2, RV620)
CHIPSET(0x95C4, RV620_95C4, RV620)
CHIPSET(0x95C5, RV620_95C5, RV620)
CHIPSET(0x95C6, RV620_95C6, RV620)
CHIPSET(0x95C7, RV620_95C7, RV620)
CHIPSET(0x95C9, RV620_95C9, RV620)
CHIPSET(0x95CC, RV620_95CC, RV620)
CHIPSET(0x95CD, RV620_95CD, RV620)
CHIPSET(0x95CE, RV620_95CE, RV620)
CHIPSET(0x95CF, RV620_95CF, RV620)
CHIPSET(0x9590, RV635_9590, RV635)
CHIPSET(0x9591, RV635_9591, RV635)
CHIPSET(0x9593, RV635_9593, RV635)
CHIPSET(0x9595, RV635_9595, RV635)
CHIPSET(0x9596, RV635_9596, RV635)
CHIPSET(0x9597, RV635_9597, RV635)
CHIPSET(0x9598, RV635_9598, RV635)
CHIPSET(0x9599, RV635_9599, RV635)
CHIPSET(0x959B, RV635_959B, RV635)
CHIPSET(0x9610, RS780_9610, RS780)
CHIPSET(0x9611, RS780_9611, RS780)
CHIPSET(0x9612, RS780_9612, RS780)
CHIPSET(0x9613, RS780_9613, RS780)
CHIPSET(0x9614, RS780_9614, RS780)
CHIPSET(0x9615, RS780_9615, RS780)
CHIPSET(0x9616, RS780_9616, RS780)
CHIPSET(0x9710, RS880_9710, RS880)
CHIPSET(0x9711, RS880_9711, RS880)
CHIPSET(0x9712, RS880_9712, RS880)
CHIPSET(0x9713, RS880_9713, RS880)
CHIPSET(0x9714, RS880_9714, RS880)
CHIPSET(0x9715, RS880_9715, RS880)
CHIPSET(0x9440, RV770_9440, RV770)
CHIPSET(0x9441, RV770_9441, RV770)
CHIPSET(0x9442, RV770_9442, RV770)
CHIPSET(0x9443, RV770_9443, RV770)
CHIPSET(0x9444, RV770_9444, RV770)
CHIPSET(0x9446, RV770_9446, RV770)
CHIPSET(0x944A, RV770_944A, RV770)
CHIPSET(0x944B, RV770_944B, RV770)
CHIPSET(0x944C, RV770_944C, RV770)
CHIPSET(0x944E, RV770_944E, RV770)
CHIPSET(0x9450, RV770_9450, RV770)
CHIPSET(0x9452, RV770_9452, RV770)
CHIPSET(0x9456, RV770_9456, RV770)
CHIPSET(0x945A, RV770_945A, RV770)
CHIPSET(0x945B, RV770_945B, RV770)
CHIPSET(0x945E, RV770_945E, RV770)
CHIPSET(0x9460, RV790_9460, RV770)
CHIPSET(0x9462, RV790_9462, RV770)
CHIPSET(0x946A, RV770_946A, RV770)
CHIPSET(0x946B, RV770_946B, RV770)
CHIPSET(0x947A, RV770_947A, RV770)
CHIPSET(0x947B, RV770_947B, RV770)
CHIPSET(0x9480, RV730_9480, RV730)
CHIPSET(0x9487, RV730_9487, RV730)
CHIPSET(0x9488, RV730_9488, RV730)
CHIPSET(0x9489, RV730_9489, RV730)
CHIPSET(0x948A, RV730_948A, RV730)
CHIPSET(0x948F, RV730_948F, RV730)
CHIPSET(0x9490, RV730_9490, RV730)
CHIPSET(0x9491, RV730_9491, RV730)
CHIPSET(0x9495, RV730_9495, RV730)
CHIPSET(0x9498, RV730_9498, RV730)
CHIPSET(0x949C, RV730_949C, RV730)
CHIPSET(0x949E, RV730_949E, RV730)
CHIPSET(0x949F, RV730_949F, RV730)
CHIPSET(0x9540, RV710_9540, RV710)
CHIPSET(0x9541, RV710_9541, RV710)
CHIPSET(0x9542, RV710_9542, RV710)
CHIPSET(0x954E, RV710_954E, RV710)
CHIPSET(0x954F, RV710_954F, RV710)
CHIPSET(0x9552, RV710_9552, RV710)
CHIPSET(0x9553, RV710_9553, RV710)
CHIPSET(0x9555, RV710_9555, RV710)
CHIPSET(0x9557, RV710_9557, RV710)
CHIPSET(0x955F, RV710_955F, RV710)
CHIPSET(0x94A0, RV740_94A0, RV740)
CHIPSET(0x94A1, RV740_94A1, RV740)
CHIPSET(0x94A3, RV740_94A3, RV740)
CHIPSET(0x94B1, RV740_94B1, RV740)
CHIPSET(0x94B3, RV740_94B3, RV740)
CHIPSET(0x94B4, RV740_94B4, RV740)
CHIPSET(0x94B5, RV740_94B5, RV740)
CHIPSET(0x94B9, RV740_94B9, RV740)
CHIPSET(0x68E0, CEDAR_68E0, CEDAR)
CHIPSET(0x68E1, CEDAR_68E1, CEDAR)
CHIPSET(0x68E4, CEDAR_68E4, CEDAR)
CHIPSET(0x68E5, CEDAR_68E5, CEDAR)
CHIPSET(0x68E8, CEDAR_68E8, CEDAR)
CHIPSET(0x68E9, CEDAR_68E9, CEDAR)
CHIPSET(0x68F1, CEDAR_68F1, CEDAR)
CHIPSET(0x68F2, CEDAR_68F2, CEDAR)
CHIPSET(0x68F8, CEDAR_68F8, CEDAR)
CHIPSET(0x68F9, CEDAR_68F9, CEDAR)
CHIPSET(0x68FA, CEDAR_68FA, CEDAR)
CHIPSET(0x68FE, CEDAR_68FE, CEDAR)
CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
CHIPSET(0x68C7, REDWOOD_68C7, REDWOOD)
CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
CHIPSET(0x68D9, REDWOOD_68D9, REDWOOD)
CHIPSET(0x68DA, REDWOOD_68DA, REDWOOD)
CHIPSET(0x68DE, REDWOOD_68DE, REDWOOD)
CHIPSET(0x68A0, JUNIPER_68A0, JUNIPER)
CHIPSET(0x68A1, JUNIPER_68A1, JUNIPER)
CHIPSET(0x68A8, JUNIPER_68A8, JUNIPER)
CHIPSET(0x68A9, JUNIPER_68A9, JUNIPER)
CHIPSET(0x68B0, JUNIPER_68B0, JUNIPER)
CHIPSET(0x68B8, JUNIPER_68B8, JUNIPER)
CHIPSET(0x68B9, JUNIPER_68B9, JUNIPER)
CHIPSET(0x68BA, JUNIPER_68BA, JUNIPER)
CHIPSET(0x68BE, JUNIPER_68BE, JUNIPER)
CHIPSET(0x68BF, JUNIPER_68BF, JUNIPER)
CHIPSET(0x6880, CYPRESS_6880, CYPRESS)
CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
CHIPSET(0x688C, CYPRESS_688C, CYPRESS)
CHIPSET(0x688D, CYPRESS_688D, CYPRESS)
CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
CHIPSET(0x689E, CYPRESS_689E, CYPRESS)
CHIPSET(0x689C, HEMLOCK_689C, HEMLOCK)
CHIPSET(0x689D, HEMLOCK_689D, HEMLOCK)
CHIPSET(0x9802, PALM_9802, PALM)
CHIPSET(0x9803, PALM_9803, PALM)
CHIPSET(0x9804, PALM_9804, PALM)
CHIPSET(0x9805, PALM_9805, PALM)
CHIPSET(0x9806, PALM_9806, PALM)
CHIPSET(0x9807, PALM_9807, PALM)
CHIPSET(0x9808, PALM_9808, PALM)
CHIPSET(0x9809, PALM_9809, PALM)
CHIPSET(0x980A, PALM_980A, PALM)
CHIPSET(0x9640, SUMO_9640, SUMO)
CHIPSET(0x9641, SUMO_9641, SUMO)
CHIPSET(0x9642, SUMO2_9642, SUMO2)
CHIPSET(0x9643, SUMO2_9643, SUMO2)
CHIPSET(0x9644, SUMO2_9644, SUMO2)
CHIPSET(0x9645, SUMO2_9645, SUMO2)
CHIPSET(0x9647, SUMO_9647, SUMO)
CHIPSET(0x9648, SUMO_9648, SUMO)
CHIPSET(0x9649, SUMO2_9649, SUMO2)
CHIPSET(0x964a, SUMO_964A, SUMO)
CHIPSET(0x964b, SUMO_964B, SUMO)
CHIPSET(0x964c, SUMO_964C, SUMO)
CHIPSET(0x964e, SUMO_964E, SUMO)
CHIPSET(0x964f, SUMO_964F, SUMO)
CHIPSET(0x6700, CAYMAN_6700, CAYMAN)
CHIPSET(0x6701, CAYMAN_6701, CAYMAN)
CHIPSET(0x6702, CAYMAN_6702, CAYMAN)
CHIPSET(0x6703, CAYMAN_6703, CAYMAN)
CHIPSET(0x6704, CAYMAN_6704, CAYMAN)
CHIPSET(0x6705, CAYMAN_6705, CAYMAN)
CHIPSET(0x6706, CAYMAN_6706, CAYMAN)
CHIPSET(0x6707, CAYMAN_6707, CAYMAN)
CHIPSET(0x6708, CAYMAN_6708, CAYMAN)
CHIPSET(0x6709, CAYMAN_6709, CAYMAN)
CHIPSET(0x6718, CAYMAN_6718, CAYMAN)
CHIPSET(0x6719, CAYMAN_6719, CAYMAN)
CHIPSET(0x671C, CAYMAN_671C, CAYMAN)
CHIPSET(0x671D, CAYMAN_671D, CAYMAN)
CHIPSET(0x671F, CAYMAN_671F, CAYMAN)
CHIPSET(0x6720, BARTS_6720, BARTS)
CHIPSET(0x6721, BARTS_6721, BARTS)
CHIPSET(0x6722, BARTS_6722, BARTS)
CHIPSET(0x6723, BARTS_6723, BARTS)
CHIPSET(0x6724, BARTS_6724, BARTS)
CHIPSET(0x6725, BARTS_6725, BARTS)
CHIPSET(0x6726, BARTS_6726, BARTS)
CHIPSET(0x6727, BARTS_6727, BARTS)
CHIPSET(0x6728, BARTS_6728, BARTS)
CHIPSET(0x6729, BARTS_6729, BARTS)
CHIPSET(0x6738, BARTS_6738, BARTS)
CHIPSET(0x6739, BARTS_6739, BARTS)
CHIPSET(0x673E, BARTS_673E, BARTS)
CHIPSET(0x6740, TURKS_6740, TURKS)
CHIPSET(0x6741, TURKS_6741, TURKS)
CHIPSET(0x6742, TURKS_6742, TURKS)
CHIPSET(0x6743, TURKS_6743, TURKS)
CHIPSET(0x6744, TURKS_6744, TURKS)
CHIPSET(0x6745, TURKS_6745, TURKS)
CHIPSET(0x6746, TURKS_6746, TURKS)
CHIPSET(0x6747, TURKS_6747, TURKS)
CHIPSET(0x6748, TURKS_6748, TURKS)
CHIPSET(0x6749, TURKS_6749, TURKS)
CHIPSET(0x674A, TURKS_674A, TURKS)
CHIPSET(0x6750, TURKS_6750, TURKS)
CHIPSET(0x6751, TURKS_6751, TURKS)
CHIPSET(0x6758, TURKS_6758, TURKS)
CHIPSET(0x6759, TURKS_6759, TURKS)
CHIPSET(0x675B, TURKS_675B, TURKS)
CHIPSET(0x675D, TURKS_675D, TURKS)
CHIPSET(0x675F, TURKS_675F, TURKS)
CHIPSET(0x6840, TURKS_6840, TURKS)
CHIPSET(0x6841, TURKS_6841, TURKS)
CHIPSET(0x6842, TURKS_6842, TURKS)
CHIPSET(0x6843, TURKS_6843, TURKS)
CHIPSET(0x6849, TURKS_6849, TURKS)
CHIPSET(0x6850, TURKS_6850, TURKS)
CHIPSET(0x6858, TURKS_6858, TURKS)
CHIPSET(0x6859, TURKS_6859, TURKS)
CHIPSET(0x6760, CAICOS_6760, CAICOS)
CHIPSET(0x6761, CAICOS_6761, CAICOS)
CHIPSET(0x6762, CAICOS_6762, CAICOS)
CHIPSET(0x6763, CAICOS_6763, CAICOS)
CHIPSET(0x6764, CAICOS_6764, CAICOS)
CHIPSET(0x6765, CAICOS_6765, CAICOS)
CHIPSET(0x6766, CAICOS_6766, CAICOS)
CHIPSET(0x6767, CAICOS_6767, CAICOS)
CHIPSET(0x6768, CAICOS_6768, CAICOS)
CHIPSET(0x6770, CAICOS_6770, CAICOS)
CHIPSET(0x6771, CAICOS_6771, CAICOS)
CHIPSET(0x6772, CAICOS_6772, CAICOS)
CHIPSET(0x6778, CAICOS_6778, CAICOS)
CHIPSET(0x6779, CAICOS_6779, CAICOS)
CHIPSET(0x677B, CAICOS_677B, CAICOS)
CHIPSET(0x9900, ARUBA_9900, ARUBA)
CHIPSET(0x9901, ARUBA_9901, ARUBA)
CHIPSET(0x9903, ARUBA_9903, ARUBA)
CHIPSET(0x9904, ARUBA_9904, ARUBA)
CHIPSET(0x9905, ARUBA_9905, ARUBA)
CHIPSET(0x9906, ARUBA_9906, ARUBA)
CHIPSET(0x9907, ARUBA_9907, ARUBA)
CHIPSET(0x9908, ARUBA_9908, ARUBA)
CHIPSET(0x9909, ARUBA_9909, ARUBA)
CHIPSET(0x990A, ARUBA_990A, ARUBA)
CHIPSET(0x990B, ARUBA_990B, ARUBA)
CHIPSET(0x990C, ARUBA_990C, ARUBA)
CHIPSET(0x990D, ARUBA_990D, ARUBA)
CHIPSET(0x990E, ARUBA_990E, ARUBA)
CHIPSET(0x990F, ARUBA_990F, ARUBA)
CHIPSET(0x9910, ARUBA_9910, ARUBA)
CHIPSET(0x9913, ARUBA_9913, ARUBA)
CHIPSET(0x9917, ARUBA_9917, ARUBA)
CHIPSET(0x9918, ARUBA_9918, ARUBA)
CHIPSET(0x9919, ARUBA_9919, ARUBA)
CHIPSET(0x9990, ARUBA_9990, ARUBA)
CHIPSET(0x9991, ARUBA_9991, ARUBA)
CHIPSET(0x9992, ARUBA_9992, ARUBA)
CHIPSET(0x9993, ARUBA_9993, ARUBA)
CHIPSET(0x9994, ARUBA_9994, ARUBA)
CHIPSET(0x9995, ARUBA_9995, ARUBA)
CHIPSET(0x9996, ARUBA_9996, ARUBA)
CHIPSET(0x9997, ARUBA_9997, ARUBA)
CHIPSET(0x9998, ARUBA_9998, ARUBA)
CHIPSET(0x9999, ARUBA_9999, ARUBA)
CHIPSET(0x999A, ARUBA_999A, ARUBA)
CHIPSET(0x999B, ARUBA_999B, ARUBA)
CHIPSET(0x999C, ARUBA_999C, ARUBA)
CHIPSET(0x999D, ARUBA_999D, ARUBA)
CHIPSET(0x99A0, ARUBA_99A0, ARUBA)
CHIPSET(0x99A2, ARUBA_99A2, ARUBA)
CHIPSET(0x99A4, ARUBA_99A4, ARUBA)
CHIPSET(0x6780, TAHITI_6780, TAHITI)
CHIPSET(0x6784, TAHITI_6784, TAHITI)
CHIPSET(0x6788, TAHITI_6788, TAHITI)
CHIPSET(0x678A, TAHITI_678A, TAHITI)
CHIPSET(0x6790, TAHITI_6790, TAHITI)
CHIPSET(0x6791, TAHITI_6791, TAHITI)
CHIPSET(0x6792, TAHITI_6792, TAHITI)
CHIPSET(0x6798, TAHITI_6798, TAHITI)
CHIPSET(0x6799, TAHITI_6799, TAHITI)
CHIPSET(0x679A, TAHITI_679A, TAHITI)
CHIPSET(0x679B, TAHITI_679B, TAHITI)
CHIPSET(0x679E, TAHITI_679E, TAHITI)
CHIPSET(0x679F, TAHITI_679F, TAHITI)
CHIPSET(0x6800, PITCAIRN_6800, PITCAIRN)
CHIPSET(0x6801, PITCAIRN_6801, PITCAIRN)
CHIPSET(0x6802, PITCAIRN_6802, PITCAIRN)
CHIPSET(0x6806, PITCAIRN_6806, PITCAIRN)
CHIPSET(0x6808, PITCAIRN_6808, PITCAIRN)
CHIPSET(0x6809, PITCAIRN_6809, PITCAIRN)
CHIPSET(0x6810, PITCAIRN_6810, PITCAIRN)
CHIPSET(0x6811, PITCAIRN_6811, PITCAIRN)
CHIPSET(0x6816, PITCAIRN_6816, PITCAIRN)
CHIPSET(0x6817, PITCAIRN_6817, PITCAIRN)
CHIPSET(0x6818, PITCAIRN_6818, PITCAIRN)
CHIPSET(0x6819, PITCAIRN_6819, PITCAIRN)
CHIPSET(0x684C, PITCAIRN_684C, PITCAIRN)
CHIPSET(0x6820, VERDE_6820, VERDE)
CHIPSET(0x6821, VERDE_6821, VERDE)
CHIPSET(0x6822, VERDE_6822, VERDE)
CHIPSET(0x6823, VERDE_6823, VERDE)
CHIPSET(0x6824, VERDE_6824, VERDE)
CHIPSET(0x6825, VERDE_6825, VERDE)
CHIPSET(0x6826, VERDE_6826, VERDE)
CHIPSET(0x6827, VERDE_6827, VERDE)
CHIPSET(0x6828, VERDE_6828, VERDE)
CHIPSET(0x6829, VERDE_6829, VERDE)
CHIPSET(0x682A, VERDE_682A, VERDE)
CHIPSET(0x682B, VERDE_682B, VERDE)
CHIPSET(0x682C, VERDE_682C, VERDE)
CHIPSET(0x682D, VERDE_682D, VERDE)
CHIPSET(0x682F, VERDE_682F, VERDE)
CHIPSET(0x6830, VERDE_6830, VERDE)
CHIPSET(0x6831, VERDE_6831, VERDE)
CHIPSET(0x6835, VERDE_6835, VERDE)
CHIPSET(0x6837, VERDE_6837, VERDE)
CHIPSET(0x6838, VERDE_6838, VERDE)
CHIPSET(0x6839, VERDE_6839, VERDE)
CHIPSET(0x683B, VERDE_683B, VERDE)
CHIPSET(0x683D, VERDE_683D, VERDE)
CHIPSET(0x683F, VERDE_683F, VERDE)
CHIPSET(0x6600, OLAND_6600, OLAND)
CHIPSET(0x6601, OLAND_6601, OLAND)
CHIPSET(0x6602, OLAND_6602, OLAND)
CHIPSET(0x6603, OLAND_6603, OLAND)
CHIPSET(0x6604, OLAND_6604, OLAND)
CHIPSET(0x6605, OLAND_6605, OLAND)
CHIPSET(0x6606, OLAND_6606, OLAND)
CHIPSET(0x6607, OLAND_6607, OLAND)
CHIPSET(0x6608, OLAND_6608, OLAND)
CHIPSET(0x6610, OLAND_6610, OLAND)
CHIPSET(0x6611, OLAND_6611, OLAND)
CHIPSET(0x6613, OLAND_6613, OLAND)
CHIPSET(0x6617, OLAND_6617, OLAND)
CHIPSET(0x6620, OLAND_6620, OLAND)
CHIPSET(0x6621, OLAND_6621, OLAND)
CHIPSET(0x6623, OLAND_6623, OLAND)
CHIPSET(0x6631, OLAND_6631, OLAND)
CHIPSET(0x6660, HAINAN_6660, HAINAN)
CHIPSET(0x6663, HAINAN_6663, HAINAN)
CHIPSET(0x6664, HAINAN_6664, HAINAN)
CHIPSET(0x6665, HAINAN_6665, HAINAN)
CHIPSET(0x6667, HAINAN_6667, HAINAN)
CHIPSET(0x666F, HAINAN_666F, HAINAN)
CHIPSET(0x6640, BONAIRE_6640, BONAIRE)
CHIPSET(0x6641, BONAIRE_6641, BONAIRE)
CHIPSET(0x6646, BONAIRE_6646, BONAIRE)
CHIPSET(0x6647, BONAIRE_6647, BONAIRE)
CHIPSET(0x6649, BONAIRE_6649, BONAIRE)
CHIPSET(0x6650, BONAIRE_6650, BONAIRE)
CHIPSET(0x6651, BONAIRE_6651, BONAIRE)
CHIPSET(0x6658, BONAIRE_6658, BONAIRE)
CHIPSET(0x665C, BONAIRE_665C, BONAIRE)
CHIPSET(0x665D, BONAIRE_665D, BONAIRE)
CHIPSET(0x665F, BONAIRE_665F, BONAIRE)
CHIPSET(0x9830, KABINI_9830, KABINI)
CHIPSET(0x9831, KABINI_9831, KABINI)
CHIPSET(0x9832, KABINI_9832, KABINI)
CHIPSET(0x9833, KABINI_9833, KABINI)
CHIPSET(0x9834, KABINI_9834, KABINI)
CHIPSET(0x9835, KABINI_9835, KABINI)
CHIPSET(0x9836, KABINI_9836, KABINI)
CHIPSET(0x9837, KABINI_9837, KABINI)
CHIPSET(0x9838, KABINI_9838, KABINI)
CHIPSET(0x9839, KABINI_9839, KABINI)
CHIPSET(0x983A, KABINI_983A, KABINI)
CHIPSET(0x983B, KABINI_983B, KABINI)
CHIPSET(0x983C, KABINI_983C, KABINI)
CHIPSET(0x983D, KABINI_983D, KABINI)
CHIPSET(0x983E, KABINI_983E, KABINI)
CHIPSET(0x983F, KABINI_983F, KABINI)
CHIPSET(0x9850, MULLINS_9850, MULLINS)
CHIPSET(0x9851, MULLINS_9851, MULLINS)
CHIPSET(0x9852, MULLINS_9852, MULLINS)
CHIPSET(0x9853, MULLINS_9853, MULLINS)
CHIPSET(0x9854, MULLINS_9854, MULLINS)
CHIPSET(0x9855, MULLINS_9855, MULLINS)
CHIPSET(0x9856, MULLINS_9856, MULLINS)
CHIPSET(0x9857, MULLINS_9857, MULLINS)
CHIPSET(0x9858, MULLINS_9858, MULLINS)
CHIPSET(0x9859, MULLINS_9859, MULLINS)
CHIPSET(0x985A, MULLINS_985A, MULLINS)
CHIPSET(0x985B, MULLINS_985B, MULLINS)
CHIPSET(0x985C, MULLINS_985C, MULLINS)
CHIPSET(0x985D, MULLINS_985D, MULLINS)
CHIPSET(0x985E, MULLINS_985E, MULLINS)
CHIPSET(0x985F, MULLINS_985F, MULLINS)
CHIPSET(0x1304, KAVERI_1304, KAVERI)
CHIPSET(0x1305, KAVERI_1305, KAVERI)
CHIPSET(0x1306, KAVERI_1306, KAVERI)
CHIPSET(0x1307, KAVERI_1307, KAVERI)
CHIPSET(0x1309, KAVERI_1309, KAVERI)
CHIPSET(0x130A, KAVERI_130A, KAVERI)
CHIPSET(0x130B, KAVERI_130B, KAVERI)
CHIPSET(0x130C, KAVERI_130C, KAVERI)
CHIPSET(0x130D, KAVERI_130D, KAVERI)
CHIPSET(0x130E, KAVERI_130E, KAVERI)
CHIPSET(0x130F, KAVERI_130F, KAVERI)
CHIPSET(0x1310, KAVERI_1310, KAVERI)
CHIPSET(0x1311, KAVERI_1311, KAVERI)
CHIPSET(0x1312, KAVERI_1312, KAVERI)
CHIPSET(0x1313, KAVERI_1313, KAVERI)
CHIPSET(0x1315, KAVERI_1315, KAVERI)
CHIPSET(0x1316, KAVERI_1316, KAVERI)
CHIPSET(0x1317, KAVERI_1317, KAVERI)
CHIPSET(0x1318, KAVERI_1318, KAVERI)
CHIPSET(0x131B, KAVERI_131B, KAVERI)
CHIPSET(0x131C, KAVERI_131C, KAVERI)
CHIPSET(0x131D, KAVERI_131D, KAVERI)
CHIPSET(0x67A0, HAWAII_67A0, HAWAII)
CHIPSET(0x67A1, HAWAII_67A1, HAWAII)
CHIPSET(0x67A2, HAWAII_67A2, HAWAII)
CHIPSET(0x67A8, HAWAII_67A8, HAWAII)
CHIPSET(0x67A9, HAWAII_67A9, HAWAII)
CHIPSET(0x67AA, HAWAII_67AA, HAWAII)
CHIPSET(0x67B0, HAWAII_67B0, HAWAII)
CHIPSET(0x67B1, HAWAII_67B1, HAWAII)
CHIPSET(0x67B8, HAWAII_67B8, HAWAII)
CHIPSET(0x67B9, HAWAII_67B9, HAWAII)
CHIPSET(0x67BA, HAWAII_67BA, HAWAII)
CHIPSET(0x67BE, HAWAII_67BE, HAWAII)
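/* Example (sketch): this file is an X-macro table; consumers define
 * CHIPSET() before including it, e.g. to map a PCI id to a chip family.
 * The include name below is illustrative, not the file's actual path.
 */
#if 0
#define CHIPSET(pci_id, name, family) case pci_id: return CHIP_##family;
#include "radeon_pci_ids.h"	/* hypothetical name for this table */
#undef CHIPSET
#endif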


@@ -0,0 +1,73 @@
/*
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_BO_H
#define RADEON_BO_H
#include <stdio.h>
#include <stdint.h>
/* bo object */
#define RADEON_BO_FLAGS_MACRO_TILE 1
#define RADEON_BO_FLAGS_MICRO_TILE 2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
struct radeon_bo_manager;
struct radeon_cs;
struct radeon_bo {
void *ptr;
uint32_t flags;
uint32_t handle;
uint32_t size;
};
void radeon_bo_debug(struct radeon_bo *bo, const char *op);
struct radeon_bo *radeon_bo_open(struct radeon_bo_manager *bom,
uint32_t handle,
uint32_t size,
uint32_t alignment,
uint32_t domains,
uint32_t flags);
void radeon_bo_ref(struct radeon_bo *bo);
struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo);
int radeon_bo_map(struct radeon_bo *bo, int write);
int radeon_bo_unmap(struct radeon_bo *bo);
int radeon_bo_wait(struct radeon_bo *bo);
int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain);
int radeon_bo_set_tiling(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch);
int radeon_bo_get_tiling(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch);
int radeon_bo_is_static(struct radeon_bo *bo);
int radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs);
uint32_t radeon_bo_get_handle(struct radeon_bo *bo);
uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo);
#endif
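
A minimal usage sketch of the BO API declared above, assuming the GEM backend from radeon_bo_gem.h and the RADEON_GEM_DOMAIN_VRAM constant from radeon_drm.h; error handling is elided, and passing a handle of 0 to radeon_bo_open() is assumed here to allocate a new buffer.

#include <string.h>
#include "radeon_bo.h"
#include "radeon_bo_gem.h"
#include "radeon_drm.h"

static void bo_lifecycle_example(int drm_fd)
{
	struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(drm_fd);
	struct radeon_bo *bo = radeon_bo_open(bom, 0 /* new buffer */, 4096,
					      4096, RADEON_GEM_DOMAIN_VRAM, 0);

	if (bo && radeon_bo_map(bo, 1 /* write */) == 0) {
		memset(bo->ptr, 0, bo->size); /* CPU access through the mapping */
		radeon_bo_unmap(bo);
	}
	if (bo)
		radeon_bo_unref(bo); /* drops our reference */
	radeon_bo_manager_gem_dtor(bom);
}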


@@ -0,0 +1,48 @@
/*
* Copyright © 2008 Dave Airlie
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Dave Airlie
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_BO_GEM_H
#define RADEON_BO_GEM_H
#include "radeon_bo.h"
struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd);
void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom);
uint32_t radeon_gem_name_bo(struct radeon_bo *bo);
void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo);
int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name);
int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle);
struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
int fd_handle,
uint32_t size);
#endif
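
A short sketch of dma-buf sharing with the PRIME helpers above: export a BO to a file descriptor, then re-import it (here on the same manager, for brevity). Error handling is elided and radeon_bo.h/radeon_bo_gem.h are assumed included.

static int share_bo_example(struct radeon_bo_manager *bom,
			    struct radeon_bo *bo)
{
	int prime_fd = -1;

	/* Export: wraps the GEM handle in a dma-buf file descriptor. */
	if (radeon_gem_prime_share_bo(bo, &prime_fd))
		return -1;

	/* Import: the receiving side turns the fd back into a radeon_bo. */
	struct radeon_bo *imported =
		radeon_gem_bo_open_prime(bom, prime_fd, bo->size);

	if (imported)
		radeon_bo_unref(imported);
	return 0;
}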


@@ -0,0 +1,45 @@
#ifndef RADEON_BO_INT
#define RADEON_BO_INT
#include <stdint.h>
#include "radeon_bo.h"
struct radeon_bo_manager {
const struct radeon_bo_funcs *funcs;
int fd;
};
struct radeon_bo_int {
void *ptr;
uint32_t flags;
uint32_t handle;
uint32_t size;
/* private members */
uint32_t alignment;
uint32_t domains;
unsigned cref;
struct radeon_bo_manager *bom;
uint32_t space_accounted;
uint32_t referenced_in_cs;
};
/* bo functions */
struct radeon_bo_funcs {
struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
uint32_t handle,
uint32_t size,
uint32_t alignment,
uint32_t domains,
uint32_t flags);
void (*bo_ref)(struct radeon_bo_int *bo);
struct radeon_bo *(*bo_unref)(struct radeon_bo_int *bo);
int (*bo_map)(struct radeon_bo_int *bo, int write);
int (*bo_unmap)(struct radeon_bo_int *bo);
int (*bo_wait)(struct radeon_bo_int *bo);
int (*bo_is_static)(struct radeon_bo_int *bo);
int (*bo_set_tiling)(struct radeon_bo_int *bo, uint32_t tiling_flags,
uint32_t pitch);
int (*bo_get_tiling)(struct radeon_bo_int *bo, uint32_t *tiling_flags,
uint32_t *pitch);
int (*bo_is_busy)(struct radeon_bo_int *bo, uint32_t *domain);
int (*bo_is_referenced_by_cs)(struct radeon_bo_int *bo, struct radeon_cs *cs);
};
#endif


@@ -0,0 +1,141 @@
/*
* Copyright © 2008 Nicolai Haehnle
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_CS_H
#define RADEON_CS_H
#include <stdint.h>
#include <string.h>
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
struct radeon_cs_reloc {
struct radeon_bo *bo;
uint32_t read_domain;
uint32_t write_domain;
uint32_t flags;
};
#define RADEON_CS_SPACE_OK 0
#define RADEON_CS_SPACE_OP_TO_BIG 1
#define RADEON_CS_SPACE_FLUSH 2
struct radeon_cs {
uint32_t *packets;
unsigned cdw;
unsigned ndw;
unsigned section_ndw;
unsigned section_cdw;
};
#define MAX_SPACE_BOS (32)
struct radeon_cs_manager;
extern struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
uint32_t ndw);
extern int radeon_cs_begin(struct radeon_cs *cs,
uint32_t ndw,
const char *file,
const char *func, int line);
extern int radeon_cs_end(struct radeon_cs *cs,
const char *file,
const char *func,
int line);
extern int radeon_cs_emit(struct radeon_cs *cs);
extern int radeon_cs_destroy(struct radeon_cs *cs);
extern int radeon_cs_erase(struct radeon_cs *cs);
extern int radeon_cs_need_flush(struct radeon_cs *cs);
extern void radeon_cs_print(struct radeon_cs *cs, FILE *file);
extern void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit);
extern void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data);
extern int radeon_cs_write_reloc(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t read_domain,
uint32_t write_domain,
uint32_t flags);
extern uint32_t radeon_cs_get_id(struct radeon_cs *cs);
/*
* Add a persistent BO to the list.
* A persistent BO is one that will be referenced across flushes,
* e.g. a colorbuffer or textures.
* The list gets reset when a new "operation" happens, where an operation
* is a state emission (color/textures etc.) followed by a batch of vertices.
*/
void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t read_domains,
uint32_t write_domain);
/* reset the persistent BO list */
void radeon_cs_space_reset_bos(struct radeon_cs *cs);
/* do a space check with the current persistent BO list */
int radeon_cs_space_check(struct radeon_cs *cs);
/* do a space check with the current persistent BO list and a temporary BO
* a temporary BO is like a DMA buffer, which gets flushed with the
* command buffer */
int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
struct radeon_bo *bo,
uint32_t read_domains,
uint32_t write_domain);
static inline void radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
{
cs->packets[cs->cdw++] = dword;
if (cs->section_ndw) {
cs->section_cdw++;
}
}
static inline void radeon_cs_write_qword(struct radeon_cs *cs, uint64_t qword)
{
memcpy(cs->packets + cs->cdw, &qword, sizeof(uint64_t));
cs->cdw += 2;
if (cs->section_ndw) {
cs->section_cdw += 2;
}
}
static inline void radeon_cs_write_table(struct radeon_cs *cs,
const void *data, uint32_t size)
{
memcpy(cs->packets + cs->cdw, data, size * 4);
cs->cdw += size;
if (cs->section_ndw) {
cs->section_cdw += size;
}
}
#endif
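
A sketch of the intended flow of the API above: account for the BOs a batch will reference, space-check, then write and emit packets. The manager constructor comes from radeon_cs_gem.h and RADEON_GEM_DOMAIN_VRAM from radeon_drm.h; the packet payload is a placeholder, not a valid command stream, and error handling is elided.

#include "radeon_cs.h"
#include "radeon_cs_gem.h"

static void cs_submit_example(int drm_fd, struct radeon_bo *color_bo)
{
	struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(drm_fd);
	struct radeon_cs *cs = radeon_cs_create(csm, 1024 /* dwords */);

	/* Account for every BO the commands will reference, then check
	 * that the kernel can fit them before writing any packets. */
	radeon_cs_space_add_persistent_bo(cs, color_bo,
					  0, RADEON_GEM_DOMAIN_VRAM);
	if (radeon_cs_space_check(cs) == RADEON_CS_SPACE_OK) {
		radeon_cs_begin(cs, 3, __FILE__, __func__, __LINE__);
		radeon_cs_write_dword(cs, 0 /* placeholder packet header */);
		radeon_cs_write_reloc(cs, color_bo,
				      0, RADEON_GEM_DOMAIN_VRAM, 0);
		radeon_cs_end(cs, __FILE__, __func__, __LINE__);

		radeon_cs_emit(cs);  /* hand the stream to the kernel */
		radeon_cs_erase(cs); /* recycle the buffer for the next batch */
	}
	radeon_cs_destroy(cs);
	radeon_cs_manager_gem_dtor(csm);
}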


@@ -0,0 +1,41 @@
/*
* Copyright © 2008 Nicolai Haehnle
* Copyright © 2008 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
#ifndef RADEON_CS_GEM_H
#define RADEON_CS_GEM_H
#include "radeon_cs.h"
struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd);
void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm);
#endif


@@ -0,0 +1,67 @@
#ifndef _RADEON_CS_INT_H_
#define _RADEON_CS_INT_H_
#include <stdio.h>
#include "radeon_cs.h"
struct radeon_cs_space_check {
struct radeon_bo_int *bo;
uint32_t read_domains;
uint32_t write_domain;
uint32_t new_accounted;
};
struct radeon_cs_int {
/* keep the layout of these public fields in sync with struct radeon_cs */
uint32_t *packets;
unsigned cdw;
unsigned ndw;
unsigned section_ndw;
unsigned section_cdw;
/* private members */
struct radeon_cs_manager *csm;
void *relocs;
unsigned crelocs;
unsigned relocs_total_size;
const char *section_file;
const char *section_func;
int section_line;
struct radeon_cs_space_check bos[MAX_SPACE_BOS];
int bo_count;
void (*space_flush_fn)(void *);
void *space_flush_data;
uint32_t id;
};
/* cs functions */
struct radeon_cs_funcs {
struct radeon_cs_int *(*cs_create)(struct radeon_cs_manager *csm,
uint32_t ndw);
int (*cs_write_reloc)(struct radeon_cs_int *cs,
struct radeon_bo *bo,
uint32_t read_domain,
uint32_t write_domain,
uint32_t flags);
int (*cs_begin)(struct radeon_cs_int *cs,
uint32_t ndw,
const char *file,
const char *func,
int line);
int (*cs_end)(struct radeon_cs_int *cs,
const char *file, const char *func,
int line);
int (*cs_emit)(struct radeon_cs_int *cs);
int (*cs_destroy)(struct radeon_cs_int *cs);
int (*cs_erase)(struct radeon_cs_int *cs);
int (*cs_need_flush)(struct radeon_cs_int *cs);
void (*cs_print)(struct radeon_cs_int *cs, FILE *file);
};
struct radeon_cs_manager {
const struct radeon_cs_funcs *funcs;
int fd;
int32_t vram_limit, gart_limit;
int32_t vram_write_used, gart_write_used;
int32_t read_used;
};
#endif
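
The public radeon_cs_*() entry points are assumed to dispatch through this function table; a minimal sketch of a backend wiring it up (all *_impl names are hypothetical and the bodies are stubs):

static struct radeon_cs_int *cs_create_impl(struct radeon_cs_manager *csm,
					    uint32_t ndw)
{
	(void)csm; (void)ndw;
	return NULL; /* a real backend allocates its CS object here */
}

static int cs_emit_impl(struct radeon_cs_int *cs)
{
	(void)cs;
	return 0; /* a real backend submits the stream to the kernel here */
}

/* Partially-filled table; callbacks left unset stay NULL in this sketch. */
static const struct radeon_cs_funcs example_cs_funcs = {
	.cs_create = cs_create_impl,
	.cs_emit = cs_emit_impl,
};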

File diff suppressed because it is too large


@@ -0,0 +1,149 @@
/*
* Copyright © 2011 Red Hat All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <jglisse@redhat.com>
*/
#ifndef RADEON_SURFACE_H
#define RADEON_SURFACE_H
/* Note:
*
* For texture arrays, the n layers are stored one after the other within
* each mipmap level. A value of 0 is always valid for fields that can be
* used as hints.
*/
#define RADEON_SURF_MAX_LEVEL 32
#define RADEON_SURF_TYPE_MASK 0xFF
#define RADEON_SURF_TYPE_SHIFT 0
#define RADEON_SURF_TYPE_1D 0
#define RADEON_SURF_TYPE_2D 1
#define RADEON_SURF_TYPE_3D 2
#define RADEON_SURF_TYPE_CUBEMAP 3
#define RADEON_SURF_TYPE_1D_ARRAY 4
#define RADEON_SURF_TYPE_2D_ARRAY 5
#define RADEON_SURF_MODE_MASK 0xFF
#define RADEON_SURF_MODE_SHIFT 8
#define RADEON_SURF_MODE_LINEAR 0
#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
#define RADEON_SURF_MODE_1D 2
#define RADEON_SURF_MODE_2D 3
#define RADEON_SURF_SCANOUT (1 << 16)
#define RADEON_SURF_ZBUFFER (1 << 17)
#define RADEON_SURF_SBUFFER (1 << 18)
#define RADEON_SURF_Z_OR_SBUFFER (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)
#define RADEON_SURF_HAS_SBUFFER_MIPTREE (1 << 19)
#define RADEON_SURF_HAS_TILE_MODE_INDEX (1 << 20)
#define RADEON_SURF_FMASK (1 << 21)
#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
/* the fields up to and including mode need to match the r6 struct so that
* we can reuse the same function for linear & linear aligned modes
*/
struct radeon_surface_level {
uint64_t offset;
uint64_t slice_size;
uint32_t npix_x;
uint32_t npix_y;
uint32_t npix_z;
uint32_t nblk_x;
uint32_t nblk_y;
uint32_t nblk_z;
uint32_t pitch_bytes;
uint32_t mode;
};
enum si_tiling_mode {
SI_TILING_AUTO = 0,
SI_TILING_COLOR_1D,
SI_TILING_COLOR_1D_SCANOUT,
SI_TILING_COLOR_2D_8BPP,
SI_TILING_COLOR_2D_16BPP,
SI_TILING_COLOR_2D_32BPP,
SI_TILING_COLOR_2D_64BPP,
SI_TILING_COLOR_2D_SCANOUT_16BPP,
SI_TILING_COLOR_2D_SCANOUT_32BPP,
SI_TILING_COLOR_LINEAR,
SI_TILING_STENCIL_1D,
SI_TILING_STENCIL_2D,
SI_TILING_STENCIL_2D_2AA,
SI_TILING_STENCIL_2D_4AA,
SI_TILING_STENCIL_2D_8AA,
SI_TILING_DEPTH_1D,
SI_TILING_DEPTH_2D,
SI_TILING_DEPTH_2D_2AA,
SI_TILING_DEPTH_2D_4AA,
SI_TILING_DEPTH_2D_8AA,
SI_TILING_LAST_MODE,
};
struct radeon_surface {
uint32_t npix_x;
uint32_t npix_y;
uint32_t npix_z;
uint32_t blk_w;
uint32_t blk_h;
uint32_t blk_d;
uint32_t array_size;
uint32_t last_level;
uint32_t bpe;
uint32_t nsamples;
uint32_t flags;
/* The following fields are updated/filled by the allocator. The caller is
* allowed to set some of the values, but they are used as hints and can be
* overridden (things like bankw/bankh on evergreen, for instance).
*/
uint64_t bo_size;
uint64_t bo_alignment;
/* apply to eg */
uint32_t bankw;
uint32_t bankh;
uint32_t mtilea;
uint32_t tile_split;
uint32_t stencil_tile_split;
uint64_t stencil_offset;
struct radeon_surface_level level[RADEON_SURF_MAX_LEVEL];
struct radeon_surface_level stencil_level[RADEON_SURF_MAX_LEVEL];
uint32_t tiling_index[RADEON_SURF_MAX_LEVEL];
uint32_t stencil_tiling_index[RADEON_SURF_MAX_LEVEL];
};
struct radeon_surface_manager *radeon_surface_manager_new(int fd);
void radeon_surface_manager_free(struct radeon_surface_manager *surf_man);
int radeon_surface_init(struct radeon_surface_manager *surf_man,
struct radeon_surface *surf);
int radeon_surface_best(struct radeon_surface_manager *surf_man,
struct radeon_surface *surf);
#endif
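
A sketch of the usage implied above: describe a surface, let the allocator compute the layout, then read back the results. Only fields documented in the struct are set; zeroing the struct first relies on the "0 is a valid hint" rule, and error handling is elided.

#include <string.h>
#include "radeon_surface.h"

static void surface_example(int drm_fd)
{
	struct radeon_surface_manager *surf_man =
		radeon_surface_manager_new(drm_fd);
	struct radeon_surface surf;

	memset(&surf, 0, sizeof(surf)); /* 0 = valid hint everywhere */
	surf.npix_x = 256;
	surf.npix_y = 256;
	surf.npix_z = 1;
	surf.blk_w = surf.blk_h = surf.blk_d = 1; /* uncompressed format */
	surf.array_size = 1;
	surf.last_level = 0; /* no mipmaps */
	surf.bpe = 4;        /* bytes per element, e.g. RGBA8 */
	surf.nsamples = 1;
	surf.flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
		     RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);

	if (radeon_surface_init(surf_man, &surf) == 0) {
		/* surf.bo_size, surf.bo_alignment and
		 * surf.level[0].pitch_bytes are now filled in. */
	}
	radeon_surface_manager_free(surf_man);
}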


@@ -0,0 +1,220 @@
/* savage_drm.h -- Public header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SAVAGE_DRM_H__
#define __SAVAGE_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define SAVAGE_CARD_HEAP 0
#define SAVAGE_AGP_HEAP 1
#define SAVAGE_NR_TEX_HEAPS 2
#define SAVAGE_NR_TEX_REGIONS 16
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
#endif /* __SAVAGE_SAREA_DEFINES__ */
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS + 1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
/* Savage-specific ioctls
*/
#define DRM_SAVAGE_BCI_INIT 0x00
#define DRM_SAVAGE_BCI_CMDBUF 0x01
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
#define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
#define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
#define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
#define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
#define SAVAGE_DMA_PCI 1
#define SAVAGE_DMA_AGP 3
typedef struct drm_savage_init {
enum {
SAVAGE_INIT_BCI = 1,
SAVAGE_CLEANUP_BCI = 2
} func;
unsigned int sarea_priv_offset;
/* some parameters */
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* physical locations of non-permanent maps */
unsigned long status_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
unsigned long cmd_dma_offset;
} drm_savage_init_t;
typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
typedef struct drm_savage_cmdbuf {
/* command buffer in client's address space */
drm_savage_cmd_header_t *cmd_addr;
unsigned int size; /* size of the command buffer in 64bit units */
unsigned int dma_idx; /* DMA buffer index to use */
int discard; /* discard DMA buffer when done */
/* vertex buffer in client's address space */
unsigned int *vb_addr;
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
struct drm_clip_rect *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
typedef struct drm_savage_event {
unsigned int count;
unsigned int flags;
} drm_savage_event_emit_t, drm_savage_event_wait_t;
/* Commands for the cmdbuf ioctl
*/
#define SAVAGE_CMD_STATE 0 /* a range of state registers */
#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices from client vertex buffer */
#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
#define SAVAGE_CMD_SWAP 6 /* swap buffers */
/* Primitive types
*/
#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
* shading on s3d */
/* Skip flags (vertex format)
*/
#define SAVAGE_SKIP_Z 0x01
#define SAVAGE_SKIP_W 0x02
#define SAVAGE_SKIP_C0 0x04
#define SAVAGE_SKIP_C1 0x08
#define SAVAGE_SKIP_S0 0x10
#define SAVAGE_SKIP_T0 0x20
#define SAVAGE_SKIP_ST0 0x30
#define SAVAGE_SKIP_S1 0x40
#define SAVAGE_SKIP_T1 0x80
#define SAVAGE_SKIP_ST1 0xc0
#define SAVAGE_SKIP_ALL_S3D 0x3f
#define SAVAGE_SKIP_ALL_S4 0xff
/* Buffer names for clear command
*/
#define SAVAGE_FRONT 0x1
#define SAVAGE_BACK 0x2
#define SAVAGE_DEPTH 0x4
/* 64-bit command header
*/
union drm_savage_cmd_header {
struct {
unsigned char cmd; /* command */
unsigned char pad0;
unsigned short pad1;
unsigned short pad2;
unsigned short pad3;
} cmd; /* generic */
struct {
unsigned char cmd;
unsigned char global; /* need idle engine? */
unsigned short count; /* number of consecutive registers */
unsigned short start; /* first register */
unsigned short pad3;
} state; /* SAVAGE_CMD_STATE */
struct {
unsigned char cmd;
unsigned char prim; /* primitive type */
unsigned short skip; /* vertex format (skip flags) */
unsigned short count; /* number of vertices */
unsigned short start; /* first vertex in DMA/vertex buffer */
} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
struct {
unsigned char cmd;
unsigned char prim;
unsigned short skip;
unsigned short count; /* number of indices that follow */
unsigned short pad3;
} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
struct {
unsigned char cmd;
unsigned char pad0;
unsigned short pad1;
unsigned int flags;
} clear0; /* SAVAGE_CMD_CLEAR */
struct {
unsigned int mask;
unsigned int value;
} clear1; /* SAVAGE_CMD_CLEAR data */
};
#if defined(__cplusplus)
}
#endif
#endif
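
A sketch of the event interface defined above, issued as raw ioctls (a real client would more likely go through libdrm's drmCommandWriteRead()): emit a tag once the 3D engine is idle, then wait on it. Error handling is elided.

#include <sys/ioctl.h>

static int savage_fence_example(int drm_fd)
{
	drm_savage_event_emit_t emit = { .count = 0, .flags = SAVAGE_WAIT_3D };
	drm_savage_event_wait_t wait;

	/* The emit IOCTL is _IOWR: the kernel writes the event count back. */
	if (ioctl(drm_fd, DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT, &emit))
		return -1;

	wait.count = emit.count;
	wait.flags = SAVAGE_WAIT_3D;
	return ioctl(drm_fd, DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT, &wait);
}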


@@ -0,0 +1,77 @@
/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
/*
* Copyright 2005 Eric Anholt
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __SIS_DRM_H__
#define __SIS_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* SiS specific ioctls */
#define NOT_USED_0_3
#define DRM_SIS_FB_ALLOC 0x04
#define DRM_SIS_FB_FREE 0x05
#define NOT_USED_6_12
#define DRM_SIS_AGP_INIT 0x13
#define DRM_SIS_AGP_ALLOC 0x14
#define DRM_SIS_AGP_FREE 0x15
#define DRM_SIS_FB_INIT 0x16
#define DRM_IOCTL_SIS_FB_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_FB_ALLOC, drm_sis_mem_t)
#define DRM_IOCTL_SIS_FB_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_FREE, drm_sis_mem_t)
#define DRM_IOCTL_SIS_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_INIT, drm_sis_agp_t)
#define DRM_IOCTL_SIS_AGP_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_SIS_AGP_ALLOC, drm_sis_mem_t)
#define DRM_IOCTL_SIS_AGP_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_AGP_FREE, drm_sis_mem_t)
#define DRM_IOCTL_SIS_FB_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SIS_FB_INIT, drm_sis_fb_t)
/*
#define DRM_IOCTL_SIS_FLIP DRM_IOW( 0x48, drm_sis_flip_t)
#define DRM_IOCTL_SIS_FLIP_INIT DRM_IO( 0x49)
#define DRM_IOCTL_SIS_FLIP_FINAL DRM_IO( 0x50)
*/
typedef struct {
int context;
unsigned int offset;
unsigned int size;
unsigned long free;
} drm_sis_mem_t;
typedef struct {
unsigned int offset, size;
} drm_sis_agp_t;
typedef struct {
unsigned int offset, size;
} drm_sis_fb_t;
#if defined(__cplusplus)
}
#endif
#endif /* __SIS_DRM_H__ */
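
A minimal sketch of the framebuffer-memory ioctls above: allocate a block, then free it. The kernel is assumed to fill in mem.offset and mem.free on a successful allocation; error handling is elided.

#include <sys/ioctl.h>

static void sis_fb_example(int drm_fd, int drm_context)
{
	drm_sis_mem_t mem = {
		.context = drm_context,
		.size = 64 * 1024,
	};

	if (ioctl(drm_fd, DRM_IOCTL_SIS_FB_ALLOC, &mem) == 0) {
		/* mem.offset now locates the block in FB memory */
		ioctl(drm_fd, DRM_IOCTL_SIS_FB_FREE, &mem);
	}
}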


@@ -0,0 +1,681 @@
/*
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _TEGRA_DRM_H_
#define _TEGRA_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define DRM_TEGRA_GEM_CREATE_TILED (1 << 0)
#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
/**
* struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
*/
struct drm_tegra_gem_create {
/**
* @size:
*
* The size, in bytes, of the buffer object to be created.
*/
__u64 size;
/**
* @flags:
*
* A bitmask of flags that influence the creation of GEM objects:
*
* DRM_TEGRA_GEM_CREATE_TILED
* Use the 16x16 tiling format for this buffer.
*
* DRM_TEGRA_GEM_CREATE_BOTTOM_UP
* The buffer has a bottom-up layout.
*/
__u32 flags;
/**
* @handle:
*
* The handle of the created GEM object. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u32 handle;
};
/**
* struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
*/
struct drm_tegra_gem_mmap {
/**
* @handle:
*
* Handle of the GEM object to obtain an mmap offset for.
*/
__u32 handle;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
/**
* @offset:
*
* The mmap offset for the given GEM object. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u64 offset;
};
/**
* struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
*/
struct drm_tegra_syncpt_read {
/**
* @id:
*
* ID of the syncpoint to read the current value from.
*/
__u32 id;
/**
* @value:
*
* The current syncpoint value. Set by the kernel upon successful
* completion of the IOCTL.
*/
__u32 value;
};
/**
* struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
*/
struct drm_tegra_syncpt_incr {
/**
* @id:
*
* ID of the syncpoint to increment.
*/
__u32 id;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
*/
struct drm_tegra_syncpt_wait {
/**
* @id:
*
* ID of the syncpoint to wait on.
*/
__u32 id;
/**
* @thresh:
*
* Threshold value for which to wait.
*/
__u32 thresh;
/**
* @timeout:
*
* Timeout, in milliseconds, to wait.
*/
__u32 timeout;
/**
* @value:
*
* The new syncpoint value after the wait. Set by the kernel upon
* successful completion of the IOCTL.
*/
__u32 value;
};
#define DRM_TEGRA_NO_TIMEOUT (0xffffffff)
/**
* struct drm_tegra_open_channel - parameters for the open channel IOCTL
*/
struct drm_tegra_open_channel {
/**
* @client:
*
* The client ID for this channel.
*/
__u32 client;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
/**
* @context:
*
* The application context of this channel. Set by the kernel upon
* successful completion of the IOCTL. This context needs to be passed
* to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
*/
__u64 context;
};
/**
* struct drm_tegra_close_channel - parameters for the close channel IOCTL
*/
struct drm_tegra_close_channel {
/**
* @context:
*
* The application context of this channel. This is obtained from the
* DRM_TEGRA_OPEN_CHANNEL IOCTL.
*/
__u64 context;
};
/**
* struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
*/
struct drm_tegra_get_syncpt {
/**
* @context:
*
* The application context identifying the channel for which to obtain
* the syncpoint ID.
*/
__u64 context;
/**
* @index:
*
* Index of the client syncpoint for which to obtain the ID.
*/
__u32 index;
/**
* @id:
*
* The ID of the given syncpoint. Set by the kernel upon successful
* completion of the IOCTL.
*/
__u32 id;
};
/**
* struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
*/
struct drm_tegra_get_syncpt_base {
/**
* @context:
*
* The application context identifying for which channel to obtain the
* wait base.
*/
__u64 context;
/**
* @syncpt:
*
* ID of the syncpoint for which to obtain the wait base.
*/
__u32 syncpt;
/**
* @id:
*
* The ID of the wait base corresponding to the client syncpoint. Set
* by the kernel upon successful completion of the IOCTL.
*/
__u32 id;
};
/**
* struct drm_tegra_syncpt - syncpoint increment operation
*/
struct drm_tegra_syncpt {
/**
* @id:
*
* ID of the syncpoint to operate on.
*/
__u32 id;
/**
* @incrs:
*
* Number of increments to perform for the syncpoint.
*/
__u32 incrs;
};
/**
* struct drm_tegra_cmdbuf - structure describing a command buffer
*/
struct drm_tegra_cmdbuf {
/**
* @handle:
*
* Handle to a GEM object containing the command buffer.
*/
__u32 handle;
/**
* @offset:
*
* Offset, in bytes, into the GEM object identified by @handle at
* which the command buffer starts.
*/
__u32 offset;
/**
* @words:
*
* Number of 32-bit words in this command buffer.
*/
__u32 words;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_reloc - GEM object relocation structure
*/
struct drm_tegra_reloc {
struct {
/**
* @cmdbuf.handle:
*
* Handle to the GEM object containing the command buffer for
* which to perform this GEM object relocation.
*/
__u32 handle;
/**
* @cmdbuf.offset:
*
* Offset, in bytes, into the command buffer at which to
* insert the relocated address.
*/
__u32 offset;
} cmdbuf;
struct {
/**
* @target.handle:
*
* Handle to the GEM object to be relocated.
*/
__u32 handle;
/**
* @target.offset:
*
* Offset, in bytes, into the target GEM object at which the
* relocated data starts.
*/
__u32 offset;
} target;
/**
* @shift:
*
* The number of bits by which to shift relocated addresses.
*/
__u32 shift;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_waitchk - wait check structure
*/
struct drm_tegra_waitchk {
/**
* @handle:
*
* Handle to the GEM object containing a command stream on which to
* perform the wait check.
*/
__u32 handle;
/**
* @offset:
*
* Offset, in bytes, of the location in the command stream to perform
* the wait check on.
*/
__u32 offset;
/**
* @syncpt:
*
* ID of the syncpoint to wait check.
*/
__u32 syncpt;
/**
* @thresh:
*
* Threshold value for which to check.
*/
__u32 thresh;
};
/**
* struct drm_tegra_submit - job submission structure
*/
struct drm_tegra_submit {
/**
* @context:
*
* The application context identifying the channel to use for the
* execution of this job.
*/
__u64 context;
/**
* @num_syncpts:
*
* The number of syncpoints operated on by this job. This defines the
* length of the array pointed to by @syncpts.
*/
__u32 num_syncpts;
/**
* @num_cmdbufs:
*
* The number of command buffers to execute as part of this job. This
* defines the length of the array pointed to by @cmdbufs.
*/
__u32 num_cmdbufs;
/**
* @num_relocs:
*
* The number of relocations to perform before executing this job.
* This defines the length of the array pointed to by @relocs.
*/
__u32 num_relocs;
/**
* @num_waitchks:
*
* The number of wait checks to perform as part of this job. This
* defines the length of the array pointed to by @waitchks.
*/
__u32 num_waitchks;
/**
* @waitchk_mask:
*
* Bitmask of valid wait checks.
*/
__u32 waitchk_mask;
/**
* @timeout:
*
* Timeout, in milliseconds, before this job is cancelled.
*/
__u32 timeout;
/**
* @syncpts:
*
* A pointer to an array of &struct drm_tegra_syncpt structures that
* specify the syncpoint operations performed as part of this job.
* The number of elements in the array must be equal to the value
* given by @num_syncpts.
*/
__u64 syncpts;
/**
* @cmdbufs:
*
* A pointer to an array of &struct drm_tegra_cmdbuf structures that
* define the command buffers to execute as part of this job. The
* number of elements in the array must be equal to the value given
* by @num_cmdbufs.
*/
__u64 cmdbufs;
/**
* @relocs:
*
* A pointer to an array of &struct drm_tegra_reloc structures that
* specify the relocations that need to be performed before executing
* this job. The number of elements in the array must be equal to the
* value given by @num_relocs.
*/
__u64 relocs;
/**
* @waitchks:
*
* A pointer to an array of &struct drm_tegra_waitchk structures that
* specify the wait checks to be performed while executing this job.
* The number of elements in the array must be equal to the value
* given by @num_waitchks.
*/
__u64 waitchks;
/**
* @fence:
*
* The threshold of the syncpoint associated with this job after it
* has been completed. Set by the kernel upon successful completion of
* the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
* wait for this job to be finished.
*/
__u32 fence;
/**
* @reserved:
*
* This field is reserved for future use. Must be 0.
*/
__u32 reserved[5];
};
#define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
#define DRM_TEGRA_GEM_TILING_MODE_TILED 1
#define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
/**
* struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
*/
struct drm_tegra_gem_set_tiling {
/**
* @handle:
*
* Handle to the GEM object for which to set the tiling parameters.
*/
__u32 handle;
/**
* @mode:
*
* The tiling mode to set. Must be one of:
*
* DRM_TEGRA_GEM_TILING_MODE_PITCH
* pitch linear format
*
* DRM_TEGRA_GEM_TILING_MODE_TILED
* 16x16 tiling format
*
* DRM_TEGRA_GEM_TILING_MODE_BLOCK
* 16Bx2 tiling format
*/
__u32 mode;
/**
* @value:
*
* The value to set for the tiling mode parameter.
*/
__u32 value;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
/**
* struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
*/
struct drm_tegra_gem_get_tiling {
/**
* @handle:
*
* Handle to the GEM object for which to query the tiling parameters.
*/
__u32 handle;
/**
* @mode:
*
* The tiling mode currently associated with the GEM object. Set by
* the kernel upon successful completion of the IOCTL.
*/
__u32 mode;
/**
* @value:
*
* The tiling mode parameter currently associated with the GEM object.
* Set by the kernel upon successful completion of the IOCTL.
*/
__u32 value;
/**
* @pad:
*
* Structure padding that may be used in the future. Must be 0.
*/
__u32 pad;
};
#define DRM_TEGRA_GEM_BOTTOM_UP (1 << 0)
#define DRM_TEGRA_GEM_FLAGS (DRM_TEGRA_GEM_BOTTOM_UP)
/**
* struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
*/
struct drm_tegra_gem_set_flags {
/**
* @handle:
*
* Handle to the GEM object for which to set the flags.
*/
__u32 handle;
/**
* @flags:
*
* The flags to set for the GEM object.
*/
__u32 flags;
};
/**
* struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
*/
struct drm_tegra_gem_get_flags {
/**
* @handle:
*
* Handle to the GEM object for which to query the flags.
*/
__u32 handle;
/**
* @flags:
*
* The flags currently associated with the GEM object. Set by the
* kernel upon successful completion of the IOCTL.
*/
__u32 flags;
};
#define DRM_TEGRA_GEM_CREATE 0x00
#define DRM_TEGRA_GEM_MMAP 0x01
#define DRM_TEGRA_SYNCPT_READ 0x02
#define DRM_TEGRA_SYNCPT_INCR 0x03
#define DRM_TEGRA_SYNCPT_WAIT 0x04
#define DRM_TEGRA_OPEN_CHANNEL 0x05
#define DRM_TEGRA_CLOSE_CHANNEL 0x06
#define DRM_TEGRA_GET_SYNCPT 0x07
#define DRM_TEGRA_SUBMIT 0x08
#define DRM_TEGRA_GET_SYNCPT_BASE 0x09
#define DRM_TEGRA_GEM_SET_TILING 0x0a
#define DRM_TEGRA_GEM_GET_TILING 0x0b
#define DRM_TEGRA_GEM_SET_FLAGS 0x0c
#define DRM_TEGRA_GEM_GET_FLAGS 0x0d
#define DRM_IOCTL_TEGRA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_CREATE, struct drm_tegra_gem_create)
#define DRM_IOCTL_TEGRA_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_MMAP, struct drm_tegra_gem_mmap)
#define DRM_IOCTL_TEGRA_SYNCPT_READ DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_READ, struct drm_tegra_syncpt_read)
#define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
#define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
#define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
#define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
#define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
#define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
#define DRM_IOCTL_TEGRA_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_TILING, struct drm_tegra_gem_set_tiling)
#define DRM_IOCTL_TEGRA_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_TILING, struct drm_tegra_gem_get_tiling)
#define DRM_IOCTL_TEGRA_GEM_SET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_SET_FLAGS, struct drm_tegra_gem_set_flags)
#define DRM_IOCTL_TEGRA_GEM_GET_FLAGS DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GEM_GET_FLAGS, struct drm_tegra_gem_get_flags)
#if defined(__cplusplus)
}
#endif
#endif
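
A sketch of the two-step mapping flow described in the struct docs above: create a GEM object, ask the kernel for its mmap offset, then perform an ordinary mmap() on the DRM fd. Error handling is elided (mmap() returns MAP_FAILED, not NULL, on error).

#include <sys/ioctl.h>
#include <sys/mman.h>

static void *tegra_map_example(int drm_fd, __u64 size)
{
	struct drm_tegra_gem_create create = { .size = size, .flags = 0 };
	struct drm_tegra_gem_mmap gem_mmap = { 0 };

	if (ioctl(drm_fd, DRM_IOCTL_TEGRA_GEM_CREATE, &create))
		return NULL;

	gem_mmap.handle = create.handle; /* pad stays 0, as required */
	if (ioctl(drm_fd, DRM_IOCTL_TEGRA_GEM_MMAP, &gem_mmap))
		return NULL;

	return mmap(NULL, (size_t)size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, gem_mmap.offset);
}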


@@ -0,0 +1,442 @@
/*
* Copyright © 2014-2015 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef _VC4_DRM_H_
#define _VC4_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define DRM_VC4_SUBMIT_CL 0x00
#define DRM_VC4_WAIT_SEQNO 0x01
#define DRM_VC4_WAIT_BO 0x02
#define DRM_VC4_CREATE_BO 0x03
#define DRM_VC4_MMAP_BO 0x04
#define DRM_VC4_CREATE_SHADER_BO 0x05
#define DRM_VC4_GET_HANG_STATE 0x06
#define DRM_VC4_GET_PARAM 0x07
#define DRM_VC4_SET_TILING 0x08
#define DRM_VC4_GET_TILING 0x09
#define DRM_VC4_LABEL_BO 0x0a
#define DRM_VC4_GEM_MADVISE 0x0b
#define DRM_VC4_PERFMON_CREATE 0x0c
#define DRM_VC4_PERFMON_DESTROY 0x0d
#define DRM_VC4_PERFMON_GET_VALUES 0x0e
#define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
#define DRM_IOCTL_VC4_PERFMON_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
#define DRM_IOCTL_VC4_PERFMON_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
#define DRM_IOCTL_VC4_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)
struct drm_vc4_submit_rcl_surface {
__u32 hindex; /* Handle index, or ~0 if not present. */
__u32 offset; /* Offset to start of buffer. */
/*
* Bits for either render config (color_write) or load/store packet.
* Bits should all be 0 for MSAA load/stores.
*/
__u16 bits;
#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES (1 << 0)
__u16 flags;
};
/**
* struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
* engine.
*
* Drivers typically use GPU BOs to store batchbuffers / command lists and
* their associated state. However, because the VC4 lacks an MMU, we have to
* do validation of memory accesses by the GPU commands. If we were to store
* our commands in BOs, we'd need to do uncached readback from them to do the
* validation process, which is too expensive. Instead, userspace accumulates
* commands and associated state in plain memory, then the kernel copies the
* data to its own address space, and then validates and stores it in a GPU
* BO.
*/
struct drm_vc4_submit_cl {
/* Pointer to the binner command list.
*
* This is the first set of commands executed, which runs the
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
*/
__u64 bin_cl;
/* Pointer to the shader records.
*
* Shader records are the structures read by the hardware that contain
* pointers to uniforms, shaders, and vertex attributes. The
* reference to the shader record has enough information to determine
* how many pointers are necessary (fixed number for shaders/uniforms,
* and an attribute count), so those BO indices into bo_handles are
* just stored as __u32s before each shader record passed in.
*/
__u64 shader_rec;
/* Pointer to uniform data and texture handles for the textures
* referenced by the shader.
*
* For each shader state record, there is a set of uniform data in the
* order referenced by the record (FS, VS, then CS). Each set of
* uniform data has a __u32 index into bo_handles per texture
* sample operation, in the order the QPU_W_TMUn_S writes appear in
* the program. Following the texture BO handle indices is the actual
* uniform data.
*
* The individual uniform state blocks don't have sizes passed in,
* because the kernel has to determine the sizes anyway during shader
* code validation.
*/
__u64 uniforms;
__u64 bo_handles;
/* Size in bytes of the binner command list. */
__u32 bin_cl_size;
/* Size in bytes of the set of shader records. */
__u32 shader_rec_size;
/* Number of shader records.
*
* This could just be computed from the contents of shader_records and
* the address bits of references to them from the bin CL, but it
* keeps the kernel from having to resize some allocations it makes.
*/
__u32 shader_rec_count;
/* Size in bytes of the uniform state. */
__u32 uniforms_size;
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
/* RCL setup: */
__u16 width;
__u16 height;
__u8 min_x_tile;
__u8 min_y_tile;
__u8 max_x_tile;
__u8 max_y_tile;
struct drm_vc4_submit_rcl_surface color_read;
struct drm_vc4_submit_rcl_surface color_write;
struct drm_vc4_submit_rcl_surface zs_read;
struct drm_vc4_submit_rcl_surface zs_write;
struct drm_vc4_submit_rcl_surface msaa_color_write;
struct drm_vc4_submit_rcl_surface msaa_zs_write;
__u32 clear_color[2];
__u32 clear_z;
__u8 clear_s;
__u32 pad:24;
#define VC4_SUBMIT_CL_USE_CLEAR_COLOR (1 << 0)
/* By default, the kernel gets to choose the order that the tiles are
* rendered in. If this is set, then the tiles will be rendered in a
* raster order, with the right-to-left vs left-to-right and
* top-to-bottom vs bottom-to-top dictated by
* VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
* blits to be implemented using the 3D engine.
*/
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER (1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X (1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y (1 << 3)
__u32 flags;
/* Returned value of the seqno of this render job (for the
* wait ioctl).
*/
__u64 seqno;
/* ID of the perfmon to attach to this job. 0 means no perfmon. */
__u32 perfmonid;
/* Syncobj handle to wait on. If set, processing of this render job
* will not start until the syncobj is signaled. 0 means ignore.
*/
__u32 in_sync;
/* Syncobj handle to export fence to. If set, the fence in the syncobj
* will be replaced with a fence that signals upon completion of this
* render job. 0 means ignore.
*/
__u32 out_sync;
__u32 pad2;
};
/**
* struct drm_vc4_wait_seqno - ioctl argument for waiting for
* DRM_VC4_SUBMIT_CL completion using its returned seqno.
*
* timeout_ns is the timeout in nanoseconds, where "0" means "don't
* block, just return the status."
*/
struct drm_vc4_wait_seqno {
__u64 seqno;
__u64 timeout_ns;
};
/**
* struct drm_vc4_wait_bo - ioctl argument for waiting for
* completion of the last DRM_VC4_SUBMIT_CL on a BO.
*
* This is useful for cases where multiple processes might be
* rendering to a BO and you want to wait for all rendering to be
* completed.
*/
struct drm_vc4_wait_bo {
__u32 handle;
__u32 pad;
__u64 timeout_ns;
};
/**
* struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_vc4_create_bo {
__u32 size;
__u32 flags;
/** Returned GEM handle for the BO. */
__u32 handle;
__u32 pad;
};
/**
* struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
*
* This doesn't actually perform an mmap. Instead, it returns the
* offset you need to use in an mmap on the DRM device node. This
* means that tools like valgrind end up knowing about the mapped
* memory.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_vc4_mmap_bo {
/** Handle for the object being mapped. */
__u32 handle;
__u32 flags;
/** offset into the drm node to use for subsequent mmap call. */
__u64 offset;
};
/**
* struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
* shader BOs.
*
* Since allowing a shader to be overwritten while it's also being
* executed from would allow privilege escalation, shaders must be
* created using this ioctl, and they can't be mmapped later.
*/
struct drm_vc4_create_shader_bo {
/* Size of the data argument. */
__u32 size;
/* Flags, currently must be 0. */
__u32 flags;
/* Pointer to the data. */
__u64 data;
/** Returned GEM handle for the BO. */
__u32 handle;
/* Pad, must be 0. */
__u32 pad;
};
struct drm_vc4_get_hang_state_bo {
__u32 handle;
__u32 paddr;
__u32 size;
__u32 pad;
};
/**
* struct drm_vc4_hang_state - ioctl argument for collecting state
* from a GPU hang for analysis.
*/
struct drm_vc4_get_hang_state {
/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
__u64 bo;
/**
* On input, the size of the bo array. Output is the number
* of bos to be returned.
*/
__u32 bo_count;
__u32 start_bin, start_render;
__u32 ct0ca, ct0ea;
__u32 ct1ca, ct1ea;
__u32 ct0cs, ct1cs;
__u32 ct0ra0, ct1ra0;
__u32 bpca, bpcs;
__u32 bpoa, bpos;
__u32 vpmbase;
__u32 dbge;
__u32 fdbgo;
__u32 fdbgb;
__u32 fdbgr;
__u32 fdbgs;
__u32 errstat;
/* Pad that we may save more registers into in the future. */
__u32 pad[16];
};
#define DRM_VC4_PARAM_V3D_IDENT0 0
#define DRM_VC4_PARAM_V3D_IDENT1 1
#define DRM_VC4_PARAM_V3D_IDENT2 2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES 3
#define DRM_VC4_PARAM_SUPPORTS_ETC1 4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS 5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER 6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE 7
#define DRM_VC4_PARAM_SUPPORTS_PERFMON 8
struct drm_vc4_get_param {
__u32 param;
__u32 pad;
__u64 value;
};
struct drm_vc4_get_tiling {
__u32 handle;
__u32 flags;
__u64 modifier;
};
struct drm_vc4_set_tiling {
__u32 handle;
__u32 flags;
__u64 modifier;
};
/**
* struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
*/
struct drm_vc4_label_bo {
__u32 handle;
__u32 len;
__u64 name;
};
/*
* States prefixed with '__' are internal states and cannot be passed to the
* DRM_IOCTL_VC4_GEM_MADVISE ioctl.
*/
#define VC4_MADV_WILLNEED 0
#define VC4_MADV_DONTNEED 1
#define __VC4_MADV_PURGED 2
#define __VC4_MADV_NOTSUPP 3
struct drm_vc4_gem_madvise {
__u32 handle;
__u32 madv;
__u32 retained;
__u32 pad;
};
enum {
VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
VC4_PERFCNT_FEP_CLIPPED_QUADS,
VC4_PERFCNT_FEP_VALID_QUADS,
VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
VC4_PERFCNT_PSE_PRIMS_REVERSED,
VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
VC4_PERFCNT_NUM_EVENTS,
};
#define DRM_VC4_MAX_PERF_COUNTERS 16
struct drm_vc4_perfmon_create {
__u32 id;
__u32 ncounters;
__u8 events[DRM_VC4_MAX_PERF_COUNTERS];
};
struct drm_vc4_perfmon_destroy {
__u32 id;
};
/*
* Returns the values of the performance counters tracked by this
* perfmon (as an array of ncounters u64 values).
*
* No implicit synchronization is performed, so the user has to
* guarantee that any jobs using this perfmon have already been
* completed (probably by blocking on the seqno returned by the
* last exec that used the perfmon).
*/
struct drm_vc4_perfmon_get_values {
__u32 id;
__u64 values_ptr;
};
#if defined(__cplusplus)
}
#endif
#endif /* _VC4_DRM_H_ */
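
A minimal sketch of the wait-BO ioctl documented above: block until all rendering to a BO has finished, with a one-second timeout (timeout_ns == 0 would just poll and return the status).

#include <sys/ioctl.h>

static int vc4_wait_bo_example(int drm_fd, __u32 bo_handle)
{
	struct drm_vc4_wait_bo wait = {
		.handle = bo_handle,
		.pad = 0,
		.timeout_ns = 1000000000ull, /* wait up to one second */
	};

	return ioctl(drm_fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
}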


@@ -0,0 +1,283 @@
/*
* Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* WARNING: These defines must be the same as what the Xserver uses.
* If you change them, you must change the defines in the Xserver.
*/
#ifndef _VIA_DEFINES_
#define _VIA_DEFINES_
#include "via_drmclient.h"
#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
((__volatile__ struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
#define VIA_NR_TEX_REGIONS 64
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif
#define VIA_UPLOAD_TEX0IMAGE 0x1 /* handled clientside */
#define VIA_UPLOAD_TEX1IMAGE 0x2 /* handled clientside */
#define VIA_UPLOAD_CTX 0x4
#define VIA_UPLOAD_BUFFERS 0x8
#define VIA_UPLOAD_TEX0 0x10
#define VIA_UPLOAD_TEX1 0x20
#define VIA_UPLOAD_CLIPRECTS 0x40
#define VIA_UPLOAD_ALL 0xff
/* VIA specific ioctls */
#define DRM_VIA_ALLOCMEM 0x00
#define DRM_VIA_FREEMEM 0x01
#define DRM_VIA_AGP_INIT 0x02
#define DRM_VIA_FB_INIT 0x03
#define DRM_VIA_MAP_INIT 0x04
#define DRM_VIA_DEC_FUTEX 0x05
#define NOT_USED
#define DRM_VIA_DMA_INIT 0x07
#define DRM_VIA_CMDBUFFER 0x08
#define DRM_VIA_FLUSH 0x09
#define DRM_VIA_PCICMD 0x0a
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
#define DRM_VIA_DMA_BLIT 0x0e
#define DRM_VIA_BLIT_SYNC 0x0f
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_AGP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_AGP_INIT, drm_via_agp_t)
#define DRM_IOCTL_VIA_FB_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_FB_INIT, drm_via_fb_t)
#define DRM_IOCTL_VIA_MAP_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_MAP_INIT, drm_via_init_t)
#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_DEC_FUTEX, drm_via_futex_t)
#define DRM_IOCTL_VIA_DMA_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_DMA_INIT, drm_via_dma_init_t)
#define DRM_IOCTL_VIA_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_CMDBUFFER, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_FLUSH DRM_IO( DRM_COMMAND_BASE + DRM_VIA_FLUSH)
#define DRM_IOCTL_VIA_PCICMD DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_PCICMD, drm_via_cmdbuffer_t)
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
drm_via_cmdbuf_size_t)
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
* or in a piecewise fashion as required.
*/
#define VIA_TEX_SETUP_SIZE 8
/* Flags for clear ioctl
*/
#define VIA_FRONT 0x1
#define VIA_BACK 0x2
#define VIA_DEPTH 0x4
#define VIA_STENCIL 0x8
#define VIA_MEM_VIDEO 0 /* matches drm constant */
#define VIA_MEM_AGP 1 /* matches drm constant */
#define VIA_MEM_SYSTEM 2
#define VIA_MEM_MIXED 3
#define VIA_MEM_UNKNOWN 4
typedef struct {
__u32 offset;
__u32 size;
} drm_via_agp_t;
typedef struct {
__u32 offset;
__u32 size;
} drm_via_fb_t;
typedef struct {
__u32 context;
__u32 type;
__u32 size;
unsigned long index;
unsigned long offset;
} drm_via_mem_t;
typedef struct _drm_via_init {
enum {
VIA_INIT_MAP = 0x01,
VIA_CLEANUP_MAP = 0x02
} func;
unsigned long sarea_priv_offset;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long agpAddr;
} drm_via_init_t;
typedef struct _drm_via_futex {
enum {
VIA_FUTEX_WAIT = 0x00,
		VIA_FUTEX_WAKE = 0x01
} func;
__u32 ms;
__u32 lock;
__u32 val;
} drm_via_futex_t;
typedef struct _drm_via_dma_init {
enum {
VIA_INIT_DMA = 0x01,
VIA_CLEANUP_DMA = 0x02,
VIA_DMA_INITIALIZED = 0x03
} func;
unsigned long offset;
unsigned long size;
unsigned long reg_pause_addr;
} drm_via_dma_init_t;
typedef struct _drm_via_cmdbuffer {
char *buf;
unsigned long size;
} drm_via_cmdbuffer_t;
/* Warning: If you change the SAREA structure, you must change the Xserver
 * structure as well. */
typedef struct _drm_via_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char inUse; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_via_tex_region_t;
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
int vertexPrim;
/*
* Below is for XvMC.
* We want the lock integers alone on, and aligned to, a cache line.
* Therefore this somewhat strange construct.
*/
char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];
unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
/* Used by the 3d driver only at this point, for pageflipping:
*/
unsigned int pfCurrentOffset;
} drm_via_sarea_t;
typedef struct _drm_via_cmdbuf_size {
enum {
VIA_CMDBUF_SPACE = 0x01,
VIA_CMDBUF_LAG = 0x02
} func;
int wait;
__u32 size;
} drm_via_cmdbuf_size_t;
typedef enum {
VIA_IRQ_ABSOLUTE = 0x0,
VIA_IRQ_RELATIVE = 0x1,
VIA_IRQ_SIGNAL = 0x10000000,
VIA_IRQ_FORCE_SEQUENCE = 0x20000000
} via_irq_seq_type_t;
#define VIA_IRQ_FLAGS_MASK 0xF0000000
enum drm_via_irqs {
drm_via_irq_hqv0 = 0,
drm_via_irq_hqv1,
drm_via_irq_dma0_dd,
drm_via_irq_dma0_td,
drm_via_irq_dma1_dd,
drm_via_irq_dma1_td,
drm_via_irq_num
};
struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
__u32 sequence;
__u32 signal;
};
typedef union drm_via_irqwait {
struct drm_via_wait_irq_request request;
struct drm_wait_vblank_reply reply;
} drm_via_irqwait_t;
typedef struct drm_via_blitsync {
__u32 sync_handle;
unsigned engine;
} drm_via_blitsync_t;
/*
 * Below, "flags" is currently unused but will be used for possible future
* extensions like kernel space bounce buffers for bad alignments and
* blit engine busy-wait polling for better latency in the absence of
* interrupts.
*/
typedef struct drm_via_dmablit {
__u32 num_lines;
__u32 line_length;
__u32 fb_addr;
__u32 fb_stride;
unsigned char *mem_addr;
__u32 mem_stride;
__u32 flags;
int to_fb;
drm_via_blitsync_t sync;
} drm_via_dmablit_t;
#if defined(__cplusplus)
}
#endif
#endif /* _VIA_DRM_H_ */
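
One concrete pairing of the two blit ioctls above, as a hedged sketch: the request is submitted with libdrm's drmCommandWriteRead() so the kernel can copy the filled-in sync member back out, and DRM_VIA_BLIT_SYNC then blocks on that handle. via_fd and the image parameters are illustrative, not defined by this header:

#include <string.h>
#include <xf86drm.h>	/* drmCommandWriteRead / drmCommandWrite (libdrm) */

/* Sketch: push a system-memory image to framebuffer offset fb_off and
 * wait for the blit engine to finish. */
static int blit_to_fb(int via_fd, unsigned char *mem, __u32 mem_stride,
		      __u32 fb_off, __u32 fb_stride,
		      __u32 line_length, __u32 num_lines)
{
	drm_via_dmablit_t blit;

	memset(&blit, 0, sizeof(blit));
	blit.num_lines = num_lines;
	blit.line_length = line_length;
	blit.fb_addr = fb_off;
	blit.fb_stride = fb_stride;
	blit.mem_addr = mem;
	blit.mem_stride = mem_stride;
	blit.to_fb = 1;		/* direction: system memory -> framebuffer */
	if (drmCommandWriteRead(via_fd, DRM_VIA_DMA_BLIT,
				&blit, sizeof(blit)))
		return -1;

	/* blit.sync now holds the handle/engine the kernel assigned. */
	return drmCommandWrite(via_fd, DRM_VIA_BLIT_SYNC,
			       &blit.sync, sizeof(blit.sync));
}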

View File

@@ -0,0 +1,182 @@
/*
* Copyright 2013 Red Hat
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef VIRTGPU_DRM_H
#define VIRTGPU_DRM_H
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*
 * Do not use pointers; use __u64 instead for 32-bit/64-bit user/kernel
 * compatibility. Keep fields aligned to their size.
 */
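/* In practice that means buffers cross the ABI as integers, e.g. (cmd_buf
 * being whatever command stream the client built):
 *
 *	struct drm_virtgpu_execbuffer eb = {0};
 *	eb.command = (__u64)(uintptr_t)cmd_buf;
 *
 * so the struct layout is identical for 32-bit and 64-bit userspace. */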
#define DRM_VIRTGPU_MAP 0x01
#define DRM_VIRTGPU_EXECBUFFER 0x02
#define DRM_VIRTGPU_GETPARAM 0x03
#define DRM_VIRTGPU_RESOURCE_CREATE 0x04
#define DRM_VIRTGPU_RESOURCE_INFO 0x05
#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
#define VIRTGPU_EXECBUF_FLAGS (\
VIRTGPU_EXECBUF_FENCE_FD_IN |\
VIRTGPU_EXECBUF_FENCE_FD_OUT |\
0)
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
__u32 pad;
};
struct drm_virtgpu_execbuffer {
__u32 flags;
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
struct drm_virtgpu_getparam {
__u64 param;
__u64 value;
};
/* NO_BO flags? NO resource flag? */
/* resource flag for y_0_top */
struct drm_virtgpu_resource_create {
__u32 target;
__u32 format;
__u32 bind;
__u32 width;
__u32 height;
__u32 depth;
__u32 array_size;
__u32 last_level;
__u32 nr_samples;
__u32 flags;
	__u32 bo_handle; /* if set: recreate a new resource attached to this bo? */
__u32 res_handle; /* returned by kernel */
__u32 size; /* validate transfer in the host */
__u32 stride; /* validate transfer in the host */
};
struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
__u32 stride;
};
struct drm_virtgpu_3d_box {
__u32 x;
__u32 y;
__u32 z;
__u32 w;
__u32 h;
__u32 d;
};
struct drm_virtgpu_3d_transfer_to_host {
__u32 bo_handle;
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
};
struct drm_virtgpu_3d_transfer_from_host {
__u32 bo_handle;
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* poll instead of blocking */
struct drm_virtgpu_3d_wait {
__u32 handle; /* 0 is an invalid handle */
__u32 flags;
};
struct drm_virtgpu_get_caps {
__u32 cap_set_id;
__u32 cap_set_ver;
__u64 addr;
__u32 size;
__u32 pad;
};
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\
struct drm_virtgpu_getparam)
#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \
struct drm_virtgpu_resource_create)
#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \
struct drm_virtgpu_resource_info)
#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \
struct drm_virtgpu_3d_transfer_from_host)
#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \
struct drm_virtgpu_3d_transfer_to_host)
#define DRM_IOCTL_VIRTGPU_WAIT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \
struct drm_virtgpu_3d_wait)
#define DRM_IOCTL_VIRTGPU_GET_CAPS \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
#if defined(__cplusplus)
}
#endif
#endif
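
Putting the pieces above together, the usual probe-then-submit flow looks like the following hedged sketch: check VIRTGPU_PARAM_3D_FEATURES (the kernel writes an int through the pointer passed in value), then submit a command stream and collect the out-fence enabled by VIRTGPU_EXECBUF_FENCE_FD_OUT. virtgpu_fd, cmds, and bo_handles are the caller's, not defined in this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch: returns the out-fence fd (>= 0) on success, -1 on failure. */
static int submit_3d(int virtgpu_fd, void *cmds, uint32_t cmds_size,
		     uint32_t *bo_handles, uint32_t num_bos)
{
	struct drm_virtgpu_getparam gp;
	struct drm_virtgpu_execbuffer eb;
	int have_3d = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = VIRTGPU_PARAM_3D_FEATURES;
	gp.value = (uint64_t)(uintptr_t)&have_3d; /* kernel writes an int here */
	if (ioctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) || !have_3d)
		return -1;

	memset(&eb, 0, sizeof(eb));
	eb.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT;
	eb.size = cmds_size;
	eb.command = (uint64_t)(uintptr_t)cmds;		/* __u64, not void* */
	eb.bo_handles = (uint64_t)(uintptr_t)bo_handles;
	eb.num_bo_handles = num_bos;
	eb.fence_fd = -1;
	if (ioctl(virtgpu_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb))
		return -1;

	return eb.fence_fd; /* sync-file fd, poll()-able */
}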

File diff suppressed because it is too large Load Diff