diff --git a/Android.bp b/Android.bp new file mode 100644 index 0000000..4aa6a22 --- /dev/null +++ b/Android.bp @@ -0,0 +1,188 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +cc_defaults { + name: "minigbm_defaults_celadon", + + srcs: [ + "amdgpu.c", + "drv.c", + "evdi.c", + "exynos.c", + "helpers_array.c", + "helpers.c", + "i915.c", + "marvell.c", + "mediatek.c", + "meson.c", + "msm.c", + "nouveau.c", + "radeon.c", + "rockchip.c", + "tegra.c", + "udl.c", + "vc4.c", + "vgem.c", + "virtio_gpu.c", + "i915_private.c", + ], + + cflags: [ + "-D_GNU_SOURCE=1", + "-D_FILE_OFFSET_BITS=64", + "-Wall", + "-Wsign-compare", + "-Wpointer-arith", + "-Wcast-qual", + "-Wcast-align", + "-Wno-unused-parameter", + "-Wno-switch", + "-Wno-format", + "-Wno-unused-variable", + "-DDRV_I915", + "-DUSE_GRALLOC1", + "-Wno-cast-qual", + ], + cppflags: ["-std=c++14"], + + vendor: true, + + header_libs: [ + "libhardware_headers", + "libnativebase_headers", + "libnativewindow_headers", + "libsystem_headers", + ], + + export_header_lib_headers: [ + "libhardware_headers", + "libnativebase_headers", + "libnativewindow_headers", + "libsystem_headers", + ], + + shared_libs: [ + "libcutils", + "libdrm", + "libnativewindow", + "libsync", + "liblog", + ], + + static_libs: ["libarect"], + + export_static_lib_headers: ["libarect"], +} + +cc_defaults { + name: "minigbm_cros_gralloc_defaults_celadon", + + defaults: ["minigbm_defaults_celadon"], + + local_include_dirs: [ + "cros_gralloc", + ], + + cflags: [ + "-DDRV_I915", + "-DUSE_GRALLOC1", + ], + + srcs: [ + "cros_gralloc/cros_gralloc_buffer.cc", + "cros_gralloc/cros_gralloc_helpers.cc", + "cros_gralloc/cros_gralloc_driver.cc", + "cros_gralloc/i915_private_android.cc", + ] +} + +cc_library_static { + name: "libminigbm_celadon", + defaults: ["minigbm_defaults_celadon"], + shared_libs: ["liblog"], + static_libs: ["libdrm"], + + srcs: [ + "gbm.c", + "gbm_helpers.c", + ], + + export_include_dirs: 
["."], +} + +cc_library_static { + name: "libminigbm_cros_gralloc_celadon", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + shared_libs: ["liblog"], + static_libs: ["libdrm"], + + export_include_dirs: ["."], +} + +cc_library_shared { + name: "gralloc.minigbm_celadon", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + enabled: false, + srcs: ["cros_gralloc/gralloc0/gralloc0.cc"], +} + +cc_library_shared { + name: "gralloc.minigbm_intel_celadon", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + enabled: false, + arch: { + x86: { + enabled: true, + }, + x86_64: { + enabled: true, + }, + }, + cflags: ["-DDRV_I915"], + srcs: ["cros_gralloc/gralloc0/gralloc0.cc"], +} + +cc_library_shared { + name: "gralloc.intel", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + arch: { + x86: { + enabled: true, + }, + x86_64: { + enabled: true, + }, + }, + cflags: [ + "-DDRV_I915", + "-DUSE_GRALLOC1", + ], + relative_install_path: "hw", + srcs: ["cros_gralloc/gralloc1/cros_gralloc1_module.cc"], +} + +cc_library_shared { + name: "gralloc.celadon", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + arch: { + x86: { + enabled: true, + }, + x86_64: { + enabled: true, + }, + }, + cflags: [ + "-DDRV_I915", + "-DUSE_GRALLOC1", + ], + relative_install_path: "hw", + srcs: ["cros_gralloc/gralloc1/cros_gralloc1_module.cc"], +} + +cc_library_shared { + name: "gralloc.minigbm_meson_celadon", + defaults: ["minigbm_cros_gralloc_defaults_celadon"], + cflags: ["-DDRV_MESON"], + srcs: ["cros_gralloc/gralloc0/gralloc0.cc"], +} diff --git a/Android.mk b/Android.mk deleted file mode 100644 index c4798ad..0000000 --- a/Android.mk +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2017 The Chromium OS Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -ifeq ($(strip $(BOARD_USES_MINIGBM)), true) - -MINIGBM_GRALLOC_MK := $(call my-dir)/Android.gralloc.mk -LOCAL_PATH := $(call my-dir) -intel_drivers := i915 i965 -include $(CLEAR_VARS) - -SUBDIRS := cros_gralloc - -LOCAL_SHARED_LIBRARIES := \ - libcutils \ - libdrm - -LOCAL_SRC_FILES := \ - amdgpu.c \ - cirrus.c \ - drv.c \ - evdi.c \ - exynos.c \ - gma500.c \ - helpers.c \ - i915.c \ - marvell.c \ - mediatek.c \ - nouveau.c \ - rockchip.c \ - tegra.c \ - udl.c \ - vc4.c \ - vgem.c \ - virtio_gpu.c - -include $(MINIGBM_GRALLOC_MK) - -LOCAL_CPPFLAGS += -std=c++14 -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64 -LOCAL_CFLAGS += -Wall -Wsign-compare -Wpointer-arith \ - -Wcast-qual -Wcast-align \ - -D_GNU_SOURCE=1 -D_FILE_OFFSET_BITS=64 - -ifneq ($(filter $(intel_drivers), $(BOARD_GPU_DRIVERS)),) -LOCAL_CPPFLAGS += -DDRV_I915 -LOCAL_CFLAGS += -DDRV_I915 -LOCAL_SHARED_LIBRARIES += libdrm_intel -endif - -LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM) -LOCAL_MODULE_TAGS := optional -# The preferred path for vendor HALs is /vendor/lib/hw -LOCAL_PROPRIETARY_MODULE := true -LOCAL_MODULE_RELATIVE_PATH := hw -LOCAL_MODULE_CLASS := SHARED_LIBRARIES -LOCAL_MODULE_SUFFIX := $(TARGET_SHLIB_SUFFIX) -include $(BUILD_SHARED_LIBRARY) - -#endif diff --git a/METADATA b/METADATA new file mode 100644 index 0000000..b281ea4 --- /dev/null +++ b/METADATA @@ -0,0 +1,17 @@ +name: "minigbm" +description: + "" + +third_party { + url { + type: HOMEPAGE + value: "https://www.chromium.org/" + } + url { + type: GIT + value: "https://chromium.googlesource.com/chromiumos/platform/minigbm/" + } + version: "" + last_upgrade_date { year: 2018 month: 6 day: 25 } + license_type: NOTICE +} diff --git a/MODULE_LICENSE_BSD b/MODULE_LICENSE_BSD new file mode 100644 index 0000000..e69de29 diff --git a/Makefile b/Makefile index 482e074..35f92f2 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ CFLAGS += -std=c99 -Wall -Wsign-compare -Wpointer-arith -Wcast-qual \ ifdef DRV_AMDGPU CFLAGS += $(shell $(PKG_CONFIG) 
--cflags libdrm_amdgpu) - LDLIBS += -lamdgpuaddr + LDLIBS += -ldrm_amdgpu -ldl endif ifdef DRV_EXYNOS CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_exynos) @@ -22,12 +22,18 @@ endif ifdef DRV_I915 CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_intel) endif +ifdef DRV_MESON + CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_meson) +endif ifdef DRV_RADEON CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_radeon) endif ifdef DRV_ROCKCHIP CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_rockchip) endif +ifdef DRV_VC4 + CFLAGS += $(shell $(PKG_CONFIG) --cflags libdrm_vc4) +endif CPPFLAGS += $(PC_CFLAGS) LDLIBS += $(PC_LIBS) diff --git a/OWNERS b/OWNERS new file mode 100644 index 0000000..d9d5bf3 --- /dev/null +++ b/OWNERS @@ -0,0 +1,11 @@ +dbehr@chromium.org +dcastagna@chromium.org +ddavenport@chromium.org +gurchetansingh@chromium.org +hoegsberg@chromium.org +ihf@chromium.org +lepton@chromium.org +marcheu@chromium.org +stevensd@chromium.org +tfiga@chromium.org +tutankhamen@chromium.org diff --git a/OWNERS.android b/OWNERS.android new file mode 100644 index 0000000..be55e00 --- /dev/null +++ b/OWNERS.android @@ -0,0 +1,2 @@ +adelva@google.com +natsu@google.com diff --git a/amdgpu.c b/amdgpu.c index 1a1f9fc..795d137 100644 --- a/amdgpu.c +++ b/amdgpu.c @@ -13,411 +13,314 @@ #include #include -#include "addrinterface.h" +#include "dri.h" #include "drv_priv.h" #include "helpers.h" #include "util.h" -#ifndef CIASICIDGFXENGINE_SOUTHERNISLAND -#define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A -#endif - // clang-format off -#define mmCC_RB_BACKEND_DISABLE 0x263d -#define mmGB_TILE_MODE0 0x2644 -#define mmGB_MACROTILE_MODE0 0x2664 -#define mmGB_ADDR_CONFIG 0x263e -#define mmMC_ARB_RAMCFG 0x9d8 - -enum { - FAMILY_UNKNOWN, - FAMILY_SI, - FAMILY_CI, - FAMILY_KV, - FAMILY_VI, - FAMILY_CZ, - FAMILY_PI, - FAMILY_LAST, -}; +#define DRI_PATH STRINGIZE(DRI_DRIVER_DIR/radeonsi_dri.so) // clang-format on -const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, 
DRM_FORMAT_RGB565, - DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 }; +#define TILE_TYPE_LINEAR 0 +/* DRI backend decides tiling in this case. */ +#define TILE_TYPE_DRI 1 -const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, DRM_FORMAT_NV21, - DRM_FORMAT_NV12 }; +struct amdgpu_priv { + struct dri_driver dri; + int drm_version; +}; -static int amdgpu_set_metadata(int fd, uint32_t handle, struct amdgpu_bo_metadata *info) -{ - struct drm_amdgpu_gem_metadata args = { 0 }; +const static uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888 }; - if (!info) - return -EINVAL; +const static uint32_t texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_R8, + DRM_FORMAT_NV21, DRM_FORMAT_NV12, + DRM_FORMAT_YVU420_ANDROID, DRM_FORMAT_YVU420 }; - args.handle = handle; - args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA; - args.data.flags = info->flags; - args.data.tiling_info = info->tiling_info; +static int amdgpu_init(struct driver *drv) +{ + struct amdgpu_priv *priv; + drmVersionPtr drm_version; + struct format_metadata metadata; + uint64_t use_flags = BO_USE_RENDER_MASK; - if (info->size_metadata > sizeof(args.data.data)) - return -EINVAL; + priv = calloc(1, sizeof(struct amdgpu_priv)); + if (!priv) + return -ENOMEM; - if (info->size_metadata) { - args.data.data_size_bytes = info->size_metadata; - memcpy(args.data.data, info->umd_metadata, info->size_metadata); + drm_version = drmGetVersion(drv_get_fd(drv)); + if (!drm_version) { + free(priv); + return -ENODEV; } - return drmCommandWriteRead(fd, DRM_AMDGPU_GEM_METADATA, &args, sizeof(args)); -} + priv->drm_version = drm_version->version_minor; + drmFreeVersion(drm_version); -static int amdgpu_read_mm_regs(int fd, unsigned dword_offset, unsigned count, uint32_t instance, - uint32_t flags, uint32_t *values) -{ - struct drm_amdgpu_info request; - - memset(&request, 0, sizeof(request)); - request.return_pointer = 
(uintptr_t)values; - request.return_size = count * sizeof(uint32_t); - request.query = AMDGPU_INFO_READ_MMR_REG; - request.read_mmr_reg.dword_offset = dword_offset; - request.read_mmr_reg.count = count; - request.read_mmr_reg.instance = instance; - request.read_mmr_reg.flags = flags; - - return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(struct drm_amdgpu_info)); -} + drv->priv = priv; -static int amdgpu_query_gpu(int fd, struct amdgpu_gpu_info *gpu_info) -{ - int ret; - uint32_t instance; + if (dri_init(drv, DRI_PATH, "radeonsi")) { + free(priv); + drv->priv = NULL; + return -ENODEV; + } - if (!gpu_info) - return -EINVAL; + metadata.tiling = TILE_TYPE_LINEAR; + metadata.priority = 1; + metadata.modifier = DRM_FORMAT_MOD_LINEAR; - instance = AMDGPU_INFO_MMR_SH_INDEX_MASK << AMDGPU_INFO_MMR_SH_INDEX_SHIFT; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &metadata, use_flags); - ret = amdgpu_read_mm_regs(fd, mmCC_RB_BACKEND_DISABLE, 1, instance, 0, - &gpu_info->backend_disable[0]); - if (ret) - return ret; - /* extract bitfield CC_RB_BACKEND_DISABLE.BACKEND_DISABLE */ - gpu_info->backend_disable[0] = (gpu_info->backend_disable[0] >> 16) & 0xff; + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &metadata, BO_USE_TEXTURE_MASK); - ret = amdgpu_read_mm_regs(fd, mmGB_TILE_MODE0, 32, 0xffffffff, 0, gpu_info->gb_tile_mode); - if (ret) - return ret; + /* + * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the + * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future. + */ + drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER); + drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_ENCODER); - ret = amdgpu_read_mm_regs(fd, mmGB_MACROTILE_MODE0, 16, 0xffffffff, 0, - gpu_info->gb_macro_tile_mode); - if (ret) - return ret; + /* Android CTS tests require this. 
*/ + drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK); - ret = amdgpu_read_mm_regs(fd, mmGB_ADDR_CONFIG, 1, 0xffffffff, 0, &gpu_info->gb_addr_cfg); - if (ret) - return ret; + /* Linear formats supported by display. */ + drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT); - ret = amdgpu_read_mm_regs(fd, mmMC_ARB_RAMCFG, 1, 0xffffffff, 0, &gpu_info->mc_arb_ramcfg); - if (ret) - return ret; + /* YUV formats for camera and display. */ + drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT | + BO_USE_HW_VIDEO_DECODER); + + drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, BO_USE_SCANOUT); + + /* + * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots + * from camera. + */ + drv_modify_combination(drv, DRM_FORMAT_R8, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + + /* + * The following formats will be allocated by the DRI backend and may be potentially tiled. + * Since format modifier support hasn't been implemented fully yet, it's not + * possible to enumerate the different types of buffers (like i915 can). + */ + use_flags &= ~BO_USE_RENDERSCRIPT; + use_flags &= ~BO_USE_SW_WRITE_OFTEN; + use_flags &= ~BO_USE_SW_READ_OFTEN; + use_flags &= ~BO_USE_LINEAR; + metadata.tiling = TILE_TYPE_DRI; + metadata.priority = 2; + + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &metadata, use_flags); + + /* Potentially tiled formats supported by display. 
*/ + drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT); return 0; } -static void *ADDR_API alloc_sys_mem(const ADDR_ALLOCSYSMEM_INPUT *in) +static void amdgpu_close(struct driver *drv) { - return malloc(in->sizeInBytes); + dri_close(drv); + free(drv->priv); + drv->priv = NULL; } -static ADDR_E_RETURNCODE ADDR_API free_sys_mem(const ADDR_FREESYSMEM_INPUT *in) +static int amdgpu_create_bo_linear(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) { - free(in->pVirtAddr); - return ADDR_OK; -} + int ret; + uint32_t plane, stride; + union drm_amdgpu_gem_create gem_create; -static int amdgpu_addrlib_compute(void *addrlib, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags, uint32_t *tiling_flags, - ADDR_COMPUTE_SURFACE_INFO_OUTPUT *addr_out) -{ - ADDR_COMPUTE_SURFACE_INFO_INPUT addr_surf_info_in = { 0 }; - ADDR_TILEINFO addr_tile_info = { 0 }; - ADDR_TILEINFO addr_tile_info_out = { 0 }; - uint32_t bits_per_pixel; - - addr_surf_info_in.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT); - - /* Set the requested tiling mode. 
*/ - addr_surf_info_in.tileMode = ADDR_TM_2D_TILED_THIN1; - if (use_flags & - (BO_USE_CURSOR | BO_USE_LINEAR | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN)) - addr_surf_info_in.tileMode = ADDR_TM_LINEAR_ALIGNED; - else if (width <= 16 || height <= 16) - addr_surf_info_in.tileMode = ADDR_TM_1D_TILED_THIN1; - - bits_per_pixel = drv_stride_from_format(format, 1, 0) * 8; - /* Bits per pixel should be calculated from format*/ - addr_surf_info_in.bpp = bits_per_pixel; - addr_surf_info_in.numSamples = 1; - addr_surf_info_in.width = width; - addr_surf_info_in.height = height; - addr_surf_info_in.numSlices = 1; - addr_surf_info_in.pTileInfo = &addr_tile_info; - addr_surf_info_in.tileIndex = -1; - - /* This disables incorrect calculations (hacks) in addrlib. */ - addr_surf_info_in.flags.noStencil = 1; - - /* Set the micro tile type. */ - if (use_flags & BO_USE_SCANOUT) - addr_surf_info_in.tileType = ADDR_DISPLAYABLE; - else - addr_surf_info_in.tileType = ADDR_NON_DISPLAYABLE; + stride = drv_stride_from_format(format, width, 0); + stride = ALIGN(stride, 256); - addr_out->size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT); - addr_out->pTileInfo = &addr_tile_info_out; + drv_bo_from_format(bo, stride, height, format); - if (AddrComputeSurfaceInfo(addrlib, &addr_surf_info_in, addr_out) != ADDR_OK) - return -EINVAL; + memset(&gem_create, 0, sizeof(gem_create)); + gem_create.in.bo_size = bo->meta.total_size; + gem_create.in.alignment = 256; + gem_create.in.domain_flags = 0; - ADDR_CONVERT_TILEINFOTOHW_INPUT s_in = { 0 }; - ADDR_CONVERT_TILEINFOTOHW_OUTPUT s_out = { 0 }; - ADDR_TILEINFO s_tile_hw_info_out = { 0 }; + if (use_flags & (BO_USE_LINEAR | BO_USE_SW_MASK)) + gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - s_in.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_INPUT); - /* Convert from real value to HW value */ - s_in.reverse = 0; - s_in.pTileInfo = &addr_tile_info_out; - s_in.tileIndex = -1; + gem_create.in.domains = AMDGPU_GEM_DOMAIN_GTT; + if (!(use_flags & 
(BO_USE_SW_READ_OFTEN | BO_USE_SCANOUT))) + gem_create.in.domain_flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC; - s_out.size = sizeof(ADDR_CONVERT_TILEINFOTOHW_OUTPUT); - s_out.pTileInfo = &s_tile_hw_info_out; + /* Allocate the buffer with the preferred heap. */ + ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create, + sizeof(gem_create)); + if (ret < 0) + return ret; - if (AddrConvertTileInfoToHW(addrlib, &s_in, &s_out) != ADDR_OK) - return -EINVAL; + for (plane = 0; plane < bo->meta.num_planes; plane++) + bo->handles[plane].u32 = gem_create.out.handle; - if (addr_out->tileMode >= ADDR_TM_2D_TILED_THIN1) - /* 2D_TILED_THIN1 */ - *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); - else if (addr_out->tileMode >= ADDR_TM_1D_TILED_THIN1) - /* 1D_TILED_THIN1 */ - *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); - else - /* LINEAR_ALIGNED */ - *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); - - *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, drv_log_base2(addr_tile_info_out.bankWidth)); - *tiling_flags |= - AMDGPU_TILING_SET(BANK_HEIGHT, drv_log_base2(addr_tile_info_out.bankHeight)); - *tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, s_tile_hw_info_out.tileSplitBytes); - *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, - drv_log_base2(addr_tile_info_out.macroAspectRatio)); - *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, s_tile_hw_info_out.pipeConfig); - *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, s_tile_hw_info_out.banks); + bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_LINEAR; return 0; } -static void *amdgpu_addrlib_init(int fd) +static int amdgpu_create_bo(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) { - int ret; - ADDR_CREATE_INPUT addr_create_input = { 0 }; - ADDR_CREATE_OUTPUT addr_create_output = { 0 }; - ADDR_REGISTER_VALUE reg_value = { 0 }; - ADDR_CREATE_FLAGS create_flags = { { 0 } }; - ADDR_E_RETURNCODE addr_ret; + struct combination *combo; - addr_create_input.size = 
sizeof(ADDR_CREATE_INPUT); - addr_create_output.size = sizeof(ADDR_CREATE_OUTPUT); - - struct amdgpu_gpu_info gpu_info = { 0 }; + combo = drv_get_combination(bo->drv, format, use_flags); + if (!combo) + return -EINVAL; - ret = amdgpu_query_gpu(fd, &gpu_info); + if (combo->metadata.tiling == TILE_TYPE_DRI) { + bool needs_alignment = false; +#ifdef __ANDROID__ + /* + * Currently, the gralloc API doesn't differentiate between allocation time and map + * time strides. A workaround for amdgpu DRI buffers is to always to align to 256 at + * allocation time. + * + * See b/115946221,b/117942643 + */ + if (use_flags & (BO_USE_SW_MASK)) + needs_alignment = true; +#endif + // See b/122049612 + if (use_flags & (BO_USE_SCANOUT)) + needs_alignment = true; - if (ret) { - fprintf(stderr, "[%s]failed with error =%d\n", __func__, ret); - return NULL; - } + if (needs_alignment) { + uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(format, 0); + width = ALIGN(width, 256 / bytes_per_pixel); + } - reg_value.noOfBanks = gpu_info.mc_arb_ramcfg & 0x3; - reg_value.gbAddrConfig = gpu_info.gb_addr_cfg; - reg_value.noOfRanks = (gpu_info.mc_arb_ramcfg & 0x4) >> 2; - - reg_value.backendDisables = gpu_info.backend_disable[0]; - reg_value.pTileConfig = gpu_info.gb_tile_mode; - reg_value.noOfEntries = sizeof(gpu_info.gb_tile_mode) / sizeof(gpu_info.gb_tile_mode[0]); - reg_value.pMacroTileConfig = gpu_info.gb_macro_tile_mode; - reg_value.noOfMacroEntries = - sizeof(gpu_info.gb_macro_tile_mode) / sizeof(gpu_info.gb_macro_tile_mode[0]); - create_flags.value = 0; - create_flags.useTileIndex = 1; - - addr_create_input.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND; - - addr_create_input.chipFamily = FAMILY_CZ; - addr_create_input.createFlags = create_flags; - addr_create_input.callbacks.allocSysMem = alloc_sys_mem; - addr_create_input.callbacks.freeSysMem = free_sys_mem; - addr_create_input.callbacks.debugPrint = 0; - addr_create_input.regValue = reg_value; - - addr_ret = 
AddrCreate(&addr_create_input, &addr_create_output); - - if (addr_ret != ADDR_OK) { - fprintf(stderr, "[%s]failed error =%d\n", __func__, addr_ret); - return NULL; + return dri_bo_create(bo, width, height, format, use_flags); } - return addr_create_output.hLib; + return amdgpu_create_bo_linear(bo, width, height, format, use_flags); } -static int amdgpu_init(struct driver *drv) +static int amdgpu_create_bo_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, const uint64_t *modifiers, + uint32_t count) { - int ret; - void *addrlib; - struct format_metadata metadata; - uint64_t use_flags = BO_USE_RENDER_MASK; - - addrlib = amdgpu_addrlib_init(drv_get_fd(drv)); - if (!addrlib) - return -1; + bool only_use_linear = true; - drv->priv = addrlib; - - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &LINEAR_METADATA, BO_USE_TEXTURE_MASK); - if (ret) - return ret; + for (uint32_t i = 0; i < count; ++i) + if (modifiers[i] != DRM_FORMAT_MOD_LINEAR) + only_use_linear = false; - drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA, BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_SCANOUT); + if (only_use_linear) + return amdgpu_create_bo_linear(bo, width, height, format, BO_USE_SCANOUT); - metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED; - metadata.priority = 2; - metadata.modifier = DRM_FORMAT_MOD_LINEAR; - - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; - - drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT); - - metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_LINEAR_ALIGNED; - metadata.priority = 3; - metadata.modifier = 
DRM_FORMAT_MOD_LINEAR; - - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; - - use_flags &= ~BO_USE_SW_WRITE_OFTEN; - use_flags &= ~BO_USE_SW_READ_OFTEN; - use_flags &= ~BO_USE_LINEAR; - - metadata.tiling = ADDR_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1; - metadata.priority = 4; - - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; - - drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_XBGR8888, &metadata, BO_USE_SCANOUT); + return dri_bo_create_with_modifiers(bo, width, height, format, modifiers, count); +} - metadata.tiling = ADDR_NON_DISPLAYABLE << 16 | ADDR_TM_2D_TILED_THIN1; - metadata.priority = 5; +static int amdgpu_import_bo(struct bo *bo, struct drv_import_fd_data *data) +{ + bool dri_tiling = data->format_modifiers[0] != DRM_FORMAT_MOD_LINEAR; + if (data->format_modifiers[0] == DRM_FORMAT_MOD_INVALID) { + struct combination *combo; + combo = drv_get_combination(bo->drv, data->format, data->use_flags); + if (!combo) + return -EINVAL; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; + dri_tiling = combo->metadata.tiling == TILE_TYPE_DRI; + } - return ret; + if (dri_tiling) + return dri_bo_import(bo, data); + else + return drv_prime_bo_import(bo, data); } -static void amdgpu_close(struct driver *drv) +static int amdgpu_destroy_bo(struct bo *bo) { - AddrDestroy(drv->priv); - drv->priv = NULL; + if (bo->priv) + return dri_bo_destroy(bo); + else + return drv_gem_bo_destroy(bo); } -static int amdgpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) +static void *amdgpu_map_bo(struct bo *bo, struct vma 
*vma, size_t plane, uint32_t map_flags) { - void *addrlib = bo->drv->priv; - union drm_amdgpu_gem_create gem_create; - struct amdgpu_bo_metadata metadata = { 0 }; - ADDR_COMPUTE_SURFACE_INFO_OUTPUT addr_out = { 0 }; - uint32_t tiling_flags = 0; - size_t plane; int ret; + union drm_amdgpu_gem_mmap gem_map; - if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21) { - drv_bo_from_format(bo, ALIGN(width, 64), height, format); - } else { - if (amdgpu_addrlib_compute(addrlib, width, height, format, use_flags, &tiling_flags, - &addr_out) < 0) - return -EINVAL; - - bo->tiling = tiling_flags; - /* RGB has 1 plane only */ - bo->offsets[0] = 0; - bo->total_size = bo->sizes[0] = addr_out.surfSize; - bo->strides[0] = addr_out.pixelPitch * DIV_ROUND_UP(addr_out.pixelBits, 8); - } - - memset(&gem_create, 0, sizeof(gem_create)); + if (bo->priv) + return dri_bo_map(bo, vma, plane, map_flags); - gem_create.in.bo_size = bo->total_size; - gem_create.in.alignment = addr_out.baseAlign; - /* Set the placement. */ - gem_create.in.domains = AMDGPU_GEM_DOMAIN_VRAM; - gem_create.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - /* Allocate the buffer with the preferred heap. 
*/ - ret = drmCommandWriteRead(drv_get_fd(bo->drv), DRM_AMDGPU_GEM_CREATE, &gem_create, - sizeof(gem_create)); + memset(&gem_map, 0, sizeof(gem_map)); + gem_map.in.handle = bo->handles[plane].u32; - if (ret < 0) - return ret; + ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map); + if (ret) { + drv_log("DRM_IOCTL_AMDGPU_GEM_MMAP failed\n"); + return MAP_FAILED; + } - metadata.tiling_info = tiling_flags; + vma->length = bo->meta.total_size; - for (plane = 0; plane < bo->num_planes; plane++) - bo->handles[plane].u32 = gem_create.out.handle; - - ret = amdgpu_set_metadata(drv_get_fd(bo->drv), bo->handles[0].u32, &metadata); + return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + gem_map.out.addr_ptr); +} - return ret; +static int amdgpu_unmap_bo(struct bo *bo, struct vma *vma) +{ + if (bo->priv) + return dri_bo_unmap(bo, vma); + else + return munmap(vma->addr, vma->length); } -static void *amdgpu_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static int amdgpu_bo_invalidate(struct bo *bo, struct mapping *mapping) { int ret; - union drm_amdgpu_gem_mmap gem_map; + union drm_amdgpu_gem_wait_idle wait_idle; - memset(&gem_map, 0, sizeof(gem_map)); - gem_map.in.handle = bo->handles[plane].u32; + if (bo->priv) + return 0; - ret = drmIoctl(bo->drv->fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &gem_map); - if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_AMDGPU_GEM_MMAP failed\n"); - return MAP_FAILED; + memset(&wait_idle, 0, sizeof(wait_idle)); + wait_idle.in.handle = bo->handles[0].u32; + wait_idle.in.timeout = AMDGPU_TIMEOUT_INFINITE; + + ret = drmCommandWriteRead(bo->drv->fd, DRM_AMDGPU_GEM_WAIT_IDLE, &wait_idle, + sizeof(wait_idle)); + + if (ret < 0) { + drv_log("DRM_AMDGPU_GEM_WAIT_IDLE failed with %d\n", ret); + return ret; } - data->length = bo->total_size; + if (ret == 0 && wait_idle.out.status) + drv_log("DRM_AMDGPU_GEM_WAIT_IDLE BO is busy\n"); - return mmap(0, bo->total_size, drv_get_prot(map_flags), 
MAP_SHARED, bo->drv->fd, - gem_map.out.addr_ptr); + return 0; } -static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags) +static uint32_t amdgpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) { switch (format) { + case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: + /* Camera subsystem requires NV12. */ + if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) + return DRM_FORMAT_NV12; + /*HACK: See b/28671744 */ + return DRM_FORMAT_XBGR8888; case DRM_FORMAT_FLEX_YCbCr_420_888: return DRM_FORMAT_NV12; default: @@ -425,16 +328,19 @@ static uint32_t amdgpu_resolve_format(uint32_t format, uint64_t use_flags) } } -struct backend backend_amdgpu = { +const struct backend backend_amdgpu = { .name = "amdgpu", .init = amdgpu_init, .close = amdgpu_close, - .bo_create = amdgpu_bo_create, - .bo_destroy = drv_gem_bo_destroy, - .bo_import = drv_prime_bo_import, - .bo_map = amdgpu_bo_map, - .bo_unmap = drv_bo_munmap, + .bo_create = amdgpu_create_bo, + .bo_create_with_modifiers = amdgpu_create_bo_with_modifiers, + .bo_destroy = amdgpu_destroy_bo, + .bo_import = amdgpu_import_bo, + .bo_map = amdgpu_map_bo, + .bo_unmap = amdgpu_unmap_bo, + .bo_invalidate = amdgpu_bo_invalidate, .resolve_format = amdgpu_resolve_format, + .num_planes_from_modifier = dri_num_planes_from_modifier, }; #endif diff --git a/cirrus.c b/cirrus.c deleted file mode 100644 index d92bab4..0000000 --- a/cirrus.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2014 The Chromium OS Authors. All rights reserved. - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. 
- */ - -#include "drv_priv.h" -#include "helpers.h" -#include "util.h" - -const static uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB888, - DRM_FORMAT_XRGB8888 }; - -static int cirrus_init(struct driver *drv) -{ - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; - - return drv_modify_linear_combinations(drv); -} - -struct backend backend_cirrus = { - .name = "cirrus", - .init = cirrus_init, - .bo_create = drv_dumb_bo_create, - .bo_destroy = drv_dumb_bo_destroy, - .bo_import = drv_prime_bo_import, - .bo_map = drv_dumb_bo_map, - .bo_unmap = drv_bo_munmap, -}; diff --git a/cros_gralloc/Makefile b/cros_gralloc/Makefile index 1583a6f..17e884f 100644 --- a/cros_gralloc/Makefile +++ b/cros_gralloc/Makefile @@ -21,10 +21,6 @@ CXXFLAGS += -std=c++14 CFLAGS += -std=c99 LIBS += -shared -lcutils -lhardware -lsync $(LIBDRM_LIBS) -ifdef DRV_AMDGPU - LIBS += -lamdgpuaddr -endif - OBJS = $(foreach source, $(SOURCES), $(addsuffix .o, $(basename $(source)))) OBJECTS = $(addprefix $(TARGET_DIR), $(notdir $(OBJS))) diff --git a/cros_gralloc/cros_gralloc_buffer.cc b/cros_gralloc/cros_gralloc_buffer.cc index fe9c01a..fdaf7d3 100644 --- a/cros_gralloc/cros_gralloc_buffer.cc +++ b/cros_gralloc/cros_gralloc_buffer.cc @@ -10,8 +10,11 @@ #include cros_gralloc_buffer::cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo, - struct cros_gralloc_handle *acquire_handle) - : id_(id), bo_(acquire_bo), hnd_(acquire_handle), refcount_(1), lockcount_(0) + struct cros_gralloc_handle *acquire_handle, + int32_t reserved_region_fd, uint64_t reserved_region_size) + : id_(id), bo_(acquire_bo), hnd_(acquire_handle), refcount_(1), lockcount_(0), + reserved_region_fd_(reserved_region_fd), reserved_region_size_(reserved_region_size), + reserved_region_addr_(nullptr) { assert(bo_); num_planes_ = drv_bo_get_num_planes(bo_); @@ -26,6 +29,9 @@ 
cros_gralloc_buffer::~cros_gralloc_buffer() native_handle_close(&hnd_->base); delete hnd_; } + if (reserved_region_addr_) { + munmap(reserved_region_addr_, reserved_region_size_); + } } uint32_t cros_gralloc_buffer::get_id() const @@ -44,7 +50,8 @@ int32_t cros_gralloc_buffer::decrease_refcount() return --refcount_; } -int32_t cros_gralloc_buffer::lock(uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]) +int32_t cros_gralloc_buffer::lock(const struct rectangle *rect, uint32_t map_flags, + uint8_t *addr[DRV_MAX_PLANES]) { void *vaddr = nullptr; @@ -55,21 +62,31 @@ int32_t cros_gralloc_buffer::lock(uint32_t map_flags, uint8_t *addr[DRV_MAX_PLAN * just use the first kernel buffer. */ if (drv_num_buffers_per_bo(bo_) != 1) { - cros_gralloc_error("Can only support one buffer per bo."); + drv_log("Can only support one buffer per bo.\n"); return -EINVAL; } if (map_flags) { if (lock_data_[0]) { drv_bo_invalidate(bo_, lock_data_[0]); - vaddr = lock_data_[0]->addr; + vaddr = lock_data_[0]->vma->addr; } else { - vaddr = drv_bo_map(bo_, 0, 0, drv_bo_get_width(bo_), drv_bo_get_height(bo_), - map_flags, &lock_data_[0], 0); + struct rectangle r = *rect; + + if (!r.width && !r.height && !r.x && !r.y) { + /* + * Android IMapper.hal: An accessRegion of all-zeros means the + * entire buffer. + */ + r.width = drv_bo_get_width(bo_); + r.height = drv_bo_get_height(bo_); + } + + vaddr = drv_bo_map(bo_, &r, map_flags, &lock_data_[0], 0); } if (vaddr == MAP_FAILED) { - cros_gralloc_error("Mapping failed."); + drv_log("Mapping failed.\n"); return -EFAULT; } } @@ -81,19 +98,122 @@ int32_t cros_gralloc_buffer::lock(uint32_t map_flags, uint8_t *addr[DRV_MAX_PLAN return 0; } +#ifdef USE_GRALLOC1 +int32_t cros_gralloc_buffer::lock(uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]) +{ + void *vaddr = nullptr; + + memset(addr, 0, DRV_MAX_PLANES * sizeof(*addr)); + + /* + * Gralloc consumers don't support more than one kernel buffer per buffer object yet, so + * just use the first kernel buffer. 
+ */ + if (drv_num_buffers_per_bo(bo_) != 1) { + drv_log("Can only support one buffer per bo."); + return -EINVAL; + } + + if (map_flags) { + if (lock_data_[0]) { + drv_bo_invalidate(bo_, lock_data_[0]); + vaddr = lock_data_[0]->vma->addr; + } else { + struct rectangle r; + + if (!r.width && !r.height && !r.x && !r.y) { + /* + * Android IMapper.hal: An accessRegion of all-zeros means the + * entire buffer. + */ + r.width = drv_bo_get_width(bo_); + r.height = drv_bo_get_height(bo_); + } + vaddr = drv_bo_map(bo_, &r, map_flags, &lock_data_[0], 0); + } + + if (vaddr == MAP_FAILED) { + drv_log("Mapping failed."); + return -EFAULT; + } + } + + for (uint32_t plane = 0; plane < num_planes_; plane++) + addr[plane] = static_cast(vaddr) + drv_bo_get_plane_offset(bo_, plane); + + lockcount_++; + return 0; +} +#endif + int32_t cros_gralloc_buffer::unlock() { if (lockcount_ <= 0) { - cros_gralloc_error("Buffer was not locked."); + drv_log("Buffer was not locked.\n"); return -EINVAL; } if (!--lockcount_) { if (lock_data_[0]) { - drv_bo_unmap(bo_, lock_data_[0]); + drv_bo_flush_or_unmap(bo_, lock_data_[0]); lock_data_[0] = nullptr; } } return 0; } + +int32_t cros_gralloc_buffer::resource_info(uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]) +{ + return drv_resource_info(bo_, strides, offsets); +} + +int32_t cros_gralloc_buffer::invalidate() +{ + if (lockcount_ <= 0) { + drv_log("Buffer was not locked.\n"); + return -EINVAL; + } + + if (lock_data_[0]) { + return drv_bo_invalidate(bo_, lock_data_[0]); + } + + return 0; +} + +int32_t cros_gralloc_buffer::flush() +{ + if (lockcount_ <= 0) { + drv_log("Buffer was not locked.\n"); + return -EINVAL; + } + + if (lock_data_[0]) { + return drv_bo_flush(bo_, lock_data_[0]); + } + + return 0; +} + +int32_t cros_gralloc_buffer::get_reserved_region(void **addr, uint64_t *size) +{ + if (reserved_region_fd_ <= 0) { + drv_log("Buffer does not have reserved region.\n"); + return -EINVAL; + } + + if (!reserved_region_addr_) { + 
reserved_region_addr_ = mmap(nullptr, reserved_region_size_, PROT_WRITE | PROT_READ, + MAP_SHARED, reserved_region_fd_, 0); + if (reserved_region_addr_ == MAP_FAILED) { + drv_log("Failed to mmap reserved region: %s.\n", strerror(errno)); + return -errno; + } + } + + *addr = reserved_region_addr_; + *size = reserved_region_size_; + return 0; +} diff --git a/cros_gralloc/cros_gralloc_buffer.h b/cros_gralloc/cros_gralloc_buffer.h index f629199..ff5c171 100644 --- a/cros_gralloc/cros_gralloc_buffer.h +++ b/cros_gralloc/cros_gralloc_buffer.h @@ -14,7 +14,8 @@ class cros_gralloc_buffer { public: cros_gralloc_buffer(uint32_t id, struct bo *acquire_bo, - struct cros_gralloc_handle *acquire_handle); + struct cros_gralloc_handle *acquire_handle, int32_t reserved_region_fd, + uint64_t reserved_region_size); ~cros_gralloc_buffer(); uint32_t get_id() const; @@ -23,8 +24,18 @@ class cros_gralloc_buffer int32_t increase_refcount(); int32_t decrease_refcount(); + int32_t lock(const struct rectangle *rect, uint32_t map_flags, + uint8_t *addr[DRV_MAX_PLANES]); +#ifdef USE_GRALLOC1 int32_t lock(uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]); +#endif int32_t unlock(); + int32_t resource_info(uint32_t strides[DRV_MAX_PLANES], uint32_t offsets[DRV_MAX_PLANES]); + + int32_t invalidate(); + int32_t flush(); + + int32_t get_reserved_region(void **reserved_region_addr, uint64_t *reserved_region_size); private: cros_gralloc_buffer(cros_gralloc_buffer const &); @@ -32,13 +43,20 @@ class cros_gralloc_buffer uint32_t id_; struct bo *bo_; + + /* Note: this will be nullptr for imported/retained buffers. */ struct cros_gralloc_handle *hnd_; int32_t refcount_; int32_t lockcount_; uint32_t num_planes_; - struct map_info *lock_data_[DRV_MAX_PLANES]; + struct mapping *lock_data_[DRV_MAX_PLANES]; + + /* Optional additional shared memory region attached to some gralloc4 buffers. 
*/ + int32_t reserved_region_fd_; + uint64_t reserved_region_size_; + void *reserved_region_addr_; }; #endif diff --git a/cros_gralloc/cros_gralloc_driver.cc b/cros_gralloc/cros_gralloc_driver.cc index 3a0b013..fd8d7b3 100644 --- a/cros_gralloc/cros_gralloc_driver.cc +++ b/cros_gralloc/cros_gralloc_driver.cc @@ -5,12 +5,20 @@ */ #include "cros_gralloc_driver.h" -#include "../util.h" #include #include +#include #include +#include "../drv_priv.h" +#include "../helpers.h" +#include "../util.h" + +#ifdef USE_GRALLOC1 +#include "i915_private_android.h" +#endif + cros_gralloc_driver::cros_gralloc_driver() : drv_(nullptr) { } @@ -21,8 +29,10 @@ cros_gralloc_driver::~cros_gralloc_driver() handles_.clear(); if (drv_) { + int fd = drv_get_fd(drv_); drv_destroy(drv_); drv_ = nullptr; + close(fd); } } @@ -56,10 +66,13 @@ int32_t cros_gralloc_driver::init() continue; version = drmGetVersion(fd); - if (!version) + if (!version) { + close(fd); continue; + } if (undesired[i] && !strcmp(version->name, undesired[i])) { + close(fd); drmFreeVersion(version); continue; } @@ -68,6 +81,8 @@ int32_t cros_gralloc_driver::init() drv_ = drv_create(fd); if (drv_) return 0; + + close(fd); } } @@ -83,22 +98,78 @@ bool cros_gralloc_driver::is_supported(const struct cros_gralloc_buffer_descript return (combo != nullptr); } +int32_t create_reserved_region(const std::string &buffer_name, uint64_t reserved_region_size) +{ + int32_t reserved_region_fd; + std::string reserved_region_name = buffer_name + " reserved region"; + + reserved_region_fd = memfd_create(reserved_region_name.c_str(), FD_CLOEXEC); + if (reserved_region_fd == -1) { + drv_log("Failed to create reserved region fd: %s.\n", strerror(errno)); + return -errno; + } + + if (ftruncate(reserved_region_fd, reserved_region_size)) { + drv_log("Failed to set reserved region size: %s.\n", strerror(errno)); + return -errno; + } + + return reserved_region_fd; +} + int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descriptor 
*descriptor, buffer_handle_t *out_handle) { - uint32_t id; +#ifdef USE_GRALLOC1 uint64_t mod; +#endif + uint32_t id; size_t num_planes; + size_t num_fds; + size_t num_ints; + size_t num_bytes; uint32_t resolved_format; + uint32_t bytes_per_pixel; + uint64_t use_flags; + int32_t reserved_region_fd; + char *name; struct bo *bo; struct cros_gralloc_handle *hnd; resolved_format = drv_resolve_format(drv_, descriptor->drm_format, descriptor->use_flags); - bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format, - descriptor->use_flags); + use_flags = descriptor->use_flags; + /* + * TODO(b/79682290): ARC++ assumes NV12 is always linear and doesn't + * send modifiers across Wayland protocol, so we or in the + * BO_USE_LINEAR flag here. We need to fix ARC++ to allocate and work + * with tiled buffers. + */ + if (resolved_format == DRM_FORMAT_NV12) + use_flags |= BO_USE_LINEAR; + + /* + * This unmask is a backup in the case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED is resolved + * to non-YUV formats. 
+ */ + if (descriptor->drm_format == DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED && + (resolved_format == DRM_FORMAT_XBGR8888 || resolved_format == DRM_FORMAT_ABGR8888)) { + use_flags &= ~BO_USE_HW_VIDEO_ENCODER; + } + +#ifdef USE_GRALLOC1 + if (descriptor->modifier == 0) { + bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format, + use_flags); + } else { + bo = drv_bo_create_with_modifiers(drv_, descriptor->width, descriptor->height, + resolved_format, &descriptor->modifier, 1); + } +#else + bo = drv_bo_create(drv_, descriptor->width, descriptor->height, resolved_format, use_flags); +#endif if (!bo) { - cros_gralloc_error("Failed to create bo."); + drv_log("Failed to create bo.\n"); return -ENOMEM; } @@ -109,44 +180,92 @@ int32_t cros_gralloc_driver::allocate(const struct cros_gralloc_buffer_descripto */ if (drv_num_buffers_per_bo(bo) != 1) { drv_bo_destroy(bo); - cros_gralloc_error("Can only support one buffer per bo."); + drv_log("Can only support one buffer per bo.\n"); return -EINVAL; } - hnd = new cros_gralloc_handle(); num_planes = drv_bo_get_num_planes(bo); + num_fds = num_planes; + + if (descriptor->reserved_region_size > 0) { + reserved_region_fd = + create_reserved_region(descriptor->name, descriptor->reserved_region_size); + if (reserved_region_fd < 0) { + drv_bo_destroy(bo); + return reserved_region_fd; + } + num_fds += 1; + } else { + reserved_region_fd = -1; + } + num_bytes = sizeof(struct cros_gralloc_handle); + num_bytes += (descriptor->name.size() + 1); + /* + * Ensure that the total number of bytes is a multiple of sizeof(int) as + * native_handle_clone() copies data based on hnd->base.numInts. + */ + num_bytes = ALIGN(num_bytes, sizeof(int)); + num_ints = num_bytes - sizeof(native_handle_t) - num_fds; + /* + * Malloc is used as handles are ultimetly destroyed via free in + * native_handle_delete(). 
+ */ + hnd = static_cast(malloc(num_bytes)); hnd->base.version = sizeof(hnd->base); - hnd->base.numFds = num_planes; - hnd->base.numInts = handle_data_size - num_planes; - + hnd->base.numFds = num_fds; + hnd->base.numInts = num_ints; + hnd->num_planes = num_planes; for (size_t plane = 0; plane < num_planes; plane++) { hnd->fds[plane] = drv_bo_get_plane_fd(bo, plane); hnd->strides[plane] = drv_bo_get_plane_stride(bo, plane); hnd->offsets[plane] = drv_bo_get_plane_offset(bo, plane); - + hnd->sizes[plane] = drv_bo_get_plane_size(bo, plane); +#ifdef USE_GRALLOC1 mod = drv_bo_get_plane_format_modifier(bo, plane); hnd->format_modifiers[2 * plane] = static_cast(mod >> 32); hnd->format_modifiers[2 * plane + 1] = static_cast(mod); +#endif } - + hnd->fds[hnd->num_planes] = reserved_region_fd; + hnd->reserved_region_size = descriptor->reserved_region_size; + static std::atomic next_buffer_id{ 1 }; + hnd->id = next_buffer_id++; hnd->width = drv_bo_get_width(bo); hnd->height = drv_bo_get_height(bo); hnd->format = drv_bo_get_format(bo); - hnd->use_flags[0] = static_cast(descriptor->use_flags >> 32); - hnd->use_flags[1] = static_cast(descriptor->use_flags); - hnd->pixel_stride = drv_bo_get_stride_in_pixels(bo); + hnd->format_modifier = drv_bo_get_plane_format_modifier(bo, 0); + hnd->use_flags = descriptor->use_flags; + bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0); + hnd->pixel_stride = DIV_ROUND_UP(hnd->strides[0], bytes_per_pixel); hnd->magic = cros_gralloc_magic; + hnd->usage = descriptor->droid_usage; +#ifdef USE_GRALLOC1 + hnd->producer_usage = descriptor->producer_usage; + hnd->consumer_usage = descriptor->consumer_usage; + hnd->tiling_mode = drv_bo_get_stride_or_tiling(bo); + int32_t format = i915_private_invert_format(hnd->format); + if (format == 0) { + format = descriptor->droid_format; + } + hnd->droid_format = format; +#else hnd->droid_format = descriptor->droid_format; - hnd->usage = descriptor->producer_usage; +#endif + hnd->total_size = 
descriptor->reserved_region_size + bo->meta.total_size; + hnd->name_offset = handle_data_size; + + name = (char *)(&hnd->base.data[hnd->name_offset]); + snprintf(name, descriptor->name.size() + 1, "%s", descriptor->name.c_str()); id = drv_bo_get_plane_handle(bo, 0).u32; - auto buffer = new cros_gralloc_buffer(id, bo, hnd); + auto buffer = new cros_gralloc_buffer(id, bo, hnd, hnd->fds[hnd->num_planes], + hnd->reserved_region_size); std::lock_guard lock(mutex_); buffers_.emplace(id, buffer); handles_.emplace(hnd, std::make_pair(buffer, 1)); - *out_handle = &hnd->base; + *out_handle = reinterpret_cast(hnd); return 0; } @@ -157,7 +276,7 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle) auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } @@ -169,7 +288,7 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle) } if (drmPrimeFDToHandle(drv_get_fd(drv_), hnd->fds[0], &id)) { - cros_gralloc_error("drmPrimeFDToHandle failed."); + drv_log("drmPrimeFDToHandle failed.\n"); return -errno; } @@ -180,18 +299,16 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t handle) struct bo *bo; struct drv_import_fd_data data; data.format = hnd->format; + data.width = hnd->width; data.height = hnd->height; - data.use_flags = static_cast(hnd->use_flags[0]) << 32; - data.use_flags |= hnd->use_flags[1]; + data.use_flags = hnd->use_flags; memcpy(data.fds, hnd->fds, sizeof(data.fds)); memcpy(data.strides, hnd->strides, sizeof(data.strides)); memcpy(data.offsets, hnd->offsets, sizeof(data.offsets)); for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++) { - data.format_modifiers[plane] = - static_cast(hnd->format_modifiers[2 * plane]) << 32; - data.format_modifiers[plane] |= hnd->format_modifiers[2 * plane + 1]; + data.format_modifiers[plane] = hnd->format_modifier; } bo = drv_bo_import(drv_, &data); @@ -200,7 +317,8 @@ int32_t cros_gralloc_driver::retain(buffer_handle_t 
handle) id = drv_bo_get_plane_handle(bo, 0).u32; - buffer = new cros_gralloc_buffer(id, bo, nullptr); + buffer = new cros_gralloc_buffer(id, bo, nullptr, hnd->fds[hnd->num_planes], + hnd->reserved_region_size); buffers_.emplace(id, buffer); } @@ -214,13 +332,13 @@ int32_t cros_gralloc_driver::release(buffer_handle_t handle) auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } auto buffer = get_buffer(hnd); if (!buffer) { - cros_gralloc_error("Invalid Reference."); + drv_log("Invalid Reference.\n"); return -EINVAL; } @@ -235,42 +353,68 @@ int32_t cros_gralloc_driver::release(buffer_handle_t handle) return 0; } -int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence, uint32_t map_flags, - uint8_t *addr[DRV_MAX_PLANES]) +int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence, + bool close_acquire_fence, const struct rectangle *rect, + uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]) { - int32_t ret = cros_gralloc_sync_wait(acquire_fence); + int32_t ret = cros_gralloc_sync_wait(acquire_fence, close_acquire_fence); if (ret) return ret; std::lock_guard lock(mutex_); auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } auto buffer = get_buffer(hnd); if (!buffer) { - cros_gralloc_error("Invalid Reference."); + drv_log("Invalid Reference.\n"); return -EINVAL; } - return buffer->lock(map_flags, addr); + return buffer->lock(rect, map_flags, addr); } +#ifdef USE_GRALLOC1 +int32_t cros_gralloc_driver::lock(buffer_handle_t handle, int32_t acquire_fence, uint32_t map_flags, + uint8_t *addr[DRV_MAX_PLANES]) +{ + int32_t ret = cros_gralloc_sync_wait(acquire_fence); + if (ret) + return ret; + + std::lock_guard lock(mutex_); + auto hnd = cros_gralloc_convert_handle(handle); + if (!hnd) { + drv_log("Invalid handle."); + return -EINVAL; + } + 
+ auto buffer = get_buffer(hnd); + if (!buffer) { + drv_log("Invalid Reference."); + return -EINVAL; + } + + return buffer->lock(map_flags, addr); +} +#endif + int32_t cros_gralloc_driver::unlock(buffer_handle_t handle, int32_t *release_fence) { std::lock_guard lock(mutex_); auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } auto buffer = get_buffer(hnd); if (!buffer) { - cros_gralloc_error("Invalid Reference."); + drv_log("Invalid Reference.\n"); return -EINVAL; } @@ -284,26 +428,121 @@ int32_t cros_gralloc_driver::unlock(buffer_handle_t handle, int32_t *release_fen return buffer->unlock(); } +int32_t cros_gralloc_driver::invalidate(buffer_handle_t handle) +{ + std::lock_guard lock(mutex_); + + auto hnd = cros_gralloc_convert_handle(handle); + if (!hnd) { + drv_log("Invalid handle.\n"); + return -EINVAL; + } + + auto buffer = get_buffer(hnd); + if (!buffer) { + drv_log("Invalid Reference.\n"); + return -EINVAL; + } + + return buffer->invalidate(); +} + +int32_t cros_gralloc_driver::flush(buffer_handle_t handle, int32_t *release_fence) +{ + std::lock_guard lock(mutex_); + + auto hnd = cros_gralloc_convert_handle(handle); + if (!hnd) { + drv_log("Invalid handle.\n"); + return -EINVAL; + } + + auto buffer = get_buffer(hnd); + if (!buffer) { + drv_log("Invalid Reference.\n"); + return -EINVAL; + } + + /* + * From the ANativeWindow::dequeueBuffer documentation: + * + * "A value of -1 indicates that the caller may access the buffer immediately without + * waiting on a fence." 
+ */ + *release_fence = -1; + return buffer->flush(); +} + int32_t cros_gralloc_driver::get_backing_store(buffer_handle_t handle, uint64_t *out_store) { std::lock_guard lock(mutex_); auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } +#ifdef USE_GRALLOC1 + *out_store = static_cast(hnd->id); +#else auto buffer = get_buffer(hnd); if (!buffer) { - cros_gralloc_error("Invalid Reference."); + drv_log("Invalid Reference.\n"); return -EINVAL; } *out_store = static_cast(buffer->get_id()); +#endif return 0; } +int32_t cros_gralloc_driver::resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]) +{ + std::lock_guard lock(mutex_); + + auto hnd = cros_gralloc_convert_handle(handle); + if (!hnd) { + drv_log("Invalid handle.\n"); + return -EINVAL; + } + + auto buffer = get_buffer(hnd); + if (!buffer) { + drv_log("Invalid Reference.\n"); + return -EINVAL; + } + + return buffer->resource_info(strides, offsets); +} + +int32_t cros_gralloc_driver::get_reserved_region(buffer_handle_t handle, + void **reserved_region_addr, + uint64_t *reserved_region_size) +{ + std::lock_guard lock(mutex_); + + auto hnd = cros_gralloc_convert_handle(handle); + if (!hnd) { + drv_log("Invalid handle.\n"); + return -EINVAL; + } + + auto buffer = get_buffer(hnd); + if (!buffer) { + drv_log("Invalid Reference.\n"); + return -EINVAL; + } + + return buffer->get_reserved_region(reserved_region_addr, reserved_region_size); +} + +uint32_t cros_gralloc_driver::get_resolved_drm_format(uint32_t drm_format, uint64_t usage) +{ + return drv_resolve_format(drv_, drm_format, usage); +} + cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd) { /* Assumes driver mutex is held. 
*/ @@ -312,3 +551,13 @@ cros_gralloc_buffer *cros_gralloc_driver::get_buffer(cros_gralloc_handle_t hnd) return nullptr; } + +void cros_gralloc_driver::for_each_handle( + const std::function &function) +{ + std::lock_guard lock(mutex_); + + for (const auto &pair : handles_) { + function(pair.first); + } +} diff --git a/cros_gralloc/cros_gralloc_driver.h b/cros_gralloc/cros_gralloc_driver.h index dea2ac0..9ac4233 100644 --- a/cros_gralloc/cros_gralloc_driver.h +++ b/cros_gralloc/cros_gralloc_driver.h @@ -9,6 +9,7 @@ #include "cros_gralloc_buffer.h" +#include #include #include @@ -26,11 +27,28 @@ class cros_gralloc_driver int32_t retain(buffer_handle_t handle); int32_t release(buffer_handle_t handle); - int32_t lock(buffer_handle_t handle, int32_t acquire_fence, uint32_t map_flags, + int32_t lock(buffer_handle_t handle, int32_t acquire_fence, bool close_acquire_fence, + const struct rectangle *rect, uint32_t map_flags, uint8_t *addr[DRV_MAX_PLANES]); +#ifdef USE_GRALLOC1 + int32_t lock(buffer_handle_t handle, int32_t acquire_fence, uint32_t map_flags, + uint8_t *addr[DRV_MAX_PLANES]); +#endif int32_t unlock(buffer_handle_t handle, int32_t *release_fence); + int32_t invalidate(buffer_handle_t handle); + int32_t flush(buffer_handle_t handle, int32_t *release_fence); + int32_t get_backing_store(buffer_handle_t handle, uint64_t *out_store); + int32_t resource_info(buffer_handle_t handle, uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]); + + int32_t get_reserved_region(buffer_handle_t handle, void **reserved_region_addr, + uint64_t *reserved_region_size); + + uint32_t get_resolved_drm_format(uint32_t drm_format, uint64_t usage); + + void for_each_handle(const std::function &function); private: cros_gralloc_driver(cros_gralloc_driver const &); diff --git a/cros_gralloc/cros_gralloc_handle.h b/cros_gralloc/cros_gralloc_handle.h index cd3edfe..4c53899 100644 --- a/cros_gralloc/cros_gralloc_handle.h +++ b/cros_gralloc/cros_gralloc_handle.h @@ -11,27 +11,56 
@@ #include #define DRV_MAX_PLANES 4 - -/* - * Only use 32-bit integers in the handle. This guarantees that the handle is - * densely packed (i.e, the compiler does not insert any padding). - */ +#define DRV_MAX_FDS (DRV_MAX_PLANES + 1) struct cros_gralloc_handle { native_handle_t base; - int32_t fds[DRV_MAX_PLANES]; + /* + * File descriptors must immediately follow the native_handle_t base and used file + * descriptors must be packed at the beginning of this array to work with + * native_handle_clone(). + * + * This field contains 'num_planes' plane file descriptors followed by an optional metadata + * reserved region file descriptor if 'reserved_region_size' is greater than zero. + */ + int32_t fds[DRV_MAX_FDS]; uint32_t strides[DRV_MAX_PLANES]; uint32_t offsets[DRV_MAX_PLANES]; - uint32_t format_modifiers[2 * DRV_MAX_PLANES]; + uint32_t sizes[DRV_MAX_PLANES]; + uint32_t id; uint32_t width; uint32_t height; - uint32_t format; /* DRM format */ - uint32_t use_flags[2]; /* Buffer creation flags */ + uint32_t format; /* DRM format */ + uint64_t format_modifier; + uint64_t use_flags; /* Buffer creation flags */ uint32_t magic; uint32_t pixel_stride; int32_t droid_format; int32_t usage; /* Android usage. */ -}; + uint32_t num_planes; + uint64_t reserved_region_size; + uint64_t total_size; /* Total allocation size */ + /* + * Name is a null terminated char array located at handle->base.data[handle->name_offset]. + */ + uint32_t name_offset; +#ifdef USE_GRALLOC1 + uint32_t consumer_usage; + uint32_t producer_usage; + uint32_t yuv_color_range; // YUV Color range. 
+ uint32_t is_updated; // frame updated flag + uint32_t is_encoded; // frame encoded flag + uint32_t is_encrypted; + uint32_t is_key_frame; + uint32_t is_interlaced; + uint32_t is_mmc_capable; + uint32_t compression_mode; + uint32_t compression_hint; + uint32_t codec; + uint32_t tiling_mode; + uint32_t format_modifiers[2 * DRV_MAX_PLANES]; +#endif +} __attribute__((packed)); typedef const struct cros_gralloc_handle *cros_gralloc_handle_t; diff --git a/cros_gralloc/cros_gralloc_helpers.cc b/cros_gralloc/cros_gralloc_helpers.cc index e662084..a23585c 100644 --- a/cros_gralloc/cros_gralloc_helpers.cc +++ b/cros_gralloc/cros_gralloc_helpers.cc @@ -6,10 +6,32 @@ #include "cros_gralloc_helpers.h" -#include -#include #include +#ifdef USE_GRALLOC1 +#include "i915_private_android.h" +const char* drmFormat2Str(int drm_format) +{ + static char buf[5]; + char *pDrmFormat = (char*) &drm_format; + snprintf(buf, sizeof(buf), "%c%c%c%c", *pDrmFormat, *(pDrmFormat + 1), + *(pDrmFormat + 2), *(pDrmFormat + 3)); + return buf; +} + +bool is_flex_format(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: + case DRM_FORMAT_FLEX_YCbCr_420_888: + return true; + default: + return false; + } + return false; +} +#endif + uint32_t cros_gralloc_convert_format(int format) { /* @@ -22,10 +44,12 @@ uint32_t cros_gralloc_convert_format(int format) return DRM_FORMAT_ARGB8888; case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: return DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED; + case HAL_PIXEL_FORMAT_RAW16: + return DRM_FORMAT_R16; case HAL_PIXEL_FORMAT_RGB_565: return DRM_FORMAT_RGB565; case HAL_PIXEL_FORMAT_RGB_888: - return DRM_FORMAT_RGB888; + return DRM_FORMAT_BGR888; case HAL_PIXEL_FORMAT_RGBA_8888: return DRM_FORMAT_ABGR8888; case HAL_PIXEL_FORMAT_RGBX_8888: @@ -41,9 +65,19 @@ uint32_t cros_gralloc_convert_format(int format) */ case HAL_PIXEL_FORMAT_BLOB: return DRM_FORMAT_R8; +#if ANDROID_VERSION >= 0x0a00 + case HAL_PIXEL_FORMAT_RGBA_1010102: + return 
DRM_FORMAT_ABGR2101010; + case HAL_PIXEL_FORMAT_RGBA_FP16: + return DRM_FORMAT_ABGR16161616F; +#endif } +#ifdef USE_GRALLOC1 +return i915_private_convert_format(format); +#else return DRM_FORMAT_NONE; +#endif } cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle) @@ -55,41 +89,62 @@ cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle) return hnd; } -int32_t cros_gralloc_sync_wait(int32_t acquire_fence) +int32_t cros_gralloc_sync_wait(int32_t fence, bool close_fence) { - if (acquire_fence < 0) + if (fence < 0) return 0; /* * Wait initially for 1000 ms, and then wait indefinitely. The SYNC_IOC_WAIT * documentation states the caller waits indefinitely on the fence if timeout < 0. */ - int err = sync_wait(acquire_fence, 1000); + int err = sync_wait(fence, 1000); if (err < 0) { - cros_gralloc_error("Timed out on sync wait, err = %s", strerror(errno)); - err = sync_wait(acquire_fence, -1); + drv_log("Timed out on sync wait, err = %s\n", strerror(errno)); + err = sync_wait(fence, -1); if (err < 0) { - cros_gralloc_error("sync wait error = %s", strerror(errno)); + drv_log("sync wait error = %s\n", strerror(errno)); return -errno; } } - err = close(acquire_fence); - if (err) { - cros_gralloc_error("Unable to close fence fd, err = %s", strerror(errno)); - return -errno; + if (close_fence) { + err = close(fence); + if (err) { + drv_log("Unable to close fence fd, err = %s\n", strerror(errno)); + return -errno; + } } return 0; } -void cros_gralloc_log(const char *prefix, const char *file, int line, const char *format, ...) +#ifdef USE_GRALLOC1 +int32_t cros_gralloc_sync_wait(int32_t acquire_fence) { - char buf[50]; - snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line); + if (acquire_fence < 0) + return 0; + + /* + * Wait initially for 1000 ms, and then wait indefinitely. The SYNC_IOC_WAIT + * documentation states the caller waits indefinitely on the fence if timeout < 0. 
+ */ + int err = sync_wait(acquire_fence, 1000); + if (err < 0) { + drv_log("Timed out on sync wait, err = %s", strerror(errno)); + err = sync_wait(acquire_fence, -1); + if (err < 0) { + drv_log("sync wait error = %s", strerror(errno)); + return -errno; + } + } + + err = close(acquire_fence); + if (err) { + drv_log("Unable to close fence fd, err = %s", strerror(errno)); + return -errno; + } - va_list args; - va_start(args, format); - __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args); - va_end(args); + return 0; } +#endif diff --git a/cros_gralloc/cros_gralloc_helpers.h b/cros_gralloc/cros_gralloc_helpers.h index cf90ec8..abd3431 100644 --- a/cros_gralloc/cros_gralloc_helpers.h +++ b/cros_gralloc/cros_gralloc_helpers.h @@ -22,14 +22,12 @@ uint32_t cros_gralloc_convert_format(int32_t format); cros_gralloc_handle_t cros_gralloc_convert_handle(buffer_handle_t handle); -int32_t cros_gralloc_sync_wait(int32_t acquire_fence); - -__attribute__((format(printf, 4, 5))) void cros_gralloc_log(const char *prefix, const char *file, - int line, const char *format, ...); +int32_t cros_gralloc_sync_wait(int32_t fence, bool close_fence); -#define cros_gralloc_error(...) 
\ - do { \ - cros_gralloc_log("CROS_GRALLOC_ERROR", __FILE__, __LINE__, __VA_ARGS__); \ - } while (0) +#ifdef USE_GRALLOC1 +int32_t cros_gralloc_sync_wait(int32_t acquire_fence); +const char *drmFormat2Str(int format); +bool is_flex_format(uint32_t format); +#endif #endif diff --git a/cros_gralloc/cros_gralloc_types.h b/cros_gralloc/cros_gralloc_types.h index 1fa81de..e51a075 100644 --- a/cros_gralloc/cros_gralloc_types.h +++ b/cros_gralloc/cros_gralloc_types.h @@ -7,14 +7,22 @@ #ifndef CROS_GRALLOC_TYPES_H #define CROS_GRALLOC_TYPES_H +#include + struct cros_gralloc_buffer_descriptor { uint32_t width; uint32_t height; - uint32_t consumer_usage; - uint32_t producer_usage; - uint32_t droid_format; + int32_t droid_format; + int32_t droid_usage; uint32_t drm_format; uint64_t use_flags; + uint64_t reserved_region_size; + std::string name; +#ifdef USE_GRALLOC1 + uint32_t consumer_usage; + uint32_t producer_usage; + uint64_t modifier; +#endif }; #endif diff --git a/cros_gralloc/gralloc0/gralloc0.cc b/cros_gralloc/gralloc0/gralloc0.cc index ab05376..170dae9 100644 --- a/cros_gralloc/gralloc0/gralloc0.cc +++ b/cros_gralloc/gralloc0/gralloc0.cc @@ -4,8 +4,10 @@ * found in the LICENSE file. */ +#include "../../util.h" #include "../cros_gralloc_driver.h" +#include #include #include @@ -32,6 +34,11 @@ enum { }; // clang-format on +// Gralloc0 doesn't define a video decoder flag. However, the IAllocator gralloc0 +// passthrough gives the low 32-bits of the BufferUsage flags to gralloc0 in their +// entirety, so we can detect the video decoder flag passed by IAllocator clients. 
+#define BUFFER_USAGE_VIDEO_DECODER (1 << 22) + static uint64_t gralloc0_convert_usage(int usage) { uint64_t use_flags = BO_USE_NONE; @@ -65,15 +72,19 @@ static uint64_t gralloc0_convert_usage(int usage) use_flags |= BO_USE_NONE; if (usage & GRALLOC_USAGE_PROTECTED) use_flags |= BO_USE_PROTECTED; - if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) + if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { + use_flags |= BO_USE_HW_VIDEO_ENCODER; /*HACK: See b/30054495 */ use_flags |= BO_USE_SW_READ_OFTEN; + } if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) use_flags |= BO_USE_CAMERA_WRITE; if (usage & GRALLOC_USAGE_HW_CAMERA_READ) use_flags |= BO_USE_CAMERA_READ; if (usage & GRALLOC_USAGE_RENDERSCRIPT) use_flags |= BO_USE_RENDERSCRIPT; + if (usage & BUFFER_USAGE_VIDEO_DECODER) + use_flags |= BO_USE_HW_VIDEO_DECODER; return use_flags; } @@ -90,32 +101,48 @@ static uint32_t gralloc0_convert_map_usage(int map_usage) return map_flags; } +static int gralloc0_droid_yuv_format(int droid_format) +{ + + return (droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888 || + droid_format == HAL_PIXEL_FORMAT_YV12); +} + static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *handle, int *stride) { int32_t ret; bool supported; struct cros_gralloc_buffer_descriptor descriptor; - auto mod = (struct gralloc0_module *)dev->common.module; + auto mod = (struct gralloc0_module const *)dev->common.module; descriptor.width = w; descriptor.height = h; descriptor.droid_format = format; - descriptor.producer_usage = descriptor.consumer_usage = usage; + descriptor.droid_usage = usage; descriptor.drm_format = cros_gralloc_convert_format(format); descriptor.use_flags = gralloc0_convert_usage(usage); + descriptor.reserved_region_size = 0; supported = mod->driver->is_supported(&descriptor); if (!supported && (usage & GRALLOC_USAGE_HW_COMPOSER)) { descriptor.use_flags &= ~BO_USE_SCANOUT; supported = mod->driver->is_supported(&descriptor); } + if (!supported && (usage & 
GRALLOC_USAGE_HW_VIDEO_ENCODER) && + !gralloc0_droid_yuv_format(format)) { + // Unmask BO_USE_HW_VIDEO_ENCODER in the case of non-yuv formats + // because they are not input to a hw encoder but used as an + // intermediate format (e.g. camera). + descriptor.use_flags &= ~BO_USE_HW_VIDEO_ENCODER; + supported = mod->driver->is_supported(&descriptor); + } if (!supported) { - cros_gralloc_error("Unsupported combination -- HAL format: %u, HAL usage: %u, " - "drv_format: %4.4s, use_flags: %llu", - format, usage, reinterpret_cast(&descriptor.drm_format), - static_cast(descriptor.use_flags)); + drv_log("Unsupported combination -- HAL format: %u, HAL usage: %u, " + "drv_format: %4.4s, use_flags: %llu\n", + format, usage, reinterpret_cast(&descriptor.drm_format), + static_cast(descriptor.use_flags)); return -EINVAL; } @@ -131,7 +158,7 @@ static int gralloc0_alloc(alloc_device_t *dev, int w, int h, int format, int usa static int gralloc0_free(alloc_device_t *dev, buffer_handle_t handle) { - auto mod = (struct gralloc0_module *)dev->common.module; + auto mod = (struct gralloc0_module const *)dev->common.module; return mod->driver->release(handle); } @@ -150,7 +177,7 @@ static int gralloc0_init(struct gralloc0_module *mod, bool initialize_alloc) mod->driver = std::make_unique(); if (mod->driver->init()) { - cros_gralloc_error("Failed to initialize driver."); + drv_log("Failed to initialize driver.\n"); return -ENODEV; } @@ -170,7 +197,8 @@ static int gralloc0_init(struct gralloc0_module *mod, bool initialize_alloc) static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct hw_device_t **dev) { - auto module = (struct gralloc0_module *)mod; + auto const_module = reinterpret_cast(mod); + auto module = const_cast(const_module); if (module->initialized) { *dev = &module->alloc->common; @@ -178,7 +206,7 @@ static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct } if (strcmp(name, GRALLOC_HARDWARE_GPU0)) { - cros_gralloc_error("Incorrect 
device name - %s.", name); + drv_log("Incorrect device name - %s.\n", name); return -EINVAL; } @@ -191,7 +219,8 @@ static int gralloc0_open(const struct hw_module_t *mod, const char *name, struct static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffer_handle_t handle) { - auto mod = (struct gralloc0_module *)module; + auto const_module = reinterpret_cast(module); + auto mod = const_cast(const_module); if (!mod->initialized) if (gralloc0_init(mod, false)) @@ -202,7 +231,7 @@ static int gralloc0_register_buffer(struct gralloc_module_t const *module, buffe static int gralloc0_unregister_buffer(struct gralloc_module_t const *module, buffer_handle_t handle) { - auto mod = (struct gralloc0_module *)module; + auto mod = (struct gralloc0_module const *)module; return mod->driver->release(handle); } @@ -215,12 +244,12 @@ static int gralloc0_lock(struct gralloc_module_t const *module, buffer_handle_t static int gralloc0_unlock(struct gralloc_module_t const *module, buffer_handle_t handle) { int32_t fence_fd, ret; - auto mod = (struct gralloc0_module *)module; + auto mod = (struct gralloc0_module const *)module; ret = mod->driver->unlock(handle, &fence_fd); if (ret) return ret; - ret = cros_gralloc_sync_wait(fence_fd); + ret = cros_gralloc_sync_wait(fence_fd, /*close_acquire_fence=*/true); if (ret) return ret; @@ -234,7 +263,9 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...) uint64_t *out_store; buffer_handle_t handle; uint32_t *out_width, *out_height, *out_stride; - auto mod = (struct gralloc0_module *)module; + uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 }; + uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 }; + auto mod = (struct gralloc0_module const *)module; switch (op) { case GRALLOC_DRM_GET_STRIDE: @@ -252,14 +283,24 @@ static int gralloc0_perform(struct gralloc_module_t const *module, int op, ...) 
handle = va_arg(args, buffer_handle_t); auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } switch (op) { case GRALLOC_DRM_GET_STRIDE: out_stride = va_arg(args, uint32_t *); - *out_stride = hnd->pixel_stride; + ret = mod->driver->resource_info(handle, strides, offsets); + if (ret) + break; + + if (strides[0] != hnd->strides[0]) { + uint32_t bytes_per_pixel = drv_bytes_per_pixel_from_format(hnd->format, 0); + *out_stride = DIV_ROUND_UP(strides[0], bytes_per_pixel); + } else { + *out_stride = hnd->pixel_stride; + } + break; case GRALLOC_DRM_GET_FORMAT: out_format = va_arg(args, int32_t *); @@ -296,21 +337,30 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han int32_t ret; uint32_t map_flags; uint8_t *addr[DRV_MAX_PLANES]; - auto mod = (struct gralloc0_module *)module; + auto mod = (struct gralloc0_module const *)module; + struct rectangle rect = { .x = static_cast(l), + .y = static_cast(t), + .width = static_cast(w), + .height = static_cast(h) }; auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } if (hnd->droid_format == HAL_PIXEL_FORMAT_YCbCr_420_888) { - cros_gralloc_error("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible."); + drv_log("HAL_PIXEL_FORMAT_YCbCr_*_888 format not compatible.\n"); return -EINVAL; } + assert(l >= 0); + assert(t >= 0); + assert(w >= 0); + assert(h >= 0); + map_flags = gralloc0_convert_map_usage(usage); - ret = mod->driver->lock(handle, fence_fd, map_flags, addr); + ret = mod->driver->lock(handle, fence_fd, true, &rect, map_flags, addr); *vaddr = addr[0]; return ret; } @@ -318,7 +368,7 @@ static int gralloc0_lock_async(struct gralloc_module_t const *module, buffer_han static int gralloc0_unlock_async(struct gralloc_module_t const *module, buffer_handle_t handle, int *fence_fd) { - auto mod = (struct 
gralloc0_module *)module; + auto mod = (struct gralloc0_module const *)module; return mod->driver->unlock(handle, fence_fd); } @@ -328,34 +378,54 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff { int32_t ret; uint32_t map_flags; + uint32_t strides[DRV_MAX_PLANES] = { 0, 0, 0, 0 }; + uint32_t offsets[DRV_MAX_PLANES] = { 0, 0, 0, 0 }; uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr }; - auto mod = (struct gralloc0_module *)module; + auto mod = (struct gralloc0_module const *)module; + struct rectangle rect = { .x = static_cast(l), + .y = static_cast(t), + .width = static_cast(w), + .height = static_cast(h) }; auto hnd = cros_gralloc_convert_handle(handle); if (!hnd) { - cros_gralloc_error("Invalid handle."); + drv_log("Invalid handle.\n"); return -EINVAL; } - if ((hnd->droid_format != HAL_PIXEL_FORMAT_YCbCr_420_888) && - (hnd->droid_format != HAL_PIXEL_FORMAT_YV12) && - (hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)) { - cros_gralloc_error("Non-YUV format not compatible."); + if (!gralloc0_droid_yuv_format(hnd->droid_format) && + hnd->droid_format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) { + drv_log("Non-YUV format not compatible.\n"); return -EINVAL; } + assert(l >= 0); + assert(t >= 0); + assert(w >= 0); + assert(h >= 0); + map_flags = gralloc0_convert_map_usage(usage); - ret = mod->driver->lock(handle, fence_fd, map_flags, addr); + ret = mod->driver->lock(handle, fence_fd, true, &rect, map_flags, addr); if (ret) return ret; + if (!map_flags) { + ret = mod->driver->resource_info(handle, strides, offsets); + if (ret) + return ret; + + for (uint32_t plane = 0; plane < DRV_MAX_PLANES; plane++) + addr[plane] = + reinterpret_cast(static_cast(offsets[plane])); + } + switch (hnd->format) { case DRM_FORMAT_NV12: ycbcr->y = addr[0]; ycbcr->cb = addr[1]; ycbcr->cr = addr[1] + 1; - ycbcr->ystride = hnd->strides[0]; - ycbcr->cstride = hnd->strides[1]; + ycbcr->ystride = (!map_flags) ? 
strides[0] : hnd->strides[0]; + ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1]; ycbcr->chroma_step = 2; break; case DRM_FORMAT_YVU420: @@ -363,8 +433,8 @@ static int gralloc0_lock_async_ycbcr(struct gralloc_module_t const *module, buff ycbcr->y = addr[0]; ycbcr->cb = addr[2]; ycbcr->cr = addr[1]; - ycbcr->ystride = hnd->strides[0]; - ycbcr->cstride = hnd->strides[1]; + ycbcr->ystride = (!map_flags) ? strides[0] : hnd->strides[0]; + ycbcr->cstride = (!map_flags) ? strides[1] : hnd->strides[1]; ycbcr->chroma_step = 1; break; default: @@ -402,6 +472,8 @@ struct gralloc0_module HAL_MODULE_INFO_SYM = { .lockAsync = gralloc0_lock_async, .unlockAsync = gralloc0_unlock_async, .lockAsync_ycbcr = gralloc0_lock_async_ycbcr, + .validateBufferSize = NULL, + .getTransportSize = NULL, }, .alloc = nullptr, diff --git a/cros_gralloc/gralloc0/tests/gralloctest.c b/cros_gralloc/gralloc0/tests/gralloctest.c index 9fc3b88..f663cd0 100644 --- a/cros_gralloc/gralloc0/tests/gralloctest.c +++ b/cros_gralloc/gralloc0/tests/gralloctest.c @@ -14,6 +14,8 @@ #define _GNU_SOURCE #include #include +#include +#include #include #include @@ -40,10 +42,11 @@ } while (0) /* Private API enumeration -- see */ -enum { GRALLOC_DRM_GET_STRIDE, - GRALLOC_DRM_GET_FORMAT, - GRALLOC_DRM_GET_DIMENSIONS, - GRALLOC_DRM_GET_BACKING_STORE, +enum { + GRALLOC_DRM_GET_STRIDE, + GRALLOC_DRM_GET_FORMAT, + GRALLOC_DRM_GET_DIMENSIONS, + GRALLOC_DRM_GET_BACKING_STORE, }; struct gralloctest_context { @@ -93,7 +96,7 @@ static struct combinations combos[] = { // clang-format on struct grallocinfo { - buffer_handle_t handle; /* handle to the buffer */ + buffer_handle_t handle; /* handle to the buffer */ int w; /* width of buffer */ int h; /* height of buffer */ int format; /* format of the buffer */ diff --git a/cros_gralloc/gralloc1/cros_gralloc1_module.cc b/cros_gralloc/gralloc1/cros_gralloc1_module.cc new file mode 100644 index 0000000..2365c70 --- /dev/null +++ b/cros_gralloc/gralloc1/cros_gralloc1_module.cc 
@@ -0,0 +1,812 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#undef LOG_TAG +#define LOG_TAG "CrosGralloc1 " + +#include "cros_gralloc1_module.h" + +#include + +#include + +#include "i915_private_android.h" + +#include "i915_private_android_types.h" + +template static gralloc1_function_pointer_t asFP(T function) +{ + static_assert(std::is_same::value, "Incompatible function pointer"); + return reinterpret_cast(function); +} + +uint64_t cros_gralloc1_convert_usage(uint64_t producer_flags, uint64_t consumer_flags) +{ + uint64_t usage = BO_USE_NONE; + + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CURSOR) + usage |= BO_USE_CURSOR; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CPU_READ) + usage |= BO_USE_SW_READ_RARELY; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CPU_READ_OFTEN) + usage |= BO_USE_SW_READ_OFTEN; + if ((consumer_flags & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER) || + (consumer_flags & GRALLOC1_CONSUMER_USAGE_CLIENT_TARGET)) { + /* HWC wants to use display hardware, but can defer to OpenGL. 
*/ + usage |= BO_USE_SCANOUT | BO_USE_TEXTURE; + } else if (consumer_flags & GRALLOC1_CONSUMER_USAGE_GPU_TEXTURE) { + usage |= BO_USE_TEXTURE; + } + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_VIDEO_ENCODER) + /*HACK: See b/30054495 */ + usage |= BO_USE_SW_READ_OFTEN; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CAMERA) + usage |= BO_USE_CAMERA_READ; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_RENDERSCRIPT) + /* We use CPU for compute. */ + usage |= BO_USE_RENDERSCRIPT; + + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_READ) + usage |= BO_USE_SW_READ_RARELY; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_READ_OFTEN) + usage |= BO_USE_SW_READ_OFTEN; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_WRITE) + usage |= BO_USE_SW_WRITE_RARELY; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN) + usage |= BO_USE_SW_WRITE_OFTEN; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_GPU_RENDER_TARGET) + usage |= BO_USE_RENDERING; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_VIDEO_DECODER) + /* Video wants to use display hardware, but can defer to OpenGL. 
*/ + usage |= BO_USE_SCANOUT | BO_USE_RENDERING; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_PROTECTED) + usage |= BO_USE_PROTECTED; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CAMERA) + usage |= BO_USE_CAMERA_WRITE; + + return usage; +} + +uint64_t cros_gralloc1_convert_map_usage(uint64_t producer_flags, uint64_t consumer_flags) +{ + uint64_t usage = BO_USE_NONE; + + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CPU_READ) + usage |= BO_MAP_READ; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_CPU_READ_OFTEN) + usage |= BO_MAP_READ; + if (consumer_flags & GRALLOC1_CONSUMER_USAGE_VIDEO_ENCODER) + /*HACK: See b/30054495 */ + usage |= BO_MAP_READ; + + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_READ) + usage |= BO_MAP_READ; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_READ_OFTEN) + usage |= BO_MAP_READ; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_WRITE) + usage |= BO_MAP_WRITE; + if (producer_flags & GRALLOC1_PRODUCER_USAGE_CPU_WRITE_OFTEN) + usage |= BO_MAP_WRITE; + + return usage; +} + +bool IsSupportedYUVFormat(uint32_t droid_format) +{ + switch (droid_format) { + case HAL_PIXEL_FORMAT_YCbCr_420_888: + case HAL_PIXEL_FORMAT_YV12: + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + return true; + default: + return i915_private_supported_yuv_format(droid_format); + } + + return false; +} + +namespace android +{ + +/* CrosGralloc1 is a Singleton and pCrosGralloc1 holds pointer to its instance*/ +static CrosGralloc1 *pCrosGralloc1 = NULL; +static uint32_t ref_count = 0; +//static SpinLock global_lock_; + +CrosGralloc1::CrosGralloc1() +{ + getCapabilities = getCapabilitiesHook; + getFunction = getFunctionHook; + common.tag = HARDWARE_DEVICE_TAG; + common.version = HARDWARE_MODULE_API_VERSION(1, 0); + common.close = HookDevClose; +} + +CrosGralloc1::~CrosGralloc1() +{ +} + +bool CrosGralloc1::Init() +{ + if (driver) + return true; + + driver = std::make_unique(); + if (driver->init()) { + drv_log("Failed to initialize driver."); + return false; + } + 
+ return true; +} + +void CrosGralloc1::doGetCapabilities(uint32_t *outCount, int32_t *outCapabilities) +{ + if (outCapabilities == nullptr) { + *outCount = 0; + } +} + +gralloc1_function_pointer_t CrosGralloc1::doGetFunction(int32_t intDescriptor) +{ + constexpr auto lastDescriptor = static_cast(GRALLOC1_LAST_FUNCTION); + if (intDescriptor < 0 || ((intDescriptor > lastDescriptor) && ((intDescriptor < 100) || (intDescriptor > GRALLOC1_LAST_CUSTOM)))) { + drv_log("Invalid function descriptor %d", intDescriptor); + return nullptr; + } + + auto descriptor = static_cast(intDescriptor); + switch (descriptor) { + case GRALLOC1_FUNCTION_DUMP: + return asFP(dumpHook); + case GRALLOC1_FUNCTION_CREATE_DESCRIPTOR: + return asFP(createDescriptorHook); + case GRALLOC1_FUNCTION_DESTROY_DESCRIPTOR: + return asFP(destroyDescriptorHook); + case GRALLOC1_FUNCTION_SET_CONSUMER_USAGE: + return asFP(setConsumerUsageHook); + case GRALLOC1_FUNCTION_SET_DIMENSIONS: + return asFP(setDimensionsHook); + case GRALLOC1_FUNCTION_SET_FORMAT: + return asFP(setFormatHook); + case GRALLOC1_FUNCTION_SET_PRODUCER_USAGE: + return asFP(setProducerUsageHook); + case GRALLOC1_FUNCTION_GET_BACKING_STORE: + return asFP(getBackingStoreHook); + case GRALLOC1_FUNCTION_GET_CONSUMER_USAGE: + return asFP(getConsumerUsageHook); + case GRALLOC1_FUNCTION_GET_DIMENSIONS: + return asFP(getDimensionsHook); + case GRALLOC1_FUNCTION_GET_FORMAT: + return asFP(getFormatHook); + case GRALLOC1_FUNCTION_GET_PRODUCER_USAGE: + return asFP(getProducerUsageHook); + case GRALLOC1_FUNCTION_GET_STRIDE: + return asFP(getStrideHook); + case GRALLOC1_FUNCTION_GET_BYTE_STRIDE: + return asFP(getByteStrideHook); + case GRALLOC1_FUNCTION_GET_PRIME: + return asFP(getPrimeHook); + case GRALLOC1_FUNCTION_ALLOCATE: + if (driver) { + return asFP(allocateBuffers); + } else { + return nullptr; + } + case GRALLOC1_FUNCTION_RETAIN: + return asFP(managementHook<&CrosGralloc1::retain>); + case GRALLOC1_FUNCTION_RELEASE: + return 
asFP(managementHook<&CrosGralloc1::release>); + case GRALLOC1_FUNCTION_GET_NUM_FLEX_PLANES: + return asFP(getNumFlexPlanesHook); + case GRALLOC1_FUNCTION_LOCK: + return asFP(lockHook); + case GRALLOC1_FUNCTION_LOCK_FLEX: + return asFP( + lockHook); + case GRALLOC1_FUNCTION_UNLOCK: + return asFP(unlockHook); + case GRALLOC1_FUNCTION_SET_MODIFIER: + return asFP(setModifierHook); + case GRALLOC1_FUNCTION_SET_INTERLACE: + return asFP(setInterlaceHook); + case GRALLOC1_FUNCTION_SET_PROTECTIONINFO: + return asFP(setProtectionInfoHook); + case GRALLOC1_FUNCTION_VALIDATE_BUFFER_SIZE: + return asFP(validateBufferSizeHook); + case GRALLOC1_FUNCTION_GET_TRANSPORT_SIZE: + return asFP(getTransportSizeHook); + case GRALLOC1_FUNCTION_IMPORT_BUFFER: + return asFP(importBufferHook); + case GRALLOC1_FUNCTION_INVALID: + drv_log("Invalid function descriptor"); + return nullptr; + } + + drv_log("Unknown function descriptor: %d", intDescriptor); + return nullptr; +} + +void CrosGralloc1::dump(uint32_t *outSize, char *outBuffer) +{ + drv_log("dump(%u (%p), %p", outSize ? 
*outSize : 0, outSize, outBuffer); +} + +int32_t CrosGralloc1::createDescriptor(gralloc1_buffer_descriptor_t *outDescriptor) +{ + if (!outDescriptor) + return CROS_GRALLOC_ERROR_BAD_DESCRIPTOR; + + struct cros_gralloc_buffer_descriptor *hnd = new cros_gralloc_buffer_descriptor(); + *outDescriptor = (gralloc1_buffer_descriptor_t)hnd; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::destroyDescriptor(gralloc1_buffer_descriptor_t descriptor) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptor; + delete hnd; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setConsumerUsage(gralloc1_buffer_descriptor_t descriptorId, uint64_t intUsage) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptorId; + hnd->consumer_usage = intUsage; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setProducerUsage(gralloc1_buffer_descriptor_t descriptorId, uint64_t intUsage) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptorId; + hnd->producer_usage = intUsage; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setDimensions(gralloc1_buffer_descriptor_t descriptorId, uint32_t width, + uint32_t height) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptorId; + hnd->width = width; + hnd->height = height; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setFormat(gralloc1_buffer_descriptor_t descriptorId, int32_t format) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptorId; + hnd->droid_format = format; + hnd->drm_format = cros_gralloc_convert_format(format); + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setModifier(gralloc1_buffer_descriptor_t descriptorId, uint64_t modifier) +{ + auto hnd = (struct cros_gralloc_buffer_descriptor *)descriptorId; + hnd->modifier = modifier; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setInterlace(buffer_handle_t buffer, uint32_t interlace) +{ + auto hnd = (cros_gralloc_handle*) 
cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + hnd->is_interlaced = interlace; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::setProtectionInfo(buffer_handle_t buffer, uint32_t protection_info) +{ + auto hnd = (cros_gralloc_handle *)cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + hnd->is_encrypted = protection_info; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::validateBufferSize(buffer_handle_t buffer, + const gralloc1_buffer_descriptor_info_t *descriptorInfo, + uint32_t stride) +{ + auto hnd = (cros_gralloc_handle *)cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + if (!is_flex_format(cros_gralloc_convert_format(descriptorInfo->format)) && + cros_gralloc_convert_format(descriptorInfo->format) != hnd->format) { + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + + // Do not support GRALLOC1_CAPABILITY_LAYERED_BUFFERS, only allocate buffers with a + // single layer. 
+ if (descriptorInfo->layerCount != 1) { + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + if (stride > hnd->pixel_stride || descriptorInfo->width > hnd->width || + descriptorInfo->height > hnd->height) { + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getTransportSize(buffer_handle_t buffer, uint32_t *outNumFds, + uint32_t *outNumInts) +{ + auto hnd = (cros_gralloc_handle *)cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + *outNumFds = hnd->base.numFds; + *outNumInts = hnd->base.numInts; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::importBuffer(const buffer_handle_t rawHandle, buffer_handle_t *outBuffer) +{ + if (!rawHandle) { + *outBuffer = NULL; + return GRALLOC1_ERROR_BAD_HANDLE; + } + auto error = driver->retain(rawHandle); + if (error != GRALLOC1_ERROR_NONE) { + *outBuffer = NULL; + return error; + } + + *outBuffer = rawHandle; + return GRALLOC1_ERROR_NONE; +} + +int32_t CrosGralloc1::allocate(struct cros_gralloc_buffer_descriptor *descriptor, + buffer_handle_t *outBufferHandle) +{ + // If this function is being called, it's because we handed out its function + // pointer, which only occurs when mDevice has been loaded successfully and + // we are permitted to allocate + uint64_t usage = + cros_gralloc1_convert_usage(descriptor->producer_usage, descriptor->consumer_usage); + descriptor->use_flags = usage; + bool supported = driver->is_supported(descriptor); + if (!supported && (descriptor->consumer_usage & GRALLOC1_CONSUMER_USAGE_HWCOMPOSER)) { + descriptor->use_flags &= ~BO_USE_SCANOUT; + supported = driver->is_supported(descriptor); + } + + if (!supported) { + drv_log("Unsupported combination -- HAL format: %u, HAL flags: %u, " + "drv_format: %u, drv_flags: %llu", + descriptor->droid_format, usage, descriptor->drm_format, + static_cast(descriptor->use_flags)); + return CROS_GRALLOC_ERROR_UNSUPPORTED; + } + if (driver->allocate(descriptor, 
outBufferHandle)) + return CROS_GRALLOC_ERROR_NO_RESOURCES; + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::allocateBuffers(gralloc1_device_t *device, uint32_t numDescriptors, + const gralloc1_buffer_descriptor_t *descriptors, + buffer_handle_t *outBuffers) +{ + auto adapter = getAdapter(device); + for (uint32_t i = 0; i < numDescriptors; i++) { + auto descriptor = (struct cros_gralloc_buffer_descriptor *)descriptors[i]; + if (!descriptor) { + return CROS_GRALLOC_ERROR_BAD_DESCRIPTOR; + } + + buffer_handle_t bufferHandle = nullptr; + int32_t error = adapter->allocate(descriptor, &bufferHandle); + if (error != CROS_GRALLOC_ERROR_NONE) { + return error; + } + + outBuffers[i] = bufferHandle; + } + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::retain(buffer_handle_t bufferHandle) +{ + if (driver->retain(bufferHandle)) + return CROS_GRALLOC_ERROR_BAD_HANDLE; + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::release(buffer_handle_t bufferHandle) +{ + if (!bufferHandle) { + drv_log("Failed to freeBuffer, empty handle.\n"); + return GRALLOC1_ERROR_BAD_HANDLE; + } + + int ret = driver->release(bufferHandle); + if (ret) { + drv_log("Failed to release handle, bad handle.\n"); + return GRALLOC1_ERROR_BAD_HANDLE; + } + + + return GRALLOC1_ERROR_NONE; +} + +int32_t CrosGralloc1::lock(buffer_handle_t bufferHandle, gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, + const gralloc1_rect_t &accessRegion, void **outData, + int32_t acquireFence) +{ + uint64_t map_flags; + uint8_t *addr[DRV_MAX_PLANES]; + + auto hnd = cros_gralloc_convert_handle(bufferHandle); + if (!hnd) { + drv_log("Invalid handle."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + map_flags = cros_gralloc1_convert_map_usage(producerUsage, consumerUsage); + + if (driver->lock(bufferHandle, acquireFence, map_flags, addr)) { + drv_log("Plz switch to mapper 4.0 or call importBuffer & freeBuffer with mapper 2.0 before lock"); + 
buffer_handle_t buffer_handle = native_handle_clone(bufferHandle); + auto error = retain(buffer_handle); + if (error != GRALLOC1_ERROR_NONE) { + delete buffer_handle; + return error; + } + bufferHandle = buffer_handle; + if (driver->lock(bufferHandle, acquireFence, map_flags, addr)) + return CROS_GRALLOC_ERROR_BAD_HANDLE; + delete buffer_handle; + } + + *outData = addr[0]; + + return CROS_GRALLOC_ERROR_NONE; +} + +thread_local android_flex_plane_t ycbcrplanes[3]; + +int32_t update_flex_layout(struct android_ycbcr *ycbcr, struct android_flex_layout *outFlexLayout) +{ + outFlexLayout->format = FLEX_FORMAT_YCbCr; + outFlexLayout->num_planes = 3; + for (uint32_t i = 0; i < outFlexLayout->num_planes; i++) { + ycbcrplanes[i].bits_per_component = 8; + ycbcrplanes[i].bits_used = 8; + } + + ycbcrplanes[0].top_left = static_cast(ycbcr->y); + ycbcrplanes[0].component = FLEX_COMPONENT_Y; + ycbcrplanes[0].h_increment = 1; + ycbcrplanes[0].v_increment = static_cast(ycbcr->ystride); + + ycbcrplanes[1].top_left = static_cast(ycbcr->cb); + ycbcrplanes[1].component = FLEX_COMPONENT_Cb; + ycbcrplanes[1].h_increment = static_cast(ycbcr->chroma_step); + ycbcrplanes[1].v_increment = static_cast(ycbcr->cstride); + + ycbcrplanes[2].top_left = static_cast(ycbcr->cr); + ycbcrplanes[2].component = FLEX_COMPONENT_Cr; + ycbcrplanes[2].h_increment = static_cast(ycbcr->chroma_step); + ycbcrplanes[2].v_increment = static_cast(ycbcr->cstride); + + outFlexLayout->planes = ycbcrplanes; + return 0; +} + +int32_t CrosGralloc1::lockFlex(buffer_handle_t bufferHandle, + gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, + const gralloc1_rect_t &accessRegion, + struct android_flex_layout *outData, int32_t acquireFence) +{ + int32_t ret = -EINVAL; + struct android_ycbcr ycbcrData; + + /*Check the format and support only for YUV format */ + auto hnd = cros_gralloc_convert_handle(bufferHandle); + if (!hnd) { + drv_log("lockFlex: Invalid handle."); + return 
CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + if (!IsSupportedYUVFormat(hnd->droid_format)) { + drv_log("lockFlex: Non-YUV format not compatible."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + ret = lockYCbCr(bufferHandle, producerUsage, consumerUsage, accessRegion, &ycbcrData, + acquireFence); + + /* convert the data in flex format*/ + update_flex_layout(&ycbcrData, outData); + + return ret; +} + +int32_t CrosGralloc1::lockYCbCr(buffer_handle_t bufferHandle, + gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, + const gralloc1_rect_t &accessRegion, struct android_ycbcr *ycbcr, + int32_t acquireFence) +{ + uint64_t map_flags; + uint8_t *addr[DRV_MAX_PLANES] = { nullptr, nullptr, nullptr, nullptr }; + + auto hnd = cros_gralloc_convert_handle(bufferHandle); + if (!hnd) { + drv_log("Invalid handle."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + if (!IsSupportedYUVFormat(hnd->droid_format)) { + drv_log("Non-YUV format not compatible."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + map_flags = cros_gralloc1_convert_map_usage(producerUsage, consumerUsage); + + if (driver->lock(bufferHandle, acquireFence, map_flags, addr)) { + drv_log("Plz switch to mapper 4.0 or call importBuffer & freeBuffer with mapper 2.0 before lockFlex"); + buffer_handle_t buffer_handle = native_handle_clone(bufferHandle); + auto error = retain(buffer_handle); + if (error != GRALLOC1_ERROR_NONE) { + delete buffer_handle; + return error; + } + bufferHandle = buffer_handle; + if (driver->lock(bufferHandle, acquireFence, map_flags, addr)) + return CROS_GRALLOC_ERROR_BAD_HANDLE; + driver->release(buffer_handle); + delete buffer_handle; + } + + switch (hnd->format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV12_Y_TILED_INTEL: + ycbcr->y = addr[0]; + ycbcr->cb = addr[1]; + ycbcr->cr = addr[1] + 1; + ycbcr->ystride = hnd->strides[0]; + ycbcr->cstride = hnd->strides[1]; + ycbcr->chroma_step = 2; + break; + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU420_ANDROID: + 
ycbcr->y = addr[0]; + ycbcr->cb = addr[2]; + ycbcr->cr = addr[1]; + ycbcr->ystride = hnd->strides[0]; + ycbcr->cstride = hnd->strides[1]; + ycbcr->chroma_step = 1; + break; + case DRM_FORMAT_P010: + ycbcr->y = addr[0]; + ycbcr->cb = addr[1]; + ycbcr->cr = addr[1] + 2; + ycbcr->ystride = hnd->strides[0]; + ycbcr->cstride = hnd->strides[1]; + ycbcr->chroma_step = 4; + break; + default: + return CROS_GRALLOC_ERROR_UNSUPPORTED; + } + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::unlock(buffer_handle_t bufferHandle, int32_t *outReleaseFence) +{ + if (driver->unlock(bufferHandle, outReleaseFence)) + return CROS_GRALLOC_ERROR_BAD_HANDLE; + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getNumFlexPlanes(buffer_handle_t buffer, uint32_t *outNumPlanes) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + drv_log("Invalid handle."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outNumPlanes = drv_num_planes_from_format(hnd->format); + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getBackingStore(buffer_handle_t buffer, gralloc1_backing_store_t *outStore) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + drv_log("Invalid handle."); + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + if (driver->get_backing_store(buffer, outStore)) + return CROS_GRALLOC_ERROR_BAD_HANDLE; + + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getConsumerUsage(buffer_handle_t buffer, + uint64_t * /*gralloc1_consumer_usage_t*/ outUsage) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outUsage = hnd->consumer_usage; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getDimensions(buffer_handle_t buffer, uint32_t *outWidth, uint32_t *outHeight) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outWidth = hnd->width; + *outHeight = hnd->height; + return 
CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getFormat(buffer_handle_t buffer, int32_t *outFormat) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outFormat = hnd->droid_format; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getProducerUsage(buffer_handle_t buffer, + uint64_t * /*gralloc1_producer_usage_t*/ outUsage) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outUsage = hnd->producer_usage; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getStride(buffer_handle_t buffer, uint32_t *outStride) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *outStride = hnd->pixel_stride; + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getByteStride(buffer_handle_t buffer, uint32_t *outStride, uint32_t size) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + + if (!outStride) + return -EINVAL; + + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + if (size != drv_num_planes_from_format(hnd->format)) { + drv_log("Invalid array size- %d", size); + return -EINVAL; + } + + memcpy(outStride, hnd->strides, sizeof(*outStride) * size); + return CROS_GRALLOC_ERROR_NONE; +} + +int32_t CrosGralloc1::getPrime(buffer_handle_t buffer, uint32_t *prime) +{ + auto hnd = cros_gralloc_convert_handle(buffer); + if (!hnd) { + return CROS_GRALLOC_ERROR_BAD_HANDLE; + } + + *prime = hnd->fds[0]; + return CROS_GRALLOC_ERROR_NONE; +} + +// static +int CrosGralloc1::HookDevOpen(const struct hw_module_t *mod, const char *name, + struct hw_device_t **device) +{ + if (strcmp(name, GRALLOC_HARDWARE_MODULE_ID)) { + drv_log("Invalid module name- %s", name); + return -EINVAL; + } + + //ScopedSpinLock lock(global_lock_); + std::lock_guard lock(std::mutex); + ref_count++; + + if (pCrosGralloc1 != NULL) { + *device = &pCrosGralloc1->common; + return 0; 
+ } else + pCrosGralloc1 = new CrosGralloc1(); + + std::unique_ptr ctx(pCrosGralloc1); + if (!ctx) { + drv_log("Failed to allocate CrosGralloc1"); + return -ENOMEM; + } + + if (!ctx->Init()) { + drv_log("Failed to initialize CrosGralloc1. \n"); + return -EINVAL; + } + + ctx->common.module = const_cast(mod); + *device = &ctx->common; + ctx.release(); + return 0; +} + +// static +int CrosGralloc1::HookDevClose(hw_device_t * /*dev*/) +{ + //ScopedSpinLock lock(global_lock_); + std::lock_guard lock(std::mutex); + if (ref_count > 0) { + ref_count--; + } + + if (ref_count > 0) { + return 0; + } + + if (pCrosGralloc1) { + delete pCrosGralloc1; + pCrosGralloc1 = NULL; + } + + return 0; +} + +} // namespace android + +static struct hw_module_methods_t cros_gralloc_module_methods = { + .open = android::CrosGralloc1::HookDevOpen, +}; + +hw_module_t HAL_MODULE_INFO_SYM = { + .tag = HARDWARE_MODULE_TAG, + .module_api_version = HARDWARE_MODULE_API_VERSION(1, 0), + .id = GRALLOC_HARDWARE_MODULE_ID, + .name = "Gralloc 1.0 module", + .author = "Intel Android", + .methods = &cros_gralloc_module_methods, +}; diff --git a/cros_gralloc/gralloc1/cros_gralloc1_module.h b/cros_gralloc/gralloc1/cros_gralloc1_module.h new file mode 100644 index 0000000..247b456 --- /dev/null +++ b/cros_gralloc/gralloc1/cros_gralloc1_module.h @@ -0,0 +1,355 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef CROS_GRALLOC1_MODULE_H +#define CROS_GRALLOC1_MODULE_H + +#include + +#include "cros_gralloc_driver.h" + +#include + +#include "drv.h" + +struct cros_gralloc_module; + +namespace android +{ + +typedef enum { + CROS_GRALLOC_ERROR_NONE = 0, + CROS_GRALLOC_ERROR_BAD_DESCRIPTOR = 1, + CROS_GRALLOC_ERROR_BAD_HANDLE = 2, + CROS_GRALLOC_ERROR_BAD_VALUE = 3, + CROS_GRALLOC_ERROR_NOT_SHARED = 4, + CROS_GRALLOC_ERROR_NO_RESOURCES = 5, + CROS_GRALLOC_ERROR_UNDEFINED = 6, + CROS_GRALLOC_ERROR_UNSUPPORTED = 7, +} cros_gralloc_error_t; + +class CrosGralloc1 : public gralloc1_device_t +{ + public: + CrosGralloc1(); + ~CrosGralloc1(); + + bool Init(); + + static int HookDevOpen(const struct hw_module_t *mod, const char *name, + struct hw_device_t **device); + static int HookDevClose(hw_device_t *dev); + + private: + static inline CrosGralloc1 *getAdapter(gralloc1_device_t *device) + { + return static_cast(device); + } + + // getCapabilities + + void doGetCapabilities(uint32_t *outCount, + int32_t * /*gralloc1_capability_t*/ outCapabilities); + static void getCapabilitiesHook(gralloc1_device_t *device, uint32_t *outCount, + int32_t * /*gralloc1_capability_t*/ outCapabilities) + { + getAdapter(device)->doGetCapabilities(outCount, outCapabilities); + }; + + // getFunction + + gralloc1_function_pointer_t + doGetFunction(int32_t /*gralloc1_function_descriptor_t*/ descriptor); + static gralloc1_function_pointer_t + getFunctionHook(gralloc1_device_t *device, + int32_t /*gralloc1_function_descriptor_t*/ descriptor) + { + return getAdapter(device)->doGetFunction(descriptor); + } + + // dump + + void dump(uint32_t *outSize, char *outBuffer); + static void dumpHook(gralloc1_device_t *device, uint32_t *outSize, char *outBuffer) + { + return getAdapter(device)->dump(outSize, outBuffer); + } + + // Buffer descriptor functions + + int32_t setConsumerUsage(gralloc1_buffer_descriptor_t descriptorId, uint64_t intUsage); + + int32_t setProducerUsage(gralloc1_buffer_descriptor_t 
descriptorId, uint64_t intUsage); + + int32_t setDimensions(gralloc1_buffer_descriptor_t descriptorId, uint32_t width, + uint32_t height); + + int32_t setFormat(gralloc1_buffer_descriptor_t descriptorId, int32_t format); + + int32_t setInterlace(buffer_handle_t buffer, uint32_t interlace); + int32_t setProtectionInfo(buffer_handle_t buffer, uint32_t protection_info); + + int32_t createDescriptor(gralloc1_buffer_descriptor_t *outDescriptor); + static int32_t createDescriptorHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t *outDescriptor) + { + return getAdapter(device)->createDescriptor(outDescriptor); + } + + int32_t destroyDescriptor(gralloc1_buffer_descriptor_t descriptor); + static int32_t destroyDescriptorHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptor) + { + return getAdapter(device)->destroyDescriptor(descriptor); + } + + static int32_t setConsumerUsageHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptorId, + uint64_t intUsage) + { + return getAdapter(device)->setConsumerUsage(descriptorId, intUsage); + } + + static int32_t setDimensionsHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptorId, uint32_t width, + uint32_t height) + { + return getAdapter(device)->setDimensions(descriptorId, width, height); + } + + static int32_t setFormatHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptorId, int32_t format) + { + return getAdapter(device)->setFormat(descriptorId, format); + } + + static int32_t setInterlaceHook(gralloc1_device_t *device, + buffer_handle_t buffer, uint32_t interlace) + { + return getAdapter(device)->setInterlace(buffer, interlace); + } + + static int32_t setProtectionInfoHook(gralloc1_device_t *device, + buffer_handle_t buffer, uint32_t protection_info) + { + return getAdapter(device)->setProtectionInfo(buffer, protection_info); + } + + static int32_t setProducerUsageHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptorId, + 
uint64_t intUsage) + { + return getAdapter(device)->setProducerUsage(descriptorId, intUsage); + } + + int32_t getNumFlexPlanes(buffer_handle_t buffer, uint32_t *outNumPlanes); + static int32_t getNumFlexPlanesHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *outNumPlanes) + { + return getAdapter(device)->getNumFlexPlanes(buffer, outNumPlanes); + } + + int32_t getBackingStore(buffer_handle_t buffer, gralloc1_backing_store_t *outStore); + static int32_t getBackingStoreHook(gralloc1_device_t *device, buffer_handle_t buffer, + gralloc1_backing_store_t *outStore) + { + return getAdapter(device)->getBackingStore(buffer, outStore); + } + + int32_t getConsumerUsage(buffer_handle_t buffer, + uint64_t * /*gralloc1_consumer_usage_t*/ outUsage); + static int32_t getConsumerUsageHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint64_t * /*gralloc1_consumer_usage_t*/ outUsage) + { + return getAdapter(device)->getConsumerUsage(buffer, outUsage); + } + + int32_t getDimensions(buffer_handle_t buffer, uint32_t *outWidth, uint32_t *outHeight); + static int32_t getDimensionsHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *outWidth, uint32_t *outHeight) + { + return getAdapter(device)->getDimensions(buffer, outWidth, outHeight); + } + + int32_t getFormat(buffer_handle_t buffer, int32_t *outFormat); + static int32_t getFormatHook(gralloc1_device_t *device, buffer_handle_t buffer, + int32_t *outFormat) + { + return getAdapter(device)->getFormat(buffer, outFormat); + } + + int32_t validateBufferSize(buffer_handle_t buffer, + const gralloc1_buffer_descriptor_info_t *descriptorInfo, + uint32_t stride); + static int32_t + validateBufferSizeHook(gralloc1_device_t *device, buffer_handle_t buffer, + const gralloc1_buffer_descriptor_info_t *descriptorInfo, + uint32_t stride) + { + return getAdapter(device)->validateBufferSize(buffer, descriptorInfo, stride); + } + + int32_t getTransportSize(buffer_handle_t buffer, uint32_t *outNumFds, uint32_t 
*outNumInts); + static int32_t getTransportSizeHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *outNumFds, uint32_t *outNumInts) + { + return getAdapter(device)->getTransportSize(buffer, outNumFds, outNumInts); + } + + int32_t importBuffer(const buffer_handle_t rawHandle, buffer_handle_t *outBuffer); + static int32_t importBufferHook(gralloc1_device_t *device, const buffer_handle_t rawHandle, + buffer_handle_t *outBuffer) + { + return getAdapter(device)->importBuffer(rawHandle, outBuffer); + } + + int32_t getProducerUsage(buffer_handle_t buffer, + uint64_t * /*gralloc1_producer_usage_t*/ outUsage); + static int32_t getProducerUsageHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint64_t * /*gralloc1_producer_usage_t*/ outUsage) + { + return getAdapter(device)->getProducerUsage(buffer, outUsage); + } + + int32_t getStride(buffer_handle_t buffer, uint32_t *outStride); + static int32_t getStrideHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *outStride) + { + return getAdapter(device)->getStride(buffer, outStride); + } + + int32_t getByteStride(buffer_handle_t buffer, uint32_t *outStride, uint32_t size); + static int32_t getByteStrideHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *outStride, uint32_t size) + { + return getAdapter(device)->getByteStride(buffer, outStride, size); + } + + int32_t getPrime(buffer_handle_t buffer, uint32_t *prime); + static int32_t getPrimeHook(gralloc1_device_t *device, buffer_handle_t buffer, + uint32_t *prime) + { + return getAdapter(device)->getPrime(buffer, prime); + } + + // Buffer Management functions + int32_t allocate(struct cros_gralloc_buffer_descriptor *descriptor, + buffer_handle_t *outBufferHandle); + static int32_t allocateBuffers(gralloc1_device_t *device, uint32_t numDescriptors, + const gralloc1_buffer_descriptor_t *descriptors, + buffer_handle_t *outBuffers); + + int32_t release(buffer_handle_t bufferHandle); + int32_t retain(buffer_handle_t 
bufferHandle); + + // Member function pointer 'member' will either be retain or release + template + static int32_t managementHook(gralloc1_device_t *device, buffer_handle_t bufferHandle) + { + auto adapter = getAdapter(device); + return ((*adapter).*member)(bufferHandle); + } + + // Buffer access functions + int32_t lock(buffer_handle_t bufferHandle, gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, const gralloc1_rect_t &accessRegion, + void **outData, int32_t acquireFence); + int32_t lockFlex(buffer_handle_t bufferHandle, gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, + const gralloc1_rect_t &accessRegion, struct android_flex_layout *outFlex, + int32_t acquireFence); + int32_t lockYCbCr(buffer_handle_t bufferHandle, gralloc1_producer_usage_t producerUsage, + gralloc1_consumer_usage_t consumerUsage, + const gralloc1_rect_t &accessRegion, struct android_ycbcr *outFlex, + int32_t acquireFence); + + template + static int32_t lockHook(gralloc1_device_t *device, buffer_handle_t bufferHandle, + uint64_t /*gralloc1_producer_usage_t*/ uintProducerUsage, + uint64_t /*gralloc1_consumer_usage_t*/ uintConsumerUsage, + const gralloc1_rect_t *accessRegion, OUT *outData, + int32_t acquireFenceFd) + { + auto adapter = getAdapter(device); + + // Exactly one of producer and consumer usage must be *_USAGE_NONE, + // but we can't check this until the upper levels of the framework + // correctly distinguish between producer and consumer usage + /* + bool hasProducerUsage = + uintProducerUsage != GRALLOC1_PRODUCER_USAGE_NONE; + bool hasConsumerUsage = + uintConsumerUsage != GRALLOC1_CONSUMER_USAGE_NONE; + if (hasProducerUsage && hasConsumerUsage || + !hasProducerUsage && !hasConsumerUsage) { + return static_cast(GRALLOC1_ERROR_BAD_VALUE); + } + */ + + auto producerUsage = static_cast(uintProducerUsage); + auto consumerUsage = static_cast(uintConsumerUsage); + + if (!outData) { + const auto producerCpuUsage = + 
GRALLOC1_PRODUCER_USAGE_CPU_READ | GRALLOC1_PRODUCER_USAGE_CPU_WRITE; + /* Reject a lock that returns no CPU pointer unless a CPU usage bit is set. + The previous parenthesization tested `usage & (mask != 0)`, i.e. bit 0 only. */ + if ((producerUsage & producerCpuUsage) != 0) { + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + if ((consumerUsage & GRALLOC1_CONSUMER_USAGE_CPU_READ) != 0) { + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + } + + if (!accessRegion) { + drv_log("accessRegion is null"); + return CROS_GRALLOC_ERROR_BAD_VALUE; + } + + return ((*adapter).*member)(bufferHandle, producerUsage, consumerUsage, + *accessRegion, outData, acquireFenceFd); + } + + int32_t unlock(buffer_handle_t bufferHandle, int32_t *outReleaseFence); + static int32_t unlockHook(gralloc1_device_t *device, buffer_handle_t bufferHandle, + int32_t *outReleaseFenceFd) + { + auto adapter = getAdapter(device); + *outReleaseFenceFd = -1; + int32_t releaseFence; + auto error = adapter->unlock(bufferHandle, &releaseFence); + if (error == CROS_GRALLOC_ERROR_NONE && releaseFence > 0) { + *outReleaseFenceFd = dup(releaseFence); + } + return error; + } + + int32_t setModifier(gralloc1_buffer_descriptor_t descriptor, uint64_t modifier); + static int32_t setModifierHook(gralloc1_device_t *device, + gralloc1_buffer_descriptor_t descriptor, uint64_t modifier) + { + return getAdapter(device)->setModifier(descriptor, modifier); + } + + // Adapter internals + std::unique_ptr<cros_gralloc_driver> driver; +}; + +} // namespace android + +#endif diff --git a/cros_gralloc/gralloc4/.clang-format b/cros_gralloc/gralloc4/.clang-format new file mode 100644 index 0000000..b310cc1 --- /dev/null +++ b/cros_gralloc/gralloc4/.clang-format @@ -0,0 +1,19 @@ +# Copyright 2020 The Chromium OS Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. +# +# This directory is formatted to match the format of the interfaces implemented.
+ +BasedOnStyle: Google +Standard: Cpp11 +AccessModifierOffset: -2 +AllowShortFunctionsOnASingleLine: Inline +ColumnLimit: 100 +CommentPragmas: NOLINT:.* +DerivePointerAlignment: false +IncludeBlocks: Preserve +IndentWidth: 4 +ContinuationIndentWidth: 8 +PointerAlignment: Left +TabWidth: 4 +UseTab: Never \ No newline at end of file diff --git a/cros_gralloc/gralloc4/Android.bp b/cros_gralloc/gralloc4/Android.bp new file mode 100644 index 0000000..223ef80 --- /dev/null +++ b/cros_gralloc/gralloc4/Android.bp @@ -0,0 +1,84 @@ +/* + * Copyright 2020 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +cc_binary { + name: "android.hardware.graphics.allocator@4.0-service.minigbm", + relative_install_path: "hw", + vendor: true, + init_rc: ["android.hardware.graphics.allocator@4.0-service.minigbm.rc"], + + cflags: [ + "-Wall", + "-Werror", + "-DUSE_GRALLOC1", + ], + + shared_libs: [ + "android.hardware.graphics.allocator@4.0", + "android.hardware.graphics.mapper@4.0", + "libbase", + "libcutils", + "libgralloctypes", + "libhidlbase", + "liblog", + "libsync", + "libutils", + ], + + static_libs: [ + "libdrm", + "libminigbm_cros_gralloc_celadon", + ], + + srcs: [ + "CrosGralloc4Allocator.cc", + "CrosGralloc4AllocatorService.cc", + "CrosGralloc4Utils.cc", + ], +} + +cc_library_shared { + name: "android.hardware.graphics.mapper@4.0-impl.minigbm", + relative_install_path: "hw", + vendor: true, + + cflags: [ + "-Wall", + "-Werror", + "-DUSE_GRALLOC1", + ], + + shared_libs: [ + "android.hardware.graphics.mapper@4.0", + "libbase", + "libcutils", + "libgralloctypes", + "libhidlbase", + "liblog", + "libsync", + "libutils", + ], + + static_libs: [ + "libdrm", + "libminigbm_cros_gralloc_celadon", + ], + + srcs: [ + "CrosGralloc4Mapper.cc", + "CrosGralloc4Utils.cc", + ], +} diff --git a/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc b/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc new file mode 100644 index 0000000..4fb7845 --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Allocator.cc @@ -0,0 +1,122 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include "cros_gralloc/gralloc4/CrosGralloc4Allocator.h" + +#include +#include + +#include "cros_gralloc/cros_gralloc_helpers.h" +#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h" + +using android::hardware::hidl_handle; +using android::hardware::hidl_vec; +using android::hardware::Return; +using android::hardware::Void; +using android::hardware::graphics::common::V1_2::BufferUsage; +using android::hardware::graphics::common::V1_2::PixelFormat; +using android::hardware::graphics::mapper::V4_0::Error; + +using BufferDescriptorInfo = + android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo; + +CrosGralloc4Allocator::CrosGralloc4Allocator() : mDriver(std::make_unique()) { + if (mDriver->init()) { + drv_log("Failed to initialize driver.\n"); + mDriver = nullptr; + } +} + +Error CrosGralloc4Allocator::allocate(const BufferDescriptorInfo& descriptor, uint32_t* outStride, + hidl_handle* outHandle) { + if (!mDriver) { + drv_log("Failed to allocate. Driver is uninitialized.\n"); + return Error::NO_RESOURCES; + } + + if (!outStride || !outHandle) { + return Error::NO_RESOURCES; + } + + struct cros_gralloc_buffer_descriptor crosDescriptor; + if (convertToCrosDescriptor(descriptor, &crosDescriptor)) { + return Error::UNSUPPORTED; + } + + bool supported = mDriver->is_supported(&crosDescriptor); + if (!supported && (descriptor.usage & BufferUsage::COMPOSER_OVERLAY)) { + crosDescriptor.use_flags &= ~BO_USE_SCANOUT; + supported = mDriver->is_supported(&crosDescriptor); + } + + if (!supported) { + std::string drmFormatString = getDrmFormatString(crosDescriptor.drm_format); + std::string pixelFormatString = getPixelFormatString(descriptor.format); + std::string usageString = getUsageString(descriptor.usage); + drv_log("Unsupported combination -- pixel format: %s, drm format:%s, usage: %s\n", + pixelFormatString.c_str(), drmFormatString.c_str(), usageString.c_str()); + return Error::UNSUPPORTED; + } + + buffer_handle_t handle; + int ret = 
mDriver->allocate(&crosDescriptor, &handle); + if (ret) { + return Error::NO_RESOURCES; + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(handle); + if (!crosHandle) { + return Error::NO_RESOURCES; + } + + *outHandle = handle; + *outStride = crosHandle->pixel_stride; + + return Error::NONE; +} + +Return CrosGralloc4Allocator::allocate(const hidl_vec& descriptor, uint32_t count, + allocate_cb hidlCb) { + hidl_vec handles; + + if (!mDriver) { + drv_log("Failed to allocate. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, 0, handles); + return Void(); + } + + BufferDescriptorInfo description; + + int ret = android::gralloc4::decodeBufferDescriptorInfo(descriptor, &description); + if (ret) { + drv_log("Failed to allocate. Failed to decode buffer descriptor: %d.\n", ret); + hidlCb(Error::BAD_DESCRIPTOR, 0, handles); + return Void(); + } + + handles.resize(count); + + uint32_t stride = 0; + for (int i = 0; i < handles.size(); i++) { + Error err = allocate(description, &stride, &(handles[i])); + if (err != Error::NONE) { + for (int j = 0; j < i; j++) { + mDriver->release(handles[j].getNativeHandle()); + } + handles.resize(0); + hidlCb(err, 0, handles); + return Void(); + } + } + + hidlCb(Error::NONE, stride, handles); + + for (const hidl_handle& handle : handles) { + mDriver->release(handle.getNativeHandle()); + } + + return Void(); +} diff --git a/cros_gralloc/gralloc4/CrosGralloc4Allocator.h b/cros_gralloc/gralloc4/CrosGralloc4Allocator.h new file mode 100644 index 0000000..21ad7ad --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Allocator.h @@ -0,0 +1,26 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include +#include + +#include "cros_gralloc/cros_gralloc_driver.h" + +class CrosGralloc4Allocator : public android::hardware::graphics::allocator::V4_0::IAllocator { + public: + CrosGralloc4Allocator(); + + android::hardware::Return allocate(const android::hardware::hidl_vec& descriptor, + uint32_t count, allocate_cb hidl_cb) override; + + private: + android::hardware::graphics::mapper::V4_0::Error allocate( + const android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo& + description, + uint32_t* outStride, android::hardware::hidl_handle* outHandle); + + std::unique_ptr mDriver; +}; diff --git a/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc b/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc new file mode 100644 index 0000000..5b79860 --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4AllocatorService.cc @@ -0,0 +1,30 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#define LOG_TAG "AllocatorService" + +#include + +#include "cros_gralloc/gralloc4/CrosGralloc4Allocator.h" + +using android::sp; +using android::hardware::configureRpcThreadpool; +using android::hardware::joinRpcThreadpool; +using android::hardware::graphics::allocator::V4_0::IAllocator; + +int main(int, char**) { + sp allocator = new CrosGralloc4Allocator(); + configureRpcThreadpool(4, true /* callerWillJoin */); + if (allocator->registerAsService() != android::NO_ERROR) { + ALOGE("failed to register graphics IAllocator 4.0 service"); + return -EINVAL; + } + + ALOGI("graphics IAllocator 4.0 service is initialized"); + android::hardware::joinRpcThreadpool(); + ALOGI("graphics IAllocator 4.0 service is terminating"); + return 0; +} diff --git a/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc b/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc new file mode 100644 index 0000000..1912a19 --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Mapper.cc @@ -0,0 +1,1016 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include "cros_gralloc/gralloc4/CrosGralloc4Mapper.h" + +#include +#include +#include +#include +#include +#include + +#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h" +#include "helpers.h" + +#ifdef USE_GRALLOC1 +#include "cros_gralloc/i915_private_android_types.h" +#endif + +using aidl::android::hardware::graphics::common::BlendMode; +using aidl::android::hardware::graphics::common::Dataspace; +using aidl::android::hardware::graphics::common::PlaneLayout; +using aidl::android::hardware::graphics::common::Rect; +using android::hardware::hidl_handle; +using android::hardware::hidl_vec; +using android::hardware::Return; +using android::hardware::Void; +using android::hardware::graphics::common::V1_2::BufferUsage; +using android::hardware::graphics::common::V1_2::PixelFormat; +using android::hardware::graphics::mapper::V4_0::Error; +using android::hardware::graphics::mapper::V4_0::IMapper; + +CrosGralloc4Mapper::CrosGralloc4Mapper() : mDriver(std::make_unique()) { + if (mDriver->init()) { + drv_log("Failed to initialize driver.\n"); + mDriver = nullptr; + } +} + +Return CrosGralloc4Mapper::createDescriptor(const BufferDescriptorInfo& description, + createDescriptor_cb hidlCb) { + hidl_vec descriptor; + + if (description.width == 0) { + drv_log("Failed to createDescriptor. Bad width: %d.\n", description.width); + hidlCb(Error::BAD_VALUE, descriptor); + return Void(); + } + + if (description.height == 0) { + drv_log("Failed to createDescriptor. Bad height: %d.\n", description.height); + hidlCb(Error::BAD_VALUE, descriptor); + return Void(); + } + + if (description.layerCount == 0) { + drv_log("Failed to createDescriptor. Bad layer count: %d.\n", description.layerCount); + hidlCb(Error::BAD_VALUE, descriptor); + return Void(); + } + + int ret = android::gralloc4::encodeBufferDescriptorInfo(description, &descriptor); + if (ret) { + drv_log("Failed to createDescriptor. 
Failed to encode: %d.\n", ret); + hidlCb(Error::BAD_VALUE, descriptor); + return Void(); + } + + hidlCb(Error::NONE, descriptor); + return Void(); +} + +Return CrosGralloc4Mapper::importBuffer(const hidl_handle& handle, importBuffer_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to import buffer. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, nullptr); + return Void(); + } + + const native_handle_t* bufferHandle = handle.getNativeHandle(); + if (!bufferHandle || bufferHandle->numFds == 0) { + drv_log("Failed to importBuffer. Bad handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + native_handle_t* importedBufferHandle = native_handle_clone(bufferHandle); + if (!importedBufferHandle) { + drv_log("Failed to importBuffer. Handle clone failed.\n"); + hidlCb(Error::NO_RESOURCES, nullptr); + return Void(); + } + + int ret = mDriver->retain(importedBufferHandle); + if (ret) { + native_handle_close(importedBufferHandle); + native_handle_delete(importedBufferHandle); + hidlCb(Error::NO_RESOURCES, nullptr); + return Void(); + } + + hidlCb(Error::NONE, importedBufferHandle); + return Void(); +} + +Return CrosGralloc4Mapper::freeBuffer(void* rawHandle) { + if (!mDriver) { + drv_log("Failed to freeBuffer. Driver is uninitialized.\n"); + return Error::NO_RESOURCES; + } + + native_handle_t* bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to freeBuffer. Empty handle.\n"); + return Error::BAD_BUFFER; + } + + int ret = mDriver->release(bufferHandle); + if (ret) { + return Error::BAD_BUFFER; + } + + native_handle_close(bufferHandle); + native_handle_delete(bufferHandle); + return Error::NONE; +} + +Return CrosGralloc4Mapper::validateBufferSize(void* rawHandle, + const BufferDescriptorInfo& descriptor, + uint32_t stride) { + if (!mDriver) { + drv_log("Failed to validateBufferSize. 
Driver is uninitialized.\n"); + return Error::NO_RESOURCES; + } + + native_handle_t* bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to validateBufferSize. Empty handle.\n"); + return Error::BAD_BUFFER; + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (!crosHandle) { + drv_log("Failed to validateBufferSize. Invalid handle.\n"); + return Error::BAD_BUFFER; + } + + PixelFormat crosHandleFormat = static_cast(crosHandle->droid_format); +#ifdef USE_GRALLOC1 + int32_t yuvFormat = static_cast(descriptor.format); + if (descriptor.format != crosHandleFormat && yuvFormat != crosHandle->droid_format && + !(descriptor.format == PixelFormat::YCBCR_420_888 && + crosHandle->droid_format == HAL_PIXEL_FORMAT_NV12)) { + drv_log("Failed to validateBufferSize. Format mismatch.\n"); +#else + if (descriptor.format != crosHandleFormat) { + drv_log("Failed to validateBufferSize. Format mismatch.\n"); +#endif + return Error::BAD_BUFFER; + } + + if (descriptor.width != crosHandle->width) { + drv_log("Failed to validateBufferSize. Width mismatch (%d vs %d).\n", descriptor.width, + crosHandle->width); + return Error::BAD_VALUE; + } + + if (descriptor.height != crosHandle->height) { + drv_log("Failed to validateBufferSize. Height mismatch (%d vs %d).\n", descriptor.height, + crosHandle->height); + return Error::BAD_VALUE; + } + + if (stride != crosHandle->pixel_stride) { + drv_log("Failed to validateBufferSize. Stride mismatch (%d vs %d).\n", stride, + crosHandle->pixel_stride); + return Error::BAD_VALUE; + } + + return Error::NONE; +} + +Return CrosGralloc4Mapper::getTransportSize(void* rawHandle, getTransportSize_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to getTransportSize. Driver is uninitialized.\n"); + hidlCb(Error::BAD_BUFFER, 0, 0); + return Void(); + } + + native_handle_t* bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to getTransportSize. 
Bad handle.\n"); + hidlCb(Error::BAD_BUFFER, 0, 0); + return Void(); + } + + // No local process data is currently stored on the native handle. + hidlCb(Error::NONE, bufferHandle->numFds, bufferHandle->numInts); + return Void(); +} + +Return CrosGralloc4Mapper::lock(void* rawBuffer, uint64_t cpuUsage, const Rect& region, + const hidl_handle& acquireFence, lock_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to lock. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, nullptr); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawBuffer); + if (!bufferHandle) { + drv_log("Failed to lock. Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + if (cpuUsage == 0) { + drv_log("Failed to lock. Bad cpu usage: %" PRIu64 ".\n", cpuUsage); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + uint32_t mapUsage = 0; + int ret = convertToMapUsage(cpuUsage, &mapUsage); + if (ret) { + drv_log("Failed to lock. Convert usage failed.\n"); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (crosHandle == nullptr) { + drv_log("Failed to lock. Invalid handle.\n"); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.left < 0) { + drv_log("Failed to lock. Invalid region: negative left value %d.\n", region.left); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.top < 0) { + drv_log("Failed to lock. Invalid region: negative top value %d.\n", region.top); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.width < 0) { + drv_log("Failed to lock. Invalid region: negative width value %d.\n", region.width); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.height < 0) { + drv_log("Failed to lock. 
Invalid region: negative height value %d.\n", region.height); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.width > crosHandle->width) { + drv_log("Failed to lock. Invalid region: width greater than buffer width (%d vs %d).\n", + region.width, crosHandle->width); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + if (region.height > crosHandle->height) { + drv_log("Failed to lock. Invalid region: height greater than buffer height (%d vs %d).\n", + region.height, crosHandle->height); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + struct rectangle rect = {static_cast(region.left), static_cast(region.top), + static_cast(region.width), + static_cast(region.height)}; + + // An access region of all zeros means the entire buffer. + if (rect.x == 0 && rect.y == 0 && rect.width == 0 && rect.height == 0) { + rect.width = crosHandle->width; + rect.height = crosHandle->height; + } + + int acquireFenceFd = -1; + ret = convertToFenceFd(acquireFence, &acquireFenceFd); + if (ret) { + drv_log("Failed to lock. Bad acquire fence.\n"); + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + uint8_t* addr[DRV_MAX_PLANES]; + ret = mDriver->lock(bufferHandle, acquireFenceFd, /*close_acquire_fence=*/false, &rect, + mapUsage, addr); + if (ret) { + hidlCb(Error::BAD_VALUE, nullptr); + return Void(); + } + + hidlCb(Error::NONE, addr[0]); + return Void(); +} + +Return CrosGralloc4Mapper::unlock(void* rawHandle, unlock_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to unlock. Driver is uninitialized.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to unlock. 
Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + int releaseFenceFd = -1; + int ret = mDriver->unlock(bufferHandle, &releaseFenceFd); + if (ret) { + drv_log("Failed to unlock.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + hidl_handle releaseFenceHandle; + ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle); + if (ret) { + drv_log("Failed to unlock. Failed to convert release fence to handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + hidlCb(Error::NONE, releaseFenceHandle); + return Void(); +} + +Return CrosGralloc4Mapper::flushLockedBuffer(void* rawHandle, flushLockedBuffer_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to flushLockedBuffer. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, nullptr); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to flushLockedBuffer. Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + int releaseFenceFd = -1; + int ret = mDriver->flush(bufferHandle, &releaseFenceFd); + if (ret) { + drv_log("Failed to flushLockedBuffer. Flush failed.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + hidl_handle releaseFenceHandle; + ret = convertToFenceHandle(releaseFenceFd, &releaseFenceHandle); + if (ret) { + drv_log("Failed to flushLockedBuffer. Failed to convert release fence to handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr); + return Void(); + } + + hidlCb(Error::NONE, releaseFenceHandle); + return Void(); +} + +Return CrosGralloc4Mapper::rereadLockedBuffer(void* rawHandle) { + if (!mDriver) { + drv_log("Failed to rereadLockedBuffer. Driver is uninitialized.\n"); + return Error::NO_RESOURCES; + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to rereadLockedBuffer. 
Empty handle.\n"); + return Error::BAD_BUFFER; + } + + int ret = mDriver->invalidate(bufferHandle); + if (ret) { + drv_log("Failed to rereadLockedBuffer. Failed to invalidate.\n"); + return Error::BAD_BUFFER; + } + + return Error::NONE; +} + +Return CrosGralloc4Mapper::isSupported(const BufferDescriptorInfo& descriptor, + isSupported_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to isSupported. Driver is uninitialized.\n"); + hidlCb(Error::BAD_VALUE, false); + return Void(); + } + + struct cros_gralloc_buffer_descriptor crosDescriptor; + if (convertToCrosDescriptor(descriptor, &crosDescriptor)) { + hidlCb(Error::NONE, false); + return Void(); + } + + bool supported = mDriver->is_supported(&crosDescriptor); + if (!supported) { + crosDescriptor.use_flags &= ~BO_USE_SCANOUT; + supported = mDriver->is_supported(&crosDescriptor); + } + + hidlCb(Error::NONE, supported); + return Void(); +} + +Return CrosGralloc4Mapper::get(void* rawHandle, const MetadataType& metadataType, + get_cb hidlCb) { + hidl_vec encodedMetadata; + + if (!mDriver) { + drv_log("Failed to get. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, encodedMetadata); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to get. Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, encodedMetadata); + return Void(); + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (!crosHandle) { + drv_log("Failed to get. Invalid handle.\n"); + hidlCb(Error::BAD_BUFFER, encodedMetadata); + return Void(); + } + + get(crosHandle, metadataType, hidlCb); + return Void(); +} + +Return CrosGralloc4Mapper::get(cros_gralloc_handle_t crosHandle, + const MetadataType& metadataType, get_cb hidlCb) { + hidl_vec encodedMetadata; + + if (!mDriver) { + drv_log("Failed to get. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, encodedMetadata); + return Void(); + } + + if (!crosHandle) { + drv_log("Failed to get. 
Invalid handle.\n"); + hidlCb(Error::BAD_BUFFER, encodedMetadata); + return Void(); + } + + android::status_t status = android::NO_ERROR; + if (metadataType == android::gralloc4::MetadataType_BufferId) { + status = android::gralloc4::encodeBufferId(crosHandle->id, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Name) { + const char* name = (const char*)(&crosHandle->base.data[crosHandle->name_offset]); + status = android::gralloc4::encodeName(name, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Width) { + status = android::gralloc4::encodeWidth(crosHandle->width, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Height) { + status = android::gralloc4::encodeHeight(crosHandle->height, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_LayerCount) { + status = android::gralloc4::encodeLayerCount(1, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) { + PixelFormat pixelFormat = static_cast(crosHandle->droid_format); + status = android::gralloc4::encodePixelFormatRequested(pixelFormat, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatFourCC) { + status = android::gralloc4::encodePixelFormatFourCC(crosHandle->format, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatModifier) { + status = android::gralloc4::encodePixelFormatModifier(crosHandle->format_modifier, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Usage) { + uint64_t usage = static_cast(crosHandle->usage); + status = android::gralloc4::encodeUsage(usage, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_AllocationSize) { + status = android::gralloc4::encodeAllocationSize(crosHandle->total_size, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_ProtectedContent) { 
+ uint64_t hasProtectedContent = crosHandle->usage & BufferUsage::PROTECTED ? 1 : 0; + status = android::gralloc4::encodeProtectedContent(hasProtectedContent, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Compression) { + status = android::gralloc4::encodeCompression(android::gralloc4::Compression_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Interlaced) { + status = android::gralloc4::encodeInterlaced(android::gralloc4::Interlaced_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_ChromaSiting) { + status = android::gralloc4::encodeChromaSiting(android::gralloc4::ChromaSiting_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PlaneLayouts) { + std::vector planeLayouts; + getPlaneLayouts(crosHandle->format, &planeLayouts); + + for (size_t plane = 0; plane < planeLayouts.size(); plane++) { + PlaneLayout& planeLayout = planeLayouts[plane]; + planeLayout.offsetInBytes = crosHandle->offsets[plane]; + planeLayout.strideInBytes = crosHandle->strides[plane]; + planeLayout.totalSizeInBytes = crosHandle->sizes[plane]; + planeLayout.widthInSamples = crosHandle->width / planeLayout.horizontalSubsampling; + planeLayout.heightInSamples = crosHandle->height / planeLayout.verticalSubsampling; + } + + status = android::gralloc4::encodePlaneLayouts(planeLayouts, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Crop) { + std::vector crops; + for (size_t plane = 0; plane < crosHandle->num_planes; plane++) { + aidl::android::hardware::graphics::common::Rect crop; + crop.left = 0; + crop.top = 0; + crop.right = crosHandle->width; + crop.bottom = crosHandle->height; + crops.push_back(crop); + } + + status = android::gralloc4::encodeCrop(crops, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Dataspace) { + status = android::gralloc4::encodeDataspace(Dataspace::UNKNOWN, 
&encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_BlendMode) { + status = android::gralloc4::encodeBlendMode(BlendMode::INVALID, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Smpte2086) { + status = android::gralloc4::encodeSmpte2086(std::nullopt, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Cta861_3) { + status = android::gralloc4::encodeCta861_3(std::nullopt, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Smpte2094_40) { + status = android::gralloc4::encodeSmpte2094_40(std::nullopt, &encodedMetadata); + } else { + hidlCb(Error::UNSUPPORTED, encodedMetadata); + return Void(); + } + + if (status != android::NO_ERROR) { + hidlCb(Error::NO_RESOURCES, encodedMetadata); + drv_log("Failed to get. Failed to encode metadata.\n"); + return Void(); + } + + hidlCb(Error::NONE, encodedMetadata); + return Void(); +} + +Return CrosGralloc4Mapper::set(void* rawHandle, const MetadataType& metadataType, + const hidl_vec& /*metadata*/) { + if (!mDriver) { + drv_log("Failed to set. Driver is uninitialized.\n"); + return Error::NO_RESOURCES; + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to set. Empty handle.\n"); + return Error::BAD_BUFFER; + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (!crosHandle) { + drv_log("Failed to set. 
Invalid handle.\n"); + return Error::BAD_BUFFER; + } + + if (metadataType == android::gralloc4::MetadataType_BufferId) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_Name) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_Width) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_Height) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_LayerCount) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) { + return Error::BAD_VALUE; + } else if (metadataType == android::gralloc4::MetadataType_Usage) { + return Error::BAD_VALUE; + } + + return Error::UNSUPPORTED; +} + +int CrosGralloc4Mapper::getResolvedDrmFormat(PixelFormat pixelFormat, uint64_t bufferUsage, + uint32_t* outDrmFormat) { + uint32_t drmFormat; + if (convertToDrmFormat(pixelFormat, &drmFormat)) { + std::string pixelFormatString = getPixelFormatString(pixelFormat); + drv_log("Failed to getResolvedDrmFormat. Failed to convert format %s\n", + pixelFormatString.c_str()); + return -1; + } + + uint64_t usage; + if (convertToBufferUsage(bufferUsage, &usage)) { + std::string usageString = getUsageString(bufferUsage); + drv_log("Failed to getResolvedDrmFormat. Failed to convert usage %s\n", + usageString.c_str()); + return -1; + } + + uint32_t resolvedDrmFormat = mDriver->get_resolved_drm_format(drmFormat, usage); + if (resolvedDrmFormat == DRM_FORMAT_INVALID) { + std::string drmFormatString = getDrmFormatString(drmFormat); + drv_log("Failed to getResolvedDrmFormat. 
Failed to resolve drm format %s\n", + drmFormatString.c_str()); + return -1; + } + + *outDrmFormat = resolvedDrmFormat; + + return 0; +} + +Return CrosGralloc4Mapper::getFromBufferDescriptorInfo( + const BufferDescriptorInfo& descriptor, const MetadataType& metadataType, + getFromBufferDescriptorInfo_cb hidlCb) { + hidl_vec encodedMetadata; + + if (!mDriver) { + drv_log("Failed to getFromBufferDescriptorInfo. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, encodedMetadata); + return Void(); + } + + android::status_t status = android::NO_ERROR; + if (metadataType == android::gralloc4::MetadataType_Name) { + status = android::gralloc4::encodeName(descriptor.name, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Width) { + status = android::gralloc4::encodeWidth(descriptor.width, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Height) { + status = android::gralloc4::encodeHeight(descriptor.height, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_LayerCount) { + status = android::gralloc4::encodeLayerCount(1, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatRequested) { + status = android::gralloc4::encodePixelFormatRequested(descriptor.format, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_PixelFormatFourCC) { + uint32_t drmFormat; + if (getResolvedDrmFormat(descriptor.format, descriptor.usage, &drmFormat)) { + hidlCb(Error::BAD_VALUE, encodedMetadata); + return Void(); + } + status = android::gralloc4::encodePixelFormatFourCC(drmFormat, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Usage) { + status = android::gralloc4::encodeUsage(descriptor.usage, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_ProtectedContent) { + uint64_t hasProtectedContent = descriptor.usage & BufferUsage::PROTECTED ? 
1 : 0; + status = android::gralloc4::encodeProtectedContent(hasProtectedContent, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Compression) { + status = android::gralloc4::encodeCompression(android::gralloc4::Compression_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Interlaced) { + status = android::gralloc4::encodeInterlaced(android::gralloc4::Interlaced_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_ChromaSiting) { + status = android::gralloc4::encodeChromaSiting(android::gralloc4::ChromaSiting_None, + &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Crop) { + uint32_t drmFormat; + if (getResolvedDrmFormat(descriptor.format, descriptor.usage, &drmFormat)) { + hidlCb(Error::BAD_VALUE, encodedMetadata); + return Void(); + } + + size_t numPlanes = drv_num_planes_from_format(drmFormat); + + std::vector crops; + for (size_t plane = 0; plane < numPlanes; plane++) { + aidl::android::hardware::graphics::common::Rect crop; + crop.left = 0; + crop.top = 0; + crop.right = descriptor.width; + crop.bottom = descriptor.height; + crops.push_back(crop); + } + status = android::gralloc4::encodeCrop(crops, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Dataspace) { + status = android::gralloc4::encodeDataspace(Dataspace::UNKNOWN, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_BlendMode) { + status = android::gralloc4::encodeBlendMode(BlendMode::INVALID, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Smpte2086) { + status = android::gralloc4::encodeSmpte2086(std::nullopt, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Cta861_3) { + status = android::gralloc4::encodeCta861_3(std::nullopt, &encodedMetadata); + } else if (metadataType == android::gralloc4::MetadataType_Smpte2094_40) { + status = 
android::gralloc4::encodeSmpte2094_40(std::nullopt, &encodedMetadata); + } else { + hidlCb(Error::UNSUPPORTED, encodedMetadata); + return Void(); + } + + if (status != android::NO_ERROR) { + hidlCb(Error::NO_RESOURCES, encodedMetadata); + return Void(); + } + + hidlCb(Error::NONE, encodedMetadata); + return Void(); +} + +Return CrosGralloc4Mapper::listSupportedMetadataTypes(listSupportedMetadataTypes_cb hidlCb) { + hidl_vec supported; + + if (!mDriver) { + drv_log("Failed to listSupportedMetadataTypes. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, supported); + return Void(); + } + + supported = hidl_vec({ + { + android::gralloc4::MetadataType_BufferId, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Name, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Width, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Height, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_LayerCount, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_PixelFormatRequested, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_PixelFormatFourCC, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_PixelFormatModifier, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Usage, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_AllocationSize, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_ProtectedContent, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Compression, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Interlaced, + "", + 
/*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_ChromaSiting, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_PlaneLayouts, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Dataspace, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_BlendMode, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Smpte2086, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Cta861_3, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + { + android::gralloc4::MetadataType_Smpte2094_40, + "", + /*isGettable=*/true, + /*isSettable=*/false, + }, + }); + + hidlCb(Error::NONE, supported); + return Void(); +} + +Return CrosGralloc4Mapper::dumpBuffer(void* rawHandle, dumpBuffer_cb hidlCb) { + BufferDump bufferDump; + + if (!mDriver) { + drv_log("Failed to dumpBuffer. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, bufferDump); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to dumpBuffer. Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, bufferDump); + return Void(); + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (!crosHandle) { + drv_log("Failed to dumpBuffer. Invalid handle.\n"); + hidlCb(Error::BAD_BUFFER, bufferDump); + return Void(); + } + + return dumpBuffer(crosHandle, hidlCb); +} + +Return CrosGralloc4Mapper::dumpBuffer(cros_gralloc_handle_t crosHandle, + dumpBuffer_cb hidlCb) { + BufferDump bufferDump; + + if (!mDriver) { + drv_log("Failed to dumpBuffer. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, bufferDump); + return Void(); + } + + if (!crosHandle) { + drv_log("Failed to dumpBuffer. 
Invalid handle.\n"); + hidlCb(Error::BAD_BUFFER, bufferDump); + return Void(); + } + + std::vector metadataDumps; + + MetadataType metadataType = android::gralloc4::MetadataType_BufferId; + auto metadata_get_callback = [&](Error, hidl_vec metadata) { + MetadataDump metadataDump; + metadataDump.metadataType = metadataType; + metadataDump.metadata = metadata; + metadataDumps.push_back(metadataDump); + }; + + metadataType = android::gralloc4::MetadataType_BufferId; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Name; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Width; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Height; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_LayerCount; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_PixelFormatRequested; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_PixelFormatFourCC; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_PixelFormatModifier; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Usage; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_AllocationSize; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_ProtectedContent; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Compression; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Interlaced; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = 
android::gralloc4::MetadataType_ChromaSiting; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_PlaneLayouts; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_Dataspace; + get(crosHandle, metadataType, metadata_get_callback); + + metadataType = android::gralloc4::MetadataType_BlendMode; + get(crosHandle, metadataType, metadata_get_callback); + + bufferDump.metadataDump = metadataDumps; + hidlCb(Error::NONE, bufferDump); + return Void(); +} + +Return CrosGralloc4Mapper::dumpBuffers(dumpBuffers_cb hidlCb) { + std::vector bufferDumps; + + if (!mDriver) { + drv_log("Failed to dumpBuffers. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, bufferDumps); + return Void(); + } + + Error error = Error::NONE; + + auto handleCallback = [&](cros_gralloc_handle_t crosHandle) { + auto dumpBufferCallback = [&](Error err, BufferDump bufferDump) { + error = err; + if (error == Error::NONE) { + bufferDumps.push_back(bufferDump); + } + }; + + dumpBuffer(crosHandle, dumpBufferCallback); + }; + mDriver->for_each_handle(handleCallback); + + hidlCb(error, bufferDumps); + return Void(); +} + +Return CrosGralloc4Mapper::getReservedRegion(void* rawHandle, getReservedRegion_cb hidlCb) { + if (!mDriver) { + drv_log("Failed to getReservedRegion. Driver is uninitialized.\n"); + hidlCb(Error::NO_RESOURCES, nullptr, 0); + return Void(); + } + + buffer_handle_t bufferHandle = reinterpret_cast(rawHandle); + if (!bufferHandle) { + drv_log("Failed to getReservedRegion. Empty handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr, 0); + return Void(); + } + + cros_gralloc_handle_t crosHandle = cros_gralloc_convert_handle(bufferHandle); + if (!crosHandle) { + drv_log("Failed to getReservedRegion. 
Invalid handle.\n"); + hidlCb(Error::BAD_BUFFER, nullptr, 0); + return Void(); + } + + void* reservedRegionAddr = nullptr; + uint64_t reservedRegionSize = 0; + int ret = mDriver->get_reserved_region(bufferHandle, &reservedRegionAddr, &reservedRegionSize); + if (ret) { + drv_log("Failed to getReservedRegion.\n"); + hidlCb(Error::BAD_BUFFER, nullptr, 0); + return Void(); + } + + hidlCb(Error::NONE, reservedRegionAddr, reservedRegionSize); + return Void(); +} + +android::hardware::graphics::mapper::V4_0::IMapper* HIDL_FETCH_IMapper(const char* /*name*/) { + return static_cast(new CrosGralloc4Mapper); +} diff --git a/cros_gralloc/gralloc4/CrosGralloc4Mapper.h b/cros_gralloc/gralloc4/CrosGralloc4Mapper.h new file mode 100644 index 0000000..b318930 --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Mapper.h @@ -0,0 +1,80 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include + +#include "cros_gralloc/cros_gralloc_driver.h" +#include "cros_gralloc/cros_gralloc_handle.h" + +class CrosGralloc4Mapper : public android::hardware::graphics::mapper::V4_0::IMapper { + public: + CrosGralloc4Mapper(); + + android::hardware::Return createDescriptor(const BufferDescriptorInfo& description, + createDescriptor_cb hidlCb) override; + + android::hardware::Return importBuffer(const android::hardware::hidl_handle& rawHandle, + importBuffer_cb hidlCb) override; + + android::hardware::Return freeBuffer( + void* rawHandle) override; + + android::hardware::Return validateBufferSize( + void* rawHandle, const BufferDescriptorInfo& descriptor, uint32_t stride) override; + + android::hardware::Return getTransportSize(void* rawHandle, + getTransportSize_cb hidlCb) override; + + android::hardware::Return lock(void* rawHandle, uint64_t cpuUsage, + const Rect& accessRegion, + const android::hardware::hidl_handle& acquireFence, + lock_cb hidlCb) override; + + android::hardware::Return unlock(void* rawHandle, unlock_cb hidlCb) override; + + android::hardware::Return flushLockedBuffer(void* rawHandle, + flushLockedBuffer_cb hidlCb) override; + + android::hardware::Return rereadLockedBuffer( + void* rawHandle) override; + + android::hardware::Return isSupported(const BufferDescriptorInfo& descriptor, + isSupported_cb hidlCb) override; + + android::hardware::Return get(void* rawHandle, const MetadataType& metadataType, + get_cb hidlCb) override; + + android::hardware::Return set( + void* rawHandle, const MetadataType& metadataType, + const android::hardware::hidl_vec& metadata) override; + + android::hardware::Return getFromBufferDescriptorInfo( + const BufferDescriptorInfo& descriptor, const MetadataType& metadataType, + getFromBufferDescriptorInfo_cb hidlCb) override; + + android::hardware::Return listSupportedMetadataTypes( + listSupportedMetadataTypes_cb hidlCb) override; + + android::hardware::Return dumpBuffer(void* rawHandle, dumpBuffer_cb hidlCb) 
override; + android::hardware::Return dumpBuffers(dumpBuffers_cb hidlCb) override; + + android::hardware::Return getReservedRegion(void* rawHandle, + getReservedRegion_cb hidlCb) override; + + private: + android::hardware::Return get(cros_gralloc_handle_t crosHandle, + const MetadataType& metadataType, get_cb hidlCb); + + android::hardware::Return dumpBuffer(cros_gralloc_handle_t crosHandle, + dumpBuffer_cb hidlCb); + + int getResolvedDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat pixelFormat, + uint64_t bufferUsage, uint32_t* outDrmFormat); + + std::unique_ptr mDriver; +}; + +extern "C" android::hardware::graphics::mapper::V4_0::IMapper* HIDL_FETCH_IMapper(const char* name); diff --git a/cros_gralloc/gralloc4/CrosGralloc4Utils.cc b/cros_gralloc/gralloc4/CrosGralloc4Utils.cc new file mode 100644 index 0000000..84cda7a --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Utils.cc @@ -0,0 +1,846 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include "cros_gralloc/gralloc4/CrosGralloc4Utils.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "cros_gralloc/cros_gralloc_helpers.h" + +#ifdef USE_GRALLOC1 +#include "cros_gralloc/i915_private_android.h" +#endif + +using aidl::android::hardware::graphics::common::PlaneLayout; +using aidl::android::hardware::graphics::common::PlaneLayoutComponent; +using aidl::android::hardware::graphics::common::PlaneLayoutComponentType; +using android::hardware::hidl_bitfield; +using android::hardware::hidl_handle; +using android::hardware::graphics::common::V1_2::BufferUsage; +using android::hardware::graphics::common::V1_2::PixelFormat; + +using BufferDescriptorInfo = + android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo; + +std::string getDrmFormatString(uint32_t drmFormat) { + switch (drmFormat) { + case DRM_FORMAT_ABGR1555: + return "DRM_FORMAT_ABGR1555"; + case DRM_FORMAT_ABGR2101010: + return "DRM_FORMAT_ABGR2101010"; + case DRM_FORMAT_ABGR4444: + return "DRM_FORMAT_ABGR4444"; + case DRM_FORMAT_ABGR8888: + return "DRM_FORMAT_ABGR8888"; + case DRM_FORMAT_ARGB1555: + return "DRM_FORMAT_ARGB1555"; + case DRM_FORMAT_ARGB2101010: + return "DRM_FORMAT_ARGB2101010"; + case DRM_FORMAT_ARGB4444: + return "DRM_FORMAT_ARGB4444"; + case DRM_FORMAT_ARGB8888: + return "DRM_FORMAT_ARGB8888"; + case DRM_FORMAT_AYUV: + return "DRM_FORMAT_AYUV"; + case DRM_FORMAT_BGR233: + return "DRM_FORMAT_BGR233"; + case DRM_FORMAT_BGR565: + return "DRM_FORMAT_BGR565"; + case DRM_FORMAT_BGR888: + return "DRM_FORMAT_BGR888"; + case DRM_FORMAT_BGRA1010102: + return "DRM_FORMAT_BGRA1010102"; + case DRM_FORMAT_BGRA4444: + return "DRM_FORMAT_BGRA4444"; + case DRM_FORMAT_BGRA5551: + return "DRM_FORMAT_BGRA5551"; + case DRM_FORMAT_BGRA8888: + return "DRM_FORMAT_BGRA8888"; + case DRM_FORMAT_BGRX1010102: + return "DRM_FORMAT_BGRX1010102"; + case DRM_FORMAT_BGRX4444: + return "DRM_FORMAT_BGRX4444"; + case DRM_FORMAT_BGRX5551: + 
return "DRM_FORMAT_BGRX5551"; + case DRM_FORMAT_BGRX8888: + return "DRM_FORMAT_BGRX8888"; + case DRM_FORMAT_C8: + return "DRM_FORMAT_C8"; + case DRM_FORMAT_GR88: + return "DRM_FORMAT_GR88"; + case DRM_FORMAT_NV12: + return "DRM_FORMAT_NV12"; + case DRM_FORMAT_NV21: + return "DRM_FORMAT_NV21"; + case DRM_FORMAT_R8: + return "DRM_FORMAT_R8"; + case DRM_FORMAT_RG88: + return "DRM_FORMAT_RG88"; + case DRM_FORMAT_RGB332: + return "DRM_FORMAT_RGB332"; + case DRM_FORMAT_RGB565: + return "DRM_FORMAT_RGB565"; + case DRM_FORMAT_RGB888: + return "DRM_FORMAT_RGB888"; + case DRM_FORMAT_RGBA1010102: + return "DRM_FORMAT_RGBA1010102"; + case DRM_FORMAT_RGBA4444: + return "DRM_FORMAT_RGBA4444"; + case DRM_FORMAT_RGBA5551: + return "DRM_FORMAT_RGBA5551"; + case DRM_FORMAT_RGBA8888: + return "DRM_FORMAT_RGBA8888"; + case DRM_FORMAT_RGBX1010102: + return "DRM_FORMAT_RGBX1010102"; + case DRM_FORMAT_RGBX4444: + return "DRM_FORMAT_RGBX4444"; + case DRM_FORMAT_RGBX5551: + return "DRM_FORMAT_RGBX5551"; + case DRM_FORMAT_RGBX8888: + return "DRM_FORMAT_RGBX8888"; + case DRM_FORMAT_UYVY: + return "DRM_FORMAT_UYVY"; + case DRM_FORMAT_VYUY: + return "DRM_FORMAT_VYUY"; + case DRM_FORMAT_XBGR1555: + return "DRM_FORMAT_XBGR1555"; + case DRM_FORMAT_XBGR2101010: + return "DRM_FORMAT_XBGR2101010"; + case DRM_FORMAT_XBGR4444: + return "DRM_FORMAT_XBGR4444"; + case DRM_FORMAT_XBGR8888: + return "DRM_FORMAT_XBGR8888"; + case DRM_FORMAT_XRGB1555: + return "DRM_FORMAT_XRGB1555"; + case DRM_FORMAT_XRGB2101010: + return "DRM_FORMAT_XRGB2101010"; + case DRM_FORMAT_XRGB4444: + return "DRM_FORMAT_XRGB4444"; + case DRM_FORMAT_XRGB8888: + return "DRM_FORMAT_XRGB8888"; + case DRM_FORMAT_YUYV: + return "DRM_FORMAT_YUYV"; + case DRM_FORMAT_YVU420: + return "DRM_FORMAT_YVU420"; + case DRM_FORMAT_YVYU: + return "DRM_FORMAT_YVYU"; + } + return android::base::StringPrintf("Unknown(%d)", drmFormat); +} + +std::string getPixelFormatString(PixelFormat format) { + switch (format) { + case PixelFormat::BGRA_8888: + return 
"PixelFormat::BGRA_8888"; + case PixelFormat::BLOB: + return "PixelFormat::BLOB"; + case PixelFormat::DEPTH_16: + return "PixelFormat::DEPTH_16"; + case PixelFormat::DEPTH_24: + return "PixelFormat::DEPTH_24"; + case PixelFormat::DEPTH_24_STENCIL_8: + return "PixelFormat::DEPTH_24_STENCIL_8"; + case PixelFormat::DEPTH_32F: + return "PixelFormat::DEPTH_24"; + case PixelFormat::DEPTH_32F_STENCIL_8: + return "PixelFormat::DEPTH_24_STENCIL_8"; + case PixelFormat::HSV_888: + return "PixelFormat::HSV_888"; + case PixelFormat::IMPLEMENTATION_DEFINED: + return "PixelFormat::IMPLEMENTATION_DEFINED"; + case PixelFormat::RAW10: + return "PixelFormat::RAW10"; + case PixelFormat::RAW12: + return "PixelFormat::RAW12"; + case PixelFormat::RAW16: + return "PixelFormat::RAW16"; + case PixelFormat::RAW_OPAQUE: + return "PixelFormat::RAW_OPAQUE"; + case PixelFormat::RGBA_1010102: + return "PixelFormat::RGBA_1010102"; + case PixelFormat::RGBA_8888: + return "PixelFormat::RGBA_8888"; + case PixelFormat::RGBA_FP16: + return "PixelFormat::RGBA_FP16"; + case PixelFormat::RGBX_8888: + return "PixelFormat::RGBX_8888"; + case PixelFormat::RGB_565: + return "PixelFormat::RGB_565"; + case PixelFormat::RGB_888: + return "PixelFormat::RGB_888"; + case PixelFormat::STENCIL_8: + return "PixelFormat::STENCIL_8"; + case PixelFormat::Y16: + return "PixelFormat::Y16"; + case PixelFormat::Y8: + return "PixelFormat::Y8"; + case PixelFormat::YCBCR_420_888: + return "PixelFormat::YCBCR_420_888"; + case PixelFormat::YCBCR_422_I: + return "PixelFormat::YCBCR_422_I"; + case PixelFormat::YCBCR_422_SP: + return "PixelFormat::YCBCR_422_SP"; + case PixelFormat::YCBCR_P010: + return "PixelFormat::YCBCR_P010"; + case PixelFormat::YCRCB_420_SP: + return "PixelFormat::YCRCB_420_SP"; + case PixelFormat::YV12: + return "PixelFormat::YV12"; + } + return android::base::StringPrintf("PixelFormat::Unknown(%d)", static_cast(format)); +} + +std::string getUsageString(hidl_bitfield bufferUsage) { + using Underlying = 
typename std::underlying_type::type; + + Underlying usage = static_cast(bufferUsage); + + std::vector usages; + if (usage & BufferUsage::CAMERA_INPUT) { + usage &= ~static_cast(BufferUsage::CAMERA_INPUT); + usages.push_back("BufferUsage::CAMERA_INPUT"); + } + if (usage & BufferUsage::CAMERA_OUTPUT) { + usage &= ~static_cast(BufferUsage::CAMERA_OUTPUT); + usages.push_back("BufferUsage::CAMERA_OUTPUT"); + } + if (usage & BufferUsage::COMPOSER_CURSOR) { + usage &= ~static_cast(BufferUsage::COMPOSER_CURSOR); + usages.push_back("BufferUsage::COMPOSER_CURSOR"); + } + if (usage & BufferUsage::COMPOSER_OVERLAY) { + usage &= ~static_cast(BufferUsage::COMPOSER_OVERLAY); + usages.push_back("BufferUsage::COMPOSER_OVERLAY"); + } + if (usage & BufferUsage::CPU_READ_OFTEN) { + usage &= ~static_cast(BufferUsage::CPU_READ_OFTEN); + usages.push_back("BufferUsage::CPU_READ_OFTEN"); + } + if (usage & BufferUsage::CPU_READ_NEVER) { + usage &= ~static_cast(BufferUsage::CPU_READ_NEVER); + usages.push_back("BufferUsage::CPU_READ_NEVER"); + } + if (usage & BufferUsage::CPU_READ_RARELY) { + usage &= ~static_cast(BufferUsage::CPU_READ_RARELY); + usages.push_back("BufferUsage::CPU_READ_RARELY"); + } + if (usage & BufferUsage::CPU_WRITE_NEVER) { + usage &= ~static_cast(BufferUsage::CPU_WRITE_NEVER); + usages.push_back("BufferUsage::CPU_WRITE_NEVER"); + } + if (usage & BufferUsage::CPU_WRITE_OFTEN) { + usage &= ~static_cast(BufferUsage::CPU_WRITE_OFTEN); + usages.push_back("BufferUsage::CPU_WRITE_OFTEN"); + } + if (usage & BufferUsage::CPU_WRITE_RARELY) { + usage &= ~static_cast(BufferUsage::CPU_WRITE_RARELY); + usages.push_back("BufferUsage::CPU_WRITE_RARELY"); + } + if (usage & BufferUsage::GPU_RENDER_TARGET) { + usage &= ~static_cast(BufferUsage::GPU_RENDER_TARGET); + usages.push_back("BufferUsage::GPU_RENDER_TARGET"); + } + if (usage & BufferUsage::GPU_TEXTURE) { + usage &= ~static_cast(BufferUsage::GPU_TEXTURE); + usages.push_back("BufferUsage::GPU_TEXTURE"); + } + if (usage & 
BufferUsage::PROTECTED) { + usage &= ~static_cast(BufferUsage::PROTECTED); + usages.push_back("BufferUsage::PROTECTED"); + } + if (usage & BufferUsage::RENDERSCRIPT) { + usage &= ~static_cast(BufferUsage::RENDERSCRIPT); + usages.push_back("BufferUsage::RENDERSCRIPT"); + } + if (usage & BufferUsage::VIDEO_DECODER) { + usage &= ~static_cast(BufferUsage::VIDEO_DECODER); + usages.push_back("BufferUsage::VIDEO_DECODER"); + } + if (usage & BufferUsage::VIDEO_ENCODER) { + usage &= ~static_cast(BufferUsage::VIDEO_ENCODER); + usages.push_back("BufferUsage::VIDEO_ENCODER"); + } + + if (usage) { + usages.push_back(android::base::StringPrintf("UnknownUsageBits-%" PRIu64, usage)); + } + + return android::base::Join(usages, '|'); +} + +int convertToDrmFormat(PixelFormat format, uint32_t* outDrmFormat) { + switch (format) { + case PixelFormat::BGRA_8888: + *outDrmFormat = DRM_FORMAT_ARGB8888; + return 0; + /** + * Choose DRM_FORMAT_R8 because requires the buffers + * with a format HAL_PIXEL_FORMAT_BLOB have a height of 1, and width + * equal to their size in bytes. 
+ */ + case PixelFormat::BLOB: + *outDrmFormat = DRM_FORMAT_R8; + return 0; + case PixelFormat::DEPTH_16: + return -EINVAL; + case PixelFormat::DEPTH_24: + return -EINVAL; + case PixelFormat::DEPTH_24_STENCIL_8: + return -EINVAL; + case PixelFormat::DEPTH_32F: + return -EINVAL; + case PixelFormat::DEPTH_32F_STENCIL_8: + return -EINVAL; + case PixelFormat::HSV_888: + return -EINVAL; + case PixelFormat::IMPLEMENTATION_DEFINED: + *outDrmFormat = DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED; + return 0; + case PixelFormat::RAW10: + return -EINVAL; + case PixelFormat::RAW12: + return -EINVAL; + case PixelFormat::RAW16: + *outDrmFormat = DRM_FORMAT_R16; + return 0; + /* TODO use blob */ + case PixelFormat::RAW_OPAQUE: + return -EINVAL; + case PixelFormat::RGBA_1010102: + *outDrmFormat = DRM_FORMAT_ABGR2101010; + return 0; + case PixelFormat::RGBA_8888: + *outDrmFormat = DRM_FORMAT_ABGR8888; + return 0; + case PixelFormat::RGBA_FP16: + *outDrmFormat = DRM_FORMAT_ABGR16161616F; + return 0; + case PixelFormat::RGBX_8888: + *outDrmFormat = DRM_FORMAT_XBGR8888; + return 0; + case PixelFormat::RGB_565: + *outDrmFormat = DRM_FORMAT_RGB565; + return 0; + case PixelFormat::RGB_888: + *outDrmFormat = DRM_FORMAT_RGB888; + return 0; + case PixelFormat::STENCIL_8: + return -EINVAL; + case PixelFormat::Y16: + *outDrmFormat = DRM_FORMAT_R16; + return 0; + case PixelFormat::Y8: + *outDrmFormat = DRM_FORMAT_R8; + return 0; + case PixelFormat::YCBCR_420_888: + *outDrmFormat = DRM_FORMAT_FLEX_YCbCr_420_888; + return 0; + case PixelFormat::YCBCR_422_SP: + return -EINVAL; + case PixelFormat::YCBCR_422_I: + return -EINVAL; + case PixelFormat::YCBCR_P010: + *outDrmFormat = DRM_FORMAT_P010; + return 0; + case PixelFormat::YCRCB_420_SP: + *outDrmFormat = DRM_FORMAT_NV21; + return 0; + case PixelFormat::YV12: + *outDrmFormat = DRM_FORMAT_YVU420_ANDROID; + return 0; + }; +#ifdef USE_GRALLOC1 + *outDrmFormat = i915_private_convert_format((int)format); + if (DRM_FORMAT_NONE != *outDrmFormat) + return 0; 
+#endif + return -EINVAL; +} + +int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage) { + uint64_t bufferUsage = BO_USE_NONE; + + if ((grallocUsage & BufferUsage::CPU_READ_MASK) == + static_cast<uint64_t>(BufferUsage::CPU_READ_RARELY)) { + bufferUsage |= BO_USE_SW_READ_RARELY; + } + if ((grallocUsage & BufferUsage::CPU_READ_MASK) == + static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN)) { + bufferUsage |= BO_USE_SW_READ_OFTEN; + } + if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) == + static_cast<uint64_t>(BufferUsage::CPU_WRITE_RARELY)) { + bufferUsage |= BO_USE_SW_WRITE_RARELY; + } + if ((grallocUsage & BufferUsage::CPU_WRITE_MASK) == + static_cast<uint64_t>(BufferUsage::CPU_WRITE_OFTEN)) { + bufferUsage |= BO_USE_SW_WRITE_OFTEN; + } + if (grallocUsage & BufferUsage::GPU_TEXTURE) { + bufferUsage |= BO_USE_TEXTURE; + } + if (grallocUsage & BufferUsage::GPU_RENDER_TARGET) { + bufferUsage |= BO_USE_RENDERING; + } + if (grallocUsage & BufferUsage::COMPOSER_OVERLAY) { + /* HWC wants to use display hardware, but can defer to OpenGL. 
*/ + bufferUsage |= BO_USE_SCANOUT | BO_USE_TEXTURE; + } + if (grallocUsage & BufferUsage::PROTECTED) { + bufferUsage |= BO_USE_PROTECTED; + } + if (grallocUsage & BufferUsage::COMPOSER_CURSOR) { + bufferUsage |= BO_USE_NONE; + } + if (grallocUsage & BufferUsage::VIDEO_ENCODER) { + /*HACK: See b/30054495 */ + bufferUsage |= BO_USE_SW_READ_OFTEN; + } + if (grallocUsage & BufferUsage::CAMERA_OUTPUT) { + bufferUsage |= BO_USE_CAMERA_WRITE; + } + if (grallocUsage & BufferUsage::CAMERA_INPUT) { + bufferUsage |= BO_USE_CAMERA_READ; + } + if (grallocUsage & BufferUsage::RENDERSCRIPT) { + bufferUsage |= BO_USE_RENDERSCRIPT; + } + if (grallocUsage & BufferUsage::VIDEO_DECODER) { + bufferUsage |= BO_USE_HW_VIDEO_DECODER; + } +#ifdef USE_GRALLOC1 + if ((grallocUsage & BufferUsage::GPU_MIPMAP_COMPLETE) || + (grallocUsage & BufferUsage::GPU_CUBE_MAP)) { + drv_log("GPU_MIPMAP_COMPLETE or GPU_CUBE_MAP not supported"); + return -1; + } +#endif + + *outBufferUsage = bufferUsage; + return 0; +} + +#ifdef USE_GRALLOC1 +bool IsSupportedYUVFormat(uint32_t droid_format) { + + switch (droid_format) { + case HAL_PIXEL_FORMAT_YCbCr_420_888: + case HAL_PIXEL_FORMAT_YV12: + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + return true; + default: + return i915_private_supported_yuv_format(droid_format); + } + + return false; +} + +#endif + +int convertToCrosDescriptor(const BufferDescriptorInfo& descriptor, + struct cros_gralloc_buffer_descriptor* outCrosDescriptor) { + outCrosDescriptor->name = descriptor.name; + outCrosDescriptor->width = descriptor.width; + outCrosDescriptor->height = descriptor.height; + outCrosDescriptor->droid_format = static_cast(descriptor.format); + outCrosDescriptor->droid_usage = descriptor.usage; + outCrosDescriptor->reserved_region_size = descriptor.reservedSize; + + if (convertToDrmFormat(descriptor.format, &outCrosDescriptor->drm_format)) { +#ifdef USE_GRALLOC1 + drv_log("Failed to convert descriptor by convertToDrmFormat for format = %d\n", descriptor.format); 
+ if (!IsSupportedYUVFormat(static_cast<uint32_t>(descriptor.format))) { + std::string pixelFormatString = getPixelFormatString(descriptor.format); + drv_log("Failed to convert descriptor. Unsupported format %s\n", pixelFormatString.c_str()); + return -1; + } else { + outCrosDescriptor->drm_format = cros_gralloc_convert_format(static_cast<int32_t>(descriptor.format)); + } +#else + std::string pixelFormatString = getPixelFormatString(descriptor.format); + drv_log("Failed to convert descriptor. Unsupported format %s\n", pixelFormatString.c_str()); + return -1; +#endif + } + if (convertToBufferUsage(descriptor.usage, &outCrosDescriptor->use_flags)) { + std::string usageString = getUsageString(descriptor.usage); + drv_log("Failed to convert descriptor. Unsupported usage flags %s\n", usageString.c_str()); + return -1; + } +#ifdef USE_GRALLOC1 + if (descriptor.layerCount > 1) { + drv_log("Failed to convert descriptor. Can't support more than 1 layercount %d\n", descriptor.layerCount); + return -1; + } +#endif + return 0; +} + +int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage) { + uint32_t mapUsage = BO_MAP_NONE; + + if (grallocUsage & BufferUsage::CPU_READ_MASK) { + mapUsage |= BO_MAP_READ; + } + if (grallocUsage & BufferUsage::CPU_WRITE_MASK) { + mapUsage |= BO_MAP_WRITE; + } + + *outMapUsage = mapUsage; + return 0; +} + +int convertToFenceFd(const hidl_handle& fenceHandle, int* outFenceFd) { + if (!outFenceFd) { + return -EINVAL; + } + + const native_handle_t* nativeHandle = fenceHandle.getNativeHandle(); + if (nativeHandle && nativeHandle->numFds > 1) { + return -EINVAL; + } + + *outFenceFd = (nativeHandle && nativeHandle->numFds == 1) ? 
nativeHandle->data[0] : -1; + return 0; +} + +int convertToFenceHandle(int fenceFd, hidl_handle* outFenceHandle) { + if (!outFenceHandle) { + return -EINVAL; + } + if (fenceFd < 0) { + return 0; + } + + NATIVE_HANDLE_DECLARE_STORAGE(handleStorage, 1, 0); + auto fenceHandle = native_handle_init(handleStorage, 1, 0); + fenceHandle->data[0] = fenceFd; + + *outFenceHandle = fenceHandle; + return 0; +} + +const std::unordered_map>& GetPlaneLayoutsMap() { + static const auto* kPlaneLayoutsMap = + new std::unordered_map>({ + {DRM_FORMAT_ABGR8888, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 8, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 16, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_A, + .offsetInBits = 24, + .sizeInBits = 8}}, + .sampleIncrementInBits = 32, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_ABGR2101010, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 10}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 10, + .sizeInBits = 10}, + {.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 20, + .sizeInBits = 10}, + {.type = android::gralloc4::PlaneLayoutComponentType_A, + .offsetInBits = 30, + .sizeInBits = 2}}, + .sampleIncrementInBits = 32, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_ABGR16161616F, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 16}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 16, + .sizeInBits = 16}, + {.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 32, + .sizeInBits = 16}, + {.type = 
android::gralloc4::PlaneLayoutComponentType_A, + .offsetInBits = 48, + .sizeInBits = 16}}, + .sampleIncrementInBits = 64, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_ARGB8888, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 8, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 16, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_A, + .offsetInBits = 24, + .sizeInBits = 8}}, + .sampleIncrementInBits = 32, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_NV12, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = + {{.type = android::gralloc4::PlaneLayoutComponentType_CB, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_CR, + .offsetInBits = 8, + .sizeInBits = 8}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }}}, + + {DRM_FORMAT_NV21, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = + {{.type = android::gralloc4::PlaneLayoutComponentType_CR, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_CB, + .offsetInBits = 8, + .sizeInBits = 8}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }}}, + +#ifdef USE_GRALLOC1 + {DRM_FORMAT_NV12_Y_TILED_INTEL, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y, + .offsetInBits = 0, + 
.sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = + {{.type = android::gralloc4::PlaneLayoutComponentType_CB, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_CR, + .offsetInBits = 8, + .sizeInBits = 8}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }}}, +#endif + + {DRM_FORMAT_P010, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_Y, + .offsetInBits = 6, + .sizeInBits = 10}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = + {{.type = android::gralloc4::PlaneLayoutComponentType_CB, + .offsetInBits = 6, + .sizeInBits = 10}, + {.type = android::gralloc4::PlaneLayoutComponentType_CR, + .offsetInBits = 22, + .sizeInBits = 10}}, + .sampleIncrementInBits = 32, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }}}, + + {DRM_FORMAT_R8, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_R16, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 16}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_RGB565, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 5}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 5, + .sizeInBits = 6}, + {.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 11, + .sizeInBits = 5}}, + .sampleIncrementInBits = 16, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_RGB888, + {{ + .components = {{.type = 
android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 8, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 16, + .sizeInBits = 8}}, + .sampleIncrementInBits = 24, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_XBGR8888, + {{ + .components = {{.type = android::gralloc4::PlaneLayoutComponentType_B, + .offsetInBits = 0, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_G, + .offsetInBits = 8, + .sizeInBits = 8}, + {.type = android::gralloc4::PlaneLayoutComponentType_R, + .offsetInBits = 16, + .sizeInBits = 8}}, + .sampleIncrementInBits = 32, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }}}, + + {DRM_FORMAT_YVU420, + { + { + .components = {{.type = android::gralloc4:: + PlaneLayoutComponentType_Y, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = {{.type = android::gralloc4:: + PlaneLayoutComponentType_CB, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }, + { + .components = {{.type = android::gralloc4:: + PlaneLayoutComponentType_CR, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }, + }}, + + {DRM_FORMAT_YVU420_ANDROID, + { + { + .components = {{.type = android::gralloc4:: + PlaneLayoutComponentType_Y, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 1, + .verticalSubsampling = 1, + }, + { + .components = {{.type = android::gralloc4:: + PlaneLayoutComponentType_CR, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }, + { + .components = 
{{.type = android::gralloc4:: + PlaneLayoutComponentType_CB, + .offsetInBits = 0, + .sizeInBits = 8}}, + .sampleIncrementInBits = 8, + .horizontalSubsampling = 2, + .verticalSubsampling = 2, + }, + }}, + }); + return *kPlaneLayoutsMap; +} + +int getPlaneLayouts(uint32_t drmFormat, std::vector* outPlaneLayouts) { + const auto& planeLayoutsMap = GetPlaneLayoutsMap(); + const auto it = planeLayoutsMap.find(drmFormat); + if (it == planeLayoutsMap.end()) { + drv_log("Unknown plane layout for format %d\n", drmFormat); + return -1; + } + + *outPlaneLayouts = it->second; + return 0; +} diff --git a/cros_gralloc/gralloc4/CrosGralloc4Utils.h b/cros_gralloc/gralloc4/CrosGralloc4Utils.h new file mode 100644 index 0000000..094ef74 --- /dev/null +++ b/cros_gralloc/gralloc4/CrosGralloc4Utils.h @@ -0,0 +1,41 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include +#include + +#include +#include +#include + +#include "cros_gralloc/cros_gralloc_types.h" + +std::string getDrmFormatString(uint32_t drmFormat); + +std::string getPixelFormatString(android::hardware::graphics::common::V1_2::PixelFormat format); + +std::string getUsageString( + android::hardware::hidl_bitfield + usage); + +int convertToDrmFormat(android::hardware::graphics::common::V1_2::PixelFormat format, + uint32_t* outDrmFormat); + +int convertToBufferUsage(uint64_t grallocUsage, uint64_t* outBufferUsage); + +int convertToCrosDescriptor( + const android::hardware::graphics::mapper::V4_0::IMapper::BufferDescriptorInfo& descriptor, + struct cros_gralloc_buffer_descriptor* outCrosDescriptor); + +int convertToMapUsage(uint64_t grallocUsage, uint32_t* outMapUsage); + +int convertToFenceFd(const android::hardware::hidl_handle& fence_handle, int* out_fence_fd); + +int convertToFenceHandle(int fence_fd, android::hardware::hidl_handle* out_fence_handle); + +int getPlaneLayouts( + uint32_t 
drm_format, + std::vector* out_layouts); \ No newline at end of file diff --git a/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc b/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc new file mode 100644 index 0000000..a96a6e1 --- /dev/null +++ b/cros_gralloc/gralloc4/android.hardware.graphics.allocator@4.0-service.minigbm.rc @@ -0,0 +1,24 @@ +# +# Copyright 2020 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +service vendor.graphics.allocator-4-0 /vendor/bin/hw/android.hardware.graphics.allocator@4.0-service.minigbm + interface android.hardware.graphics.allocator@4.0::IAllocator default + class hal animation + user system + group graphics drmrpc + capabilities SYS_NICE + onrestart restart surfaceflinger + writepid /dev/cpuset/system-background/tasks diff --git a/cros_gralloc/i915_private_android.cc b/cros_gralloc/i915_private_android.cc new file mode 100644 index 0000000..f5d97c4 --- /dev/null +++ b/cros_gralloc/i915_private_android.cc @@ -0,0 +1,104 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include "i915_private_android.h" +#include "i915_private_android_types.h" + +#include "cros_gralloc_helpers.h" + +#include + +#include "drv.h" + +uint32_t i915_private_convert_format(int format) +{ + switch (format) { + case HAL_PIXEL_FORMAT_NV12: + return DRM_FORMAT_NV12; + case HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL: + return DRM_FORMAT_NV12_Y_TILED_INTEL; + case HAL_PIXEL_FORMAT_YCbCr_422_I: + return DRM_FORMAT_YUYV; + case HAL_PIXEL_FORMAT_Y16: + return DRM_FORMAT_R16; + case HAL_PIXEL_FORMAT_YCbCr_444_888: + return DRM_FORMAT_YUV444; + case HAL_PIXEL_FORMAT_YCrCb_420_SP: + return DRM_FORMAT_NV21; + case HAL_PIXEL_FORMAT_YCbCr_422_SP: + return DRM_FORMAT_NV16; + case HAL_PIXEL_FORMAT_YCbCr_422_888: + return DRM_FORMAT_YUV422; + case HAL_PIXEL_FORMAT_P010_INTEL: + return DRM_FORMAT_P010; + } + + return DRM_FORMAT_NONE; +} + +int32_t i915_private_invert_format(int format) +{ + /* Convert the DRM FourCC into the most specific HAL pixel format. */ + switch (format) { + case DRM_FORMAT_ARGB8888: + return HAL_PIXEL_FORMAT_BGRA_8888; + case DRM_FORMAT_RGB565: + return HAL_PIXEL_FORMAT_RGB_565; + case DRM_FORMAT_RGB888: + return HAL_PIXEL_FORMAT_RGB_888; + case DRM_FORMAT_ABGR8888: + return HAL_PIXEL_FORMAT_RGBA_8888; + case DRM_FORMAT_XBGR8888: + return HAL_PIXEL_FORMAT_RGBX_8888; + case DRM_FORMAT_FLEX_YCbCr_420_888: + return HAL_PIXEL_FORMAT_YCbCr_420_888; + case DRM_FORMAT_YVU420_ANDROID: + return HAL_PIXEL_FORMAT_YV12; + case DRM_FORMAT_R8: + return HAL_PIXEL_FORMAT_BLOB; + case DRM_FORMAT_NV12: + return HAL_PIXEL_FORMAT_NV12; + case DRM_FORMAT_NV12_Y_TILED_INTEL: + return HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL; + case DRM_FORMAT_YUYV: + return HAL_PIXEL_FORMAT_YCbCr_422_I; + case DRM_FORMAT_R16: + return HAL_PIXEL_FORMAT_Y16; + case DRM_FORMAT_P010: + return HAL_PIXEL_FORMAT_P010_INTEL; + case DRM_FORMAT_YUV444: + return HAL_PIXEL_FORMAT_YCbCr_444_888; + case DRM_FORMAT_NV21: + return HAL_PIXEL_FORMAT_YCrCb_420_SP; + case DRM_FORMAT_NV16: + return 
HAL_PIXEL_FORMAT_YCbCr_422_SP; + case DRM_FORMAT_YUV422: + return HAL_PIXEL_FORMAT_YCbCr_422_888; + default: + drv_log("Unhandled DRM format %4.4s", drmFormat2Str(format)); + } + + return 0; +} + +bool i915_private_supported_yuv_format(uint32_t droid_format) +{ + switch (droid_format) { + case HAL_PIXEL_FORMAT_NV12: + case HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL: + case HAL_PIXEL_FORMAT_YCbCr_422_I: + case HAL_PIXEL_FORMAT_YCbCr_422_888: + case HAL_PIXEL_FORMAT_YCbCr_444_888: + case HAL_PIXEL_FORMAT_YCrCb_420_SP: + case HAL_PIXEL_FORMAT_Y16: + case HAL_PIXEL_FORMAT_P010_INTEL: + return true; + default: + return false; + } + + return false; +} diff --git a/cros_gralloc/i915_private_android.h b/cros_gralloc/i915_private_android.h new file mode 100644 index 0000000..70c41f2 --- /dev/null +++ b/cros_gralloc/i915_private_android.h @@ -0,0 +1,19 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef I915_PRIVATE_ANDROID +#define I915_PRIVATE_ANDROID + +#include + +#include "i915_private.h" + +uint32_t i915_private_convert_format(int format); + +int32_t i915_private_invert_format(int format); + +bool i915_private_supported_yuv_format(uint32_t droid_format); + +#endif diff --git a/cros_gralloc/i915_private_android_types.h b/cros_gralloc/i915_private_android_types.h new file mode 100644 index 0000000..a7072d0 --- /dev/null +++ b/cros_gralloc/i915_private_android_types.h @@ -0,0 +1,93 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef I915_PRIVATE_ANDROID_TYPE +#define I915_PRIVATE_ANDROID_TYPE +/* + * Android graphics.h defines the formats and leaves 0x100 - 0x1FF + * range available for HAL implementation specific formats. 
+ */ + +#include + +enum { HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL = 0x100, + HAL_PIXEL_FORMAT_NV12_LINEAR_INTEL = 0x101, + HAL_PIXEL_FORMAT_YCrCb_422_H_INTEL = 0x102, + HAL_PIXEL_FORMAT_NV12_LINEAR_PACKED_INTEL = 0x103, + HAL_PIXEL_FORMAT_YCbCr_422_H_INTEL = 0x104, + HAL_PIXEL_FORMAT_NV12_X_TILED_INTEL = 0x105, + HAL_PIXEL_FORMAT_RGBA_5551_INTEL = 0x106, + HAL_PIXEL_FORMAT_RGBA_4444_INTEL = 0x107, + HAL_PIXEL_FORMAT_GENERIC_8BIT_INTEL = 0x108, + HAL_PIXEL_FORMAT_YCbCr_411_INTEL = 0x109, + HAL_PIXEL_FORMAT_YCbCr_420_H_INTEL = 0x10A, + HAL_PIXEL_FORMAT_YCbCr_422_V_INTEL = 0x10B, + HAL_PIXEL_FORMAT_YCbCr_444_INTEL = 0x10C, + HAL_PIXEL_FORMAT_RGBP_INTEL = 0x10D, + HAL_PIXEL_FORMAT_BGRP_INTEL = 0x10E, + HAL_PIXEL_FORMAT_NV12_LINEAR_CAMERA_INTEL = 0x10F, + HAL_PIXEL_FORMAT_P010_INTEL = 0x110, + HAL_PIXEL_FORMAT_Z16_INTEL = 0x111, + HAL_PIXEL_FORMAT_UVMAP64_INTEL = 0x112, + HAL_PIXEL_FORMAT_A2R10G10B10_INTEL = 0x113, + HAL_PIXEL_FORMAT_A2B10G10R10_INTEL = 0x114, + HAL_PIXEL_FORMAT_YCrCb_NORMAL_INTEL = 0x115, + HAL_PIXEL_FORMAT_YCrCb_SWAPUVY_INTEL = 0x116, + HAL_PIXEL_FORMAT_YCrCb_SWAPUV_INTEL = 0x117, + HAL_PIXEL_FORMAT_YCrCb_SWAPY_INTEL = 0x118, + HAL_PIXEL_FORMAT_X2R10G10B10_INTEL = 0x119, + HAL_PIXEL_FORMAT_X2B10G10R10_INTEL = 0x11A, + HAL_PIXEL_FORMAT_P016_INTEL = 0x11C, + HAL_PIXEL_FORMAT_Y210_INTEL = 0x11D, + HAL_PIXEL_FORMAT_Y216_INTEL = 0x11E, + HAL_PIXEL_FORMAT_Y410_INTEL = 0x11F, + HAL_PIXEL_FORMAT_Y416_INTEL = 0x120, + HAL_PIXEL_FORMAT_Y8I_INTEL = 0x121, + HAL_PIXEL_FORMAT_Y12I_INTEL = 0x122, + HAL_PIXEL_FORMAT_YUYV_INTEL = HAL_PIXEL_FORMAT_YCrCb_NORMAL_INTEL, + HAL_PIXEL_FORMAT_YUY2_INTEL = HAL_PIXEL_FORMAT_YCrCb_NORMAL_INTEL, + HAL_PIXEL_FORMAT_VYUY_INTEL = HAL_PIXEL_FORMAT_YCrCb_SWAPUVY_INTEL, + HAL_PIXEL_FORMAT_YVYU_INTEL = HAL_PIXEL_FORMAT_YCrCb_SWAPUV_INTEL, + HAL_PIXEL_FORMAT_UYVY_INTEL = HAL_PIXEL_FORMAT_YCrCb_SWAPY_INTEL, + HAL_PIXEL_FORMAT_NV12_TILED_INTEL = HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL, + HAL_PIXEL_FORMAT_NV12_INTEL = 
HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL, + HAL_PIXEL_FORMAT_INTEL_NV12 = HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL, + HAL_PIXEL_FORMAT_NV12 = 0x10F, + HAL_PIXEL_FORMAT_YUV420PackedSemiPlanar_INTEL = 0x7FA00E00, + HAL_PIXEL_FORMAT_YUV420PackedSemiPlanar_Tiled_INTEL = 0x7FA00F00, +}; + +enum { GRALLOC1_FUNCTION_SET_MODIFIER = 101, + GRALLOC1_FUNCTION_GET_BYTE_STRIDE = 102, + GRALLOC1_FUNCTION_GET_PRIME = 103, + GRALLOC1_FUNCTION_SET_INTERLACE = 104, + GRALLOC1_FUNCTION_SET_PROTECTIONINFO = 105, + GRALLOC1_LAST_CUSTOM = 500 }; + +typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_SET_MODIFIER)( + gralloc1_device_t *device, gralloc1_buffer_descriptor_t descriptor, uint64_t modifier); + +typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_BYTE_STRIDE)( + gralloc1_device_t *device, buffer_handle_t buffer, uint32_t *outStride, uint32_t size); + +typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_GET_PRIME)( + gralloc1_device_t *device, buffer_handle_t buffer, uint32_t *prime); + +typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_SET_INTERLACE)( + gralloc1_device_t *device, buffer_handle_t buffer, uint32_t interlace); + +typedef int32_t /*gralloc1_error_t*/ (*GRALLOC1_PFN_SET_PROTECTIONINFO)( + gralloc1_device_t *device, buffer_handle_t buffer, uint32_t protection_info); + +typedef union intel_protection_info_type_t { + uint32_t value; + struct { + uint32_t is_encrypted : 8; + uint32_t pavp_sesion_id : 8; + uint32_t pavp_instance_id: 16; + }; +}intel_protection_info_type_t; + +#endif diff --git a/dri.c b/dri.c new file mode 100644 index 0000000..97dc567 --- /dev/null +++ b/dri.c @@ -0,0 +1,465 @@ +/* + * Copyright 2017 Advanced Micro Devices. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#ifdef DRV_AMDGPU + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dri.h" +#include "drv_priv.h" +#include "helpers.h" +#include "util.h" + +static const struct { + uint32_t drm_format; + int dri_image_format; +} drm_to_dri_image_formats[] = { + { DRM_FORMAT_R8, __DRI_IMAGE_FORMAT_R8 }, + { DRM_FORMAT_GR88, __DRI_IMAGE_FORMAT_GR88 }, + { DRM_FORMAT_RGB565, __DRI_IMAGE_FORMAT_RGB565 }, + { DRM_FORMAT_XRGB8888, __DRI_IMAGE_FORMAT_XRGB8888 }, + { DRM_FORMAT_ARGB8888, __DRI_IMAGE_FORMAT_ARGB8888 }, + { DRM_FORMAT_XBGR8888, __DRI_IMAGE_FORMAT_XBGR8888 }, + { DRM_FORMAT_ABGR8888, __DRI_IMAGE_FORMAT_ABGR8888 }, + { DRM_FORMAT_XRGB2101010, __DRI_IMAGE_FORMAT_XRGB2101010 }, + { DRM_FORMAT_ARGB2101010, __DRI_IMAGE_FORMAT_ARGB2101010 }, +}; + +static int drm_format_to_dri_format(uint32_t drm_format) +{ + uint32_t i; + for (i = 0; i < ARRAY_SIZE(drm_to_dri_image_formats); i++) { + if (drm_to_dri_image_formats[i].drm_format == drm_format) + return drm_to_dri_image_formats[i].dri_image_format; + } + + return 0; +} + +static bool lookup_extension(const __DRIextension *const *extensions, const char *name, + int min_version, const __DRIextension **dst) +{ + while (*extensions) { + if ((*extensions)->name && !strcmp((*extensions)->name, name) && + (*extensions)->version >= min_version) { + *dst = *extensions; + return true; + } + + extensions++; + } + + return false; +} + +/* + * Close Gem Handle + */ +static void close_gem_handle(uint32_t handle, int fd) +{ + struct drm_gem_close gem_close; + int ret = 0; + + memset(&gem_close, 0, sizeof(gem_close)); + gem_close.handle = handle; + ret = drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close); + if (ret) + drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", handle, ret); +} + +/* + * The DRI GEM namespace may be different from the minigbm's driver GEM namespace. We have + * to import into minigbm. 
+ */ +static int import_into_minigbm(struct dri_driver *dri, struct bo *bo) +{ + uint32_t handle; + int ret, modifier_upper, modifier_lower, num_planes, i, j; + off_t dmabuf_sizes[DRV_MAX_PLANES]; + __DRIimage *plane_image = NULL; + + if (dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_UPPER, + &modifier_upper) && + dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_MODIFIER_LOWER, + &modifier_lower)) { + bo->meta.format_modifiers[0] = + ((uint64_t)modifier_upper << 32) | (uint32_t)modifier_lower; + } else { + bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_INVALID; + } + + if (!dri->image_extension->queryImage(bo->priv, __DRI_IMAGE_ATTRIB_NUM_PLANES, + &num_planes)) { + return -errno; + } + + bo->meta.num_planes = num_planes; + + for (i = 0; i < num_planes; ++i) { + int prime_fd, stride, offset; + plane_image = dri->image_extension->fromPlanar(bo->priv, i, NULL); + __DRIimage *image = plane_image ? plane_image : bo->priv; + + if (i) + bo->meta.format_modifiers[i] = bo->meta.format_modifiers[0]; + + if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_STRIDE, &stride) || + !dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_OFFSET, &offset)) { + ret = -errno; + goto cleanup; + } + + if (!dri->image_extension->queryImage(image, __DRI_IMAGE_ATTRIB_FD, &prime_fd)) { + ret = -errno; + goto cleanup; + } + + dmabuf_sizes[i] = lseek(prime_fd, 0, SEEK_END); + if (dmabuf_sizes[i] == (off_t)-1) { + ret = -errno; + close(prime_fd); + goto cleanup; + } + + lseek(prime_fd, 0, SEEK_SET); + + ret = drmPrimeFDToHandle(bo->drv->fd, prime_fd, &handle); + + close(prime_fd); + + if (ret) { + drv_log("drmPrimeFDToHandle failed with %s\n", strerror(errno)); + goto cleanup; + } + + bo->handles[i].u32 = handle; + + bo->meta.strides[i] = stride; + bo->meta.offsets[i] = offset; + + if (plane_image) + dri->image_extension->destroyImage(plane_image); + } + + for (i = 0; i < num_planes; ++i) { + off_t next_plane = dmabuf_sizes[i]; + for (j = 
0; j < num_planes; ++j) { + if (bo->meta.offsets[j] < next_plane && + bo->meta.offsets[j] > bo->meta.offsets[i] && + bo->handles[j].u32 == bo->handles[i].u32) + next_plane = bo->meta.offsets[j]; + } + + bo->meta.sizes[i] = next_plane - bo->meta.offsets[i]; + + /* This is kind of misleading if different planes use + different dmabufs. */ + bo->meta.total_size += bo->meta.sizes[i]; + } + + return 0; + +cleanup: + if (plane_image) + dri->image_extension->destroyImage(plane_image); + while (--i >= 0) { + for (j = 0; j <= i; ++j) + if (bo->handles[j].u32 == bo->handles[i].u32) + break; + + /* Multiple equivalent handles) */ + if (i == j) + break; + + /* This kind of goes horribly wrong when we already imported + * the same handles earlier, as we should really reference + * count handles. */ + close_gem_handle(bo->handles[i].u32, bo->drv->fd); + } + return ret; +} + +/* + * The caller is responsible for setting drv->priv to a structure that derives from dri_driver. + */ +int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix) +{ + char fname[128]; + const __DRIextension **(*get_extensions)(); + const __DRIextension *loader_extensions[] = { NULL }; + + struct dri_driver *dri = drv->priv; + + dri->fd = open(drmGetRenderDeviceNameFromFd(drv_get_fd(drv)), O_RDWR); + if (dri->fd < 0) + return -ENODEV; + + dri->driver_handle = dlopen(dri_so_path, RTLD_NOW | RTLD_GLOBAL); + if (!dri->driver_handle) + goto close_dri_fd; + + snprintf(fname, sizeof(fname), __DRI_DRIVER_GET_EXTENSIONS "_%s", driver_suffix); + get_extensions = dlsym(dri->driver_handle, fname); + if (!get_extensions) + goto free_handle; + + dri->extensions = get_extensions(); + if (!dri->extensions) + goto free_handle; + + if (!lookup_extension(dri->extensions, __DRI_CORE, 2, + (const __DRIextension **)&dri->core_extension)) + goto free_handle; + + /* Version 4 for createNewScreen2 */ + if (!lookup_extension(dri->extensions, __DRI_DRI2, 4, + (const __DRIextension **)&dri->dri2_extension)) 
+ goto free_handle; + + dri->device = dri->dri2_extension->createNewScreen2(0, dri->fd, loader_extensions, + dri->extensions, &dri->configs, NULL); + if (!dri->device) + goto free_handle; + + dri->context = + dri->dri2_extension->createNewContext(dri->device, *dri->configs, NULL, NULL); + + if (!dri->context) + goto free_screen; + + if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI_IMAGE, 12, + (const __DRIextension **)&dri->image_extension)) + goto free_context; + + if (!lookup_extension(dri->core_extension->getExtensions(dri->device), __DRI2_FLUSH, 4, + (const __DRIextension **)&dri->flush_extension)) + goto free_context; + + return 0; + +free_context: + dri->core_extension->destroyContext(dri->context); +free_screen: + dri->core_extension->destroyScreen(dri->device); +free_handle: + dlclose(dri->driver_handle); + dri->driver_handle = NULL; +close_dri_fd: + close(dri->fd); + return -ENODEV; +} + +/* + * The caller is responsible for freeing drv->priv. + */ +void dri_close(struct driver *drv) +{ + struct dri_driver *dri = drv->priv; + + dri->core_extension->destroyContext(dri->context); + dri->core_extension->destroyScreen(dri->device); + dlclose(dri->driver_handle); + dri->driver_handle = NULL; + close(dri->fd); +} + +int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) +{ + unsigned int dri_use; + int ret, dri_format; + struct dri_driver *dri = bo->drv->priv; + + dri_format = drm_format_to_dri_format(format); + + /* Gallium drivers require shared to get the handle and stride. 
*/ + dri_use = __DRI_IMAGE_USE_SHARE; + if (use_flags & BO_USE_SCANOUT) + dri_use |= __DRI_IMAGE_USE_SCANOUT; + if (use_flags & BO_USE_CURSOR) + dri_use |= __DRI_IMAGE_USE_CURSOR; + if (use_flags & BO_USE_LINEAR) + dri_use |= __DRI_IMAGE_USE_LINEAR; + + bo->priv = dri->image_extension->createImage(dri->device, width, height, dri_format, + dri_use, NULL); + if (!bo->priv) { + ret = -errno; + return ret; + } + + ret = import_into_minigbm(dri, bo); + if (ret) + goto free_image; + + return 0; + +free_image: + dri->image_extension->destroyImage(bo->priv); + return ret; +} + +int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + const uint64_t *modifiers, uint32_t modifier_count) +{ + int ret, dri_format; + struct dri_driver *dri = bo->drv->priv; + + if (!dri->image_extension->createImageWithModifiers) { + return -ENOENT; + } + + dri_format = drm_format_to_dri_format(format); + + bo->priv = dri->image_extension->createImageWithModifiers( + dri->device, width, height, dri_format, modifiers, modifier_count, NULL); + if (!bo->priv) { + ret = -errno; + return ret; + } + + ret = import_into_minigbm(dri, bo); + if (ret) + goto free_image; + + return 0; + +free_image: + dri->image_extension->destroyImage(bo->priv); + return ret; +} + +int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data) +{ + int ret; + struct dri_driver *dri = bo->drv->priv; + + if (data->format_modifiers[0] != DRM_FORMAT_MOD_INVALID) { + unsigned error; + + if (!dri->image_extension->createImageFromDmaBufs2) + return -ENOSYS; + + // clang-format off + bo->priv = dri->image_extension->createImageFromDmaBufs2(dri->device, data->width, data->height, + data->format, + data->format_modifiers[0], + data->fds, + bo->meta.num_planes, + (int *)data->strides, + (int *)data->offsets, + __DRI_YUV_COLOR_SPACE_UNDEFINED, + __DRI_YUV_RANGE_UNDEFINED, + __DRI_YUV_CHROMA_SITING_UNDEFINED, + __DRI_YUV_CHROMA_SITING_UNDEFINED, + &error, NULL); + // clang-format on + + /* 
Could translate the DRI error, but the Mesa GBM also returns ENOSYS. */ + if (!bo->priv) + return -ENOSYS; + } else { + // clang-format off + bo->priv = dri->image_extension->createImageFromFds(dri->device, data->width, data->height, + data->format, data->fds, + bo->meta.num_planes, + (int *)data->strides, + (int *)data->offsets, NULL); + // clang-format on + if (!bo->priv) + return -errno; + } + + ret = import_into_minigbm(dri, bo); + if (ret) { + dri->image_extension->destroyImage(bo->priv); + return ret; + } + + return 0; +} + +int dri_bo_destroy(struct bo *bo) +{ + struct dri_driver *dri = bo->drv->priv; + + assert(bo->priv); + close_gem_handle(bo->handles[0].u32, bo->drv->fd); + dri->image_extension->destroyImage(bo->priv); + bo->priv = NULL; + return 0; +} + +/* + * Map an image plane. + * + * This relies on the underlying driver to do a decompressing and/or de-tiling + * blit if necessary, + * + * This function itself is not thread-safe; we rely on the fact that the caller + * locks a per-driver mutex. + */ +void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) +{ + struct dri_driver *dri = bo->drv->priv; + + /* GBM flags and DRI flags are the same. */ + vma->addr = dri->image_extension->mapImage(dri->context, bo->priv, 0, 0, bo->meta.width, + bo->meta.height, map_flags, + (int *)&vma->map_strides[plane], &vma->priv); + if (!vma->addr) + return MAP_FAILED; + + return vma->addr; +} + +int dri_bo_unmap(struct bo *bo, struct vma *vma) +{ + struct dri_driver *dri = bo->drv->priv; + + assert(vma->priv); + dri->image_extension->unmapImage(dri->context, bo->priv, vma->priv); + + /* + * From gbm_dri.c in Mesa: + * + * "Not all DRI drivers use direct maps. They may queue up DMA operations + * on the mapping context. Since there is no explicit gbm flush mechanism, + * we need to flush here." 
+ */ + + dri->flush_extension->flush_with_flags(dri->context, NULL, __DRI2_FLUSH_CONTEXT, 0); + return 0; +} + +size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier) +{ + struct dri_driver *dri = drv->priv; + if (!dri->image_extension->queryDmaBufFormatModifierAttribs) { + /* We do not do any modifier checks here. The create will fail + * later if the modifier is not supported. */ + return drv_num_planes_from_format(format); + } + + uint64_t planes; + GLboolean ret = dri->image_extension->queryDmaBufFormatModifierAttribs( + dri->device, format, modifier, __DRI_IMAGE_ATTRIB_NUM_PLANES, &planes); + if (!ret) + return 0; + + return planes; +} + +#endif diff --git a/dri.h b/dri.h new file mode 100644 index 0000000..6218e82 --- /dev/null +++ b/dri.h @@ -0,0 +1,41 @@ +/* + * Copyright 2017 Advanced Micro Devices. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifdef DRV_AMDGPU + +// Avoid transitively including a bunch of unnecessary headers. +#define GL_GLEXT_LEGACY +#include "GL/internal/dri_interface.h" +#undef GL_GLEXT_LEGACY + +#include "drv.h" + +struct dri_driver { + int fd; + void *driver_handle; + __DRIscreen *device; + __DRIcontext *context; /* Needed for map/unmap operations. 
*/ + const __DRIextension **extensions; + const __DRIcoreExtension *core_extension; + const __DRIdri2Extension *dri2_extension; + const __DRIimageExtension *image_extension; + const __DRI2flushExtension *flush_extension; + const __DRIconfig **configs; +}; + +int dri_init(struct driver *drv, const char *dri_so_path, const char *driver_suffix); +void dri_close(struct driver *drv); +int dri_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags); +int dri_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + const uint64_t *modifiers, uint32_t modifier_count); +int dri_bo_import(struct bo *bo, struct drv_import_fd_data *data); +int dri_bo_destroy(struct bo *bo); +void *dri_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags); +int dri_bo_unmap(struct bo *bo, struct vma *vma); +size_t dri_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier); + +#endif diff --git a/drv.c b/drv.c index 3ccf16e..c7c487a 100644 --- a/drv.c +++ b/drv.c @@ -16,46 +16,62 @@ #include #include +#ifdef __ANDROID__ +#include +#include +#endif + #include "drv_priv.h" #include "helpers.h" #include "util.h" +#ifdef USE_GRALLOC1 +#include "i915_private.h" +#endif + #ifdef DRV_AMDGPU -extern struct backend backend_amdgpu; +extern const struct backend backend_amdgpu; #endif -extern struct backend backend_cirrus; -extern struct backend backend_evdi; +extern const struct backend backend_evdi; #ifdef DRV_EXYNOS -extern struct backend backend_exynos; +extern const struct backend backend_exynos; #endif -extern struct backend backend_gma500; #ifdef DRV_I915 -extern struct backend backend_i915; +extern const struct backend backend_i915; #endif #ifdef DRV_MARVELL -extern struct backend backend_marvell; +extern const struct backend backend_marvell; #endif #ifdef DRV_MEDIATEK -extern struct backend backend_mediatek; +extern const struct backend backend_mediatek; +#endif +#ifdef DRV_MESON 
+extern const struct backend backend_meson; +#endif +#ifdef DRV_MSM +extern const struct backend backend_msm; #endif -extern struct backend backend_nouveau; +extern const struct backend backend_nouveau; #ifdef DRV_RADEON -extern struct backend backend_radeon; +extern const struct backend backend_radeon; #endif #ifdef DRV_ROCKCHIP -extern struct backend backend_rockchip; +extern const struct backend backend_rockchip; +#endif +#ifdef DRV_SYNAPTICS +extern const struct backend backend_synaptics; #endif #ifdef DRV_TEGRA -extern struct backend backend_tegra; +extern const struct backend backend_tegra; #endif -extern struct backend backend_udl; +extern const struct backend backend_udl; #ifdef DRV_VC4 -extern struct backend backend_vc4; +extern const struct backend backend_vc4; #endif -extern struct backend backend_vgem; -extern struct backend backend_virtio_gpu; +extern const struct backend backend_vgem; +extern const struct backend backend_virtio_gpu; -static struct backend *drv_get_backend(int fd) +static const struct backend *drv_get_backend(int fd) { drmVersionPtr drm_version; unsigned int i; @@ -65,15 +81,14 @@ static struct backend *drv_get_backend(int fd) if (!drm_version) return NULL; - struct backend *backend_list[] = { + const struct backend *backend_list[] = { #ifdef DRV_AMDGPU &backend_amdgpu, #endif - &backend_cirrus, &backend_evdi, + &backend_evdi, #ifdef DRV_EXYNOS &backend_exynos, #endif - &backend_gma500, #ifdef DRV_I915 &backend_i915, #endif @@ -82,6 +97,12 @@ static struct backend *drv_get_backend(int fd) #endif #ifdef DRV_MEDIATEK &backend_mediatek, +#endif +#ifdef DRV_MESON + &backend_meson, +#endif +#ifdef DRV_MSM + &backend_msm, #endif &backend_nouveau, #ifdef DRV_RADEON @@ -90,6 +111,9 @@ static struct backend *drv_get_backend(int fd) #ifdef DRV_ROCKCHIP &backend_rockchip, #endif +#ifdef DRV_SYNAPTICS + &backend_synaptics, +#endif #ifdef DRV_TEGRA &backend_tegra, #endif @@ -97,14 +121,24 @@ static struct backend *drv_get_backend(int fd) #ifdef 
DRV_VC4 &backend_vc4, #endif - &backend_vgem, &backend_virtio_gpu, + &backend_vgem, &backend_virtio_gpu, }; - for (i = 0; i < ARRAY_SIZE(backend_list); i++) - if (!strcmp(drm_version->name, backend_list[i]->name)) { + for (i = 0; i < ARRAY_SIZE(backend_list); i++) { + const struct backend *b = backend_list[i]; + // Exactly one of the main create functions must be defined. + assert((b->bo_create != NULL) ^ (b->bo_create_from_metadata != NULL)); + // Either both or neither must be implemented. + assert((b->bo_compute_metadata != NULL) == (b->bo_create_from_metadata != NULL)); + // Both can't be defined, but it's okay for neither to be (i.e. only bo_create). + assert((b->bo_create_with_modifiers == NULL) || + (b->bo_create_from_metadata == NULL)); + + if (!strcmp(drm_version->name, b->name)) { drmFreeVersion(drm_version); - return backend_list[i]; + return b; } + } drmFreeVersion(drm_version); return NULL; @@ -133,30 +167,26 @@ struct driver *drv_create(int fd) if (!drv->buffer_table) goto free_lock; - drv->map_table = drmHashCreate(); - if (!drv->map_table) + drv->mappings = drv_array_init(sizeof(struct mapping)); + if (!drv->mappings) goto free_buffer_table; - /* Start with a power of 2 number of allocations. 
*/ - drv->combos.allocations = 2; - drv->combos.size = 0; - - drv->combos.data = calloc(drv->combos.allocations, sizeof(struct combination)); - if (!drv->combos.data) - goto free_map_table; + drv->combos = drv_array_init(sizeof(struct combination)); + if (!drv->combos) + goto free_mappings; if (drv->backend->init) { ret = drv->backend->init(drv); if (ret) { - free(drv->combos.data); - goto free_map_table; + drv_array_destroy(drv->combos); + goto free_mappings; } } return drv; -free_map_table: - drmHashDestroy(drv->map_table); +free_mappings: + drv_array_destroy(drv->mappings); free_buffer_table: drmHashDestroy(drv->buffer_table); free_lock: @@ -174,9 +204,8 @@ void drv_destroy(struct driver *drv) drv->backend->close(drv); drmHashDestroy(drv->buffer_table); - drmHashDestroy(drv->map_table); - - free(drv->combos.data); + drv_array_destroy(drv->mappings); + drv_array_destroy(drv->combos); pthread_mutex_unlock(&drv->driver_lock); pthread_mutex_destroy(&drv->driver_lock); @@ -203,8 +232,8 @@ struct combination *drv_get_combination(struct driver *drv, uint32_t format, uin best = NULL; uint32_t i; - for (i = 0; i < drv->combos.size; i++) { - curr = &drv->combos.data[i]; + for (i = 0; i < drv_array_size(drv->combos); i++) { + curr = drv_array_at_idx(drv->combos, i); if ((format == curr->format) && use_flags == (curr->use_flags & use_flags)) if (!best || best->metadata.priority < curr->metadata.priority) best = curr; @@ -214,7 +243,7 @@ struct combination *drv_get_combination(struct driver *drv, uint32_t format, uin } struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) + uint64_t use_flags, bool is_test_buffer) { struct bo *bo; @@ -224,13 +253,14 @@ struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint3 return NULL; bo->drv = drv; - bo->width = width; - bo->height = height; - bo->format = format; - bo->use_flags = use_flags; - bo->num_planes = drv_num_planes_from_format(format); - - if 
(!bo->num_planes) { + bo->meta.width = width; + bo->meta.height = height; + bo->meta.format = format; + bo->meta.use_flags = use_flags; + bo->meta.num_planes = drv_num_planes_from_format(format); + bo->is_test_buffer = is_test_buffer; + + if (!bo->meta.num_planes) { free(bo); return NULL; } @@ -244,13 +274,25 @@ struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, ui int ret; size_t plane; struct bo *bo; + bool is_test_alloc; - bo = drv_bo_new(drv, width, height, format, use_flags); + is_test_alloc = use_flags & BO_USE_TEST_ALLOC; + use_flags &= ~BO_USE_TEST_ALLOC; + + bo = drv_bo_new(drv, width, height, format, use_flags, is_test_alloc); if (!bo) return NULL; - ret = drv->backend->bo_create(bo, width, height, format, use_flags); + ret = -EINVAL; + if (drv->backend->bo_compute_metadata) { + ret = drv->backend->bo_compute_metadata(bo, width, height, format, use_flags, NULL, + 0); + if (!is_test_alloc && ret == 0) + ret = drv->backend->bo_create_from_metadata(bo); + } else if (!is_test_alloc) { + ret = drv->backend->bo_create(bo, width, height, format, use_flags); + } if (ret) { free(bo); @@ -259,9 +301,9 @@ struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, ui pthread_mutex_lock(&drv->driver_lock); - for (plane = 0; plane < bo->num_planes; plane++) { + for (plane = 0; plane < bo->meta.num_planes; plane++) { if (plane > 0) - assert(bo->offsets[plane] >= bo->offsets[plane - 1]); + assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]); drv_increment_reference_count(drv, bo, plane); } @@ -278,17 +320,26 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint size_t plane; struct bo *bo; - if (!drv->backend->bo_create_with_modifiers) { + if (!drv->backend->bo_create_with_modifiers && !drv->backend->bo_compute_metadata) { errno = ENOENT; return NULL; } - bo = drv_bo_new(drv, width, height, format, BO_USE_NONE); + bo = drv_bo_new(drv, width, height, format, BO_USE_NONE, false); if 
(!bo) return NULL; - ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, count); + ret = -EINVAL; + if (drv->backend->bo_compute_metadata) { + ret = drv->backend->bo_compute_metadata(bo, width, height, format, BO_USE_NONE, + modifiers, count); + if (ret == 0) + ret = drv->backend->bo_create_from_metadata(bo); + } else { + ret = drv->backend->bo_create_with_modifiers(bo, width, height, format, modifiers, + count); + } if (ret) { free(bo); @@ -297,9 +348,9 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint pthread_mutex_lock(&drv->driver_lock); - for (plane = 0; plane < bo->num_planes; plane++) { + for (plane = 0; plane < bo->meta.num_planes; plane++) { if (plane > 0) - assert(bo->offsets[plane] >= bo->offsets[plane - 1]); + assert(bo->meta.offsets[plane] >= bo->meta.offsets[plane - 1]); drv_increment_reference_count(drv, bo, plane); } @@ -311,23 +362,27 @@ struct bo *drv_bo_create_with_modifiers(struct driver *drv, uint32_t width, uint void drv_bo_destroy(struct bo *bo) { + int ret; size_t plane; uintptr_t total = 0; struct driver *drv = bo->drv; - pthread_mutex_lock(&drv->driver_lock); + if (!bo->is_test_buffer) { + pthread_mutex_lock(&drv->driver_lock); - for (plane = 0; plane < bo->num_planes; plane++) - drv_decrement_reference_count(drv, bo, plane); + for (plane = 0; plane < bo->meta.num_planes; plane++) + drv_decrement_reference_count(drv, bo, plane); - for (plane = 0; plane < bo->num_planes; plane++) - total += drv_get_reference_count(drv, bo, plane); + for (plane = 0; plane < bo->meta.num_planes; plane++) + total += drv_get_reference_count(drv, bo, plane); - pthread_mutex_unlock(&drv->driver_lock); + pthread_mutex_unlock(&drv->driver_lock); - if (total == 0) { - assert(drv_map_info_destroy(bo) == 0); - bo->drv->backend->bo_destroy(bo); + if (total == 0) { + ret = drv_mapping_destroy(bo); + assert(ret == 0); + bo->drv->backend->bo_destroy(bo); + } } free(bo); @@ -340,7 +395,7 @@ struct bo 
*drv_bo_import(struct driver *drv, struct drv_import_fd_data *data) struct bo *bo; off_t seek_end; - bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags); + bo = drv_bo_new(drv, data->width, data->height, data->format, data->use_flags, false); if (!bo) return NULL; @@ -351,29 +406,35 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data) return NULL; } - for (plane = 0; plane < bo->num_planes; plane++) { - bo->strides[plane] = data->strides[plane]; - bo->offsets[plane] = data->offsets[plane]; - bo->format_modifiers[plane] = data->format_modifiers[plane]; + for (plane = 0; plane < bo->meta.num_planes; plane++) { + pthread_mutex_lock(&bo->drv->driver_lock); + drv_increment_reference_count(bo->drv, bo, plane); + pthread_mutex_unlock(&bo->drv->driver_lock); + } + + for (plane = 0; plane < bo->meta.num_planes; plane++) { + bo->meta.strides[plane] = data->strides[plane]; + bo->meta.offsets[plane] = data->offsets[plane]; + bo->meta.format_modifiers[plane] = data->format_modifiers[plane]; seek_end = lseek(data->fds[plane], 0, SEEK_END); if (seek_end == (off_t)(-1)) { - fprintf(stderr, "drv: lseek() failed with %s\n", strerror(errno)); + drv_log("lseek() failed with %s\n", strerror(errno)); goto destroy_bo; } lseek(data->fds[plane], 0, SEEK_SET); - if (plane == bo->num_planes - 1 || data->offsets[plane + 1] == 0) - bo->sizes[plane] = seek_end - data->offsets[plane]; + if (plane == bo->meta.num_planes - 1 || data->offsets[plane + 1] == 0) + bo->meta.sizes[plane] = seek_end - data->offsets[plane]; else - bo->sizes[plane] = data->offsets[plane + 1] - data->offsets[plane]; + bo->meta.sizes[plane] = data->offsets[plane + 1] - data->offsets[plane]; - if ((int64_t)bo->offsets[plane] + bo->sizes[plane] > seek_end) { - fprintf(stderr, "drv: buffer size is too large.\n"); + if ((int64_t)bo->meta.offsets[plane] + bo->meta.sizes[plane] > seek_end) { + drv_log("buffer size is too large.\n"); goto destroy_bo; } - bo->total_size += 
bo->sizes[plane]; + bo->meta.total_size += bo->meta.sizes[plane]; } return bo; @@ -383,121 +444,170 @@ struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data) return NULL; } -void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, - uint32_t map_flags, struct map_info **map_data, size_t plane) +void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags, + struct mapping **map_data, size_t plane) { - void *ptr; + uint32_t i; uint8_t *addr; - size_t offset; - struct map_info *data; + struct mapping mapping; - assert(width > 0); - assert(height > 0); - assert(x + width <= drv_bo_get_width(bo)); - assert(y + height <= drv_bo_get_height(bo)); + assert(rect->width >= 0); + assert(rect->height >= 0); + assert(rect->x + rect->width <= drv_bo_get_width(bo)); + assert(rect->y + rect->height <= drv_bo_get_height(bo)); assert(BO_MAP_READ_WRITE & map_flags); /* No CPU access for protected buffers. */ - assert(!(bo->use_flags & BO_USE_PROTECTED)); + assert(!(bo->meta.use_flags & BO_USE_PROTECTED)); + + if (bo->is_test_buffer) { + return MAP_FAILED; + } + + memset(&mapping, 0, sizeof(mapping)); + mapping.rect = *rect; + mapping.refcount = 1; pthread_mutex_lock(&bo->drv->driver_lock); - if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) { - data = (struct map_info *)ptr; - /* TODO(gsingh): support mapping same buffer with different flags. 
*/ - assert(data->map_flags == map_flags); - data->refcount++; + for (i = 0; i < drv_array_size(bo->drv->mappings); i++) { + struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i); + if (prior->vma->handle != bo->handles[plane].u32 || + prior->vma->map_flags != map_flags) + continue; + + if (rect->x != prior->rect.x || rect->y != prior->rect.y || + rect->width != prior->rect.width || rect->height != prior->rect.height) + continue; + + prior->refcount++; + *map_data = prior; + goto exact_match; + } + + for (i = 0; i < drv_array_size(bo->drv->mappings); i++) { + struct mapping *prior = (struct mapping *)drv_array_at_idx(bo->drv->mappings, i); + if (prior->vma->handle != bo->handles[plane].u32 || + prior->vma->map_flags != map_flags) + continue; + + prior->vma->refcount++; + mapping.vma = prior->vma; goto success; } - data = calloc(1, sizeof(*data)); - addr = bo->drv->backend->bo_map(bo, data, plane, map_flags); + mapping.vma = calloc(1, sizeof(*mapping.vma)); + memcpy(mapping.vma->map_strides, bo->meta.strides, sizeof(mapping.vma->map_strides)); + addr = bo->drv->backend->bo_map(bo, mapping.vma, plane, map_flags); if (addr == MAP_FAILED) { *map_data = NULL; - free(data); + free(mapping.vma); pthread_mutex_unlock(&bo->drv->driver_lock); return MAP_FAILED; } - data->refcount = 1; - data->addr = addr; - data->handle = bo->handles[plane].u32; - data->map_flags = map_flags; - drmHashInsert(bo->drv->map_table, bo->handles[plane].u32, (void *)data); + mapping.vma->refcount = 1; + mapping.vma->addr = addr; + mapping.vma->handle = bo->handles[plane].u32; + mapping.vma->map_flags = map_flags; success: - drv_bo_invalidate(bo, data); - *map_data = data; - offset = drv_bo_get_plane_stride(bo, plane) * y; - offset += drv_stride_from_format(bo->format, x, plane); - addr = (uint8_t *)data->addr; - addr += drv_bo_get_plane_offset(bo, plane) + offset; + *map_data = drv_array_append(bo->drv->mappings, &mapping); +exact_match: + drv_bo_invalidate(bo, 
*map_data); + addr = (uint8_t *)((*map_data)->vma->addr); + addr += drv_bo_get_plane_offset(bo, plane); pthread_mutex_unlock(&bo->drv->driver_lock); - return (void *)addr; } -int drv_bo_unmap(struct bo *bo, struct map_info *data) +int drv_bo_unmap(struct bo *bo, struct mapping *mapping) { - int ret = drv_bo_flush(bo, data); - if (ret) - return ret; + uint32_t i; + int ret = 0; pthread_mutex_lock(&bo->drv->driver_lock); - if (!--data->refcount) { - ret = bo->drv->backend->bo_unmap(bo, data); - drmHashDelete(bo->drv->map_table, data->handle); - free(data); + if (--mapping->refcount) + goto out; + + if (!--mapping->vma->refcount) { + ret = bo->drv->backend->bo_unmap(bo, mapping->vma); + free(mapping->vma); } - pthread_mutex_unlock(&bo->drv->driver_lock); + for (i = 0; i < drv_array_size(bo->drv->mappings); i++) { + if (mapping == (struct mapping *)drv_array_at_idx(bo->drv->mappings, i)) { + drv_array_remove(bo->drv->mappings, i); + break; + } + } +out: + pthread_mutex_unlock(&bo->drv->driver_lock); return ret; } -int drv_bo_invalidate(struct bo *bo, struct map_info *data) +int drv_bo_invalidate(struct bo *bo, struct mapping *mapping) { int ret = 0; - assert(data); - assert(data->refcount >= 0); + + assert(mapping); + assert(mapping->vma); + assert(mapping->refcount > 0); + assert(mapping->vma->refcount > 0); if (bo->drv->backend->bo_invalidate) - ret = bo->drv->backend->bo_invalidate(bo, data); + ret = bo->drv->backend->bo_invalidate(bo, mapping); return ret; } -int drv_bo_flush(struct bo *bo, struct map_info *data) +int drv_bo_flush(struct bo *bo, struct mapping *mapping) { int ret = 0; - assert(data); - assert(data->refcount >= 0); - assert(!(bo->use_flags & BO_USE_PROTECTED)); + + assert(mapping); + assert(mapping->vma); + assert(mapping->refcount > 0); + assert(mapping->vma->refcount > 0); if (bo->drv->backend->bo_flush) - ret = bo->drv->backend->bo_flush(bo, data); + ret = bo->drv->backend->bo_flush(bo, mapping); return ret; } -uint32_t drv_bo_get_width(struct bo 
*bo) +int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping) { - return bo->width; + int ret = 0; + + assert(mapping); + assert(mapping->vma); + assert(mapping->refcount > 0); + assert(mapping->vma->refcount > 0); + assert(!(bo->meta.use_flags & BO_USE_PROTECTED)); + + if (bo->drv->backend->bo_flush) + ret = bo->drv->backend->bo_flush(bo, mapping); + else + ret = drv_bo_unmap(bo, mapping); + + return ret; } -uint32_t drv_bo_get_height(struct bo *bo) +uint32_t drv_bo_get_width(struct bo *bo) { - return bo->height; + return bo->meta.width; } -uint32_t drv_bo_get_stride_or_tiling(struct bo *bo) +uint32_t drv_bo_get_height(struct bo *bo) { - return bo->tiling ? bo->tiling : drv_bo_get_plane_stride(bo, 0); + return bo->meta.height; } size_t drv_bo_get_num_planes(struct bo *bo) { - return bo->num_planes; + return bo->meta.num_planes; } union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane) @@ -513,119 +623,73 @@ int drv_bo_get_plane_fd(struct bo *bo, size_t plane) { int ret, fd; - assert(plane < bo->num_planes); + assert(plane < bo->meta.num_planes); + + if (bo->is_test_buffer) { + return -EINVAL; + } ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC | DRM_RDWR, &fd); + // Older DRM implementations blocked DRM_RDWR, but gave a read/write mapping anyways + if (ret) + ret = drmPrimeHandleToFD(bo->drv->fd, bo->handles[plane].u32, DRM_CLOEXEC, &fd); + return (ret) ? 
ret : fd; } uint32_t drv_bo_get_plane_offset(struct bo *bo, size_t plane) { - assert(plane < bo->num_planes); - return bo->offsets[plane]; + assert(plane < bo->meta.num_planes); + return bo->meta.offsets[plane]; } uint32_t drv_bo_get_plane_size(struct bo *bo, size_t plane) { - assert(plane < bo->num_planes); - return bo->sizes[plane]; + assert(plane < bo->meta.num_planes); + return bo->meta.sizes[plane]; } uint32_t drv_bo_get_plane_stride(struct bo *bo, size_t plane) { - assert(plane < bo->num_planes); - return bo->strides[plane]; + assert(plane < bo->meta.num_planes); + return bo->meta.strides[plane]; } uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane) { - assert(plane < bo->num_planes); - return bo->format_modifiers[plane]; + assert(plane < bo->meta.num_planes); + return bo->meta.format_modifiers[plane]; } uint32_t drv_bo_get_format(struct bo *bo) { - return bo->format; + return bo->meta.format; +} + +size_t drv_bo_get_total_size(struct bo *bo) +{ + return bo->meta.total_size; } uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) { if (drv->backend->resolve_format) - return drv->backend->resolve_format(format, use_flags); + return drv->backend->resolve_format(drv, format, use_flags); return format; } -size_t drv_num_planes_from_format(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_ABGR4444: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ARGB4444: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_AYUV: - case DRM_FORMAT_BGR233: - case DRM_FORMAT_BGR565: - case DRM_FORMAT_BGR888: - case DRM_FORMAT_BGRA1010102: - case DRM_FORMAT_BGRA4444: - case DRM_FORMAT_BGRA5551: - case DRM_FORMAT_BGRA8888: - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_BGRX4444: - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_BGRX8888: - case DRM_FORMAT_C8: - case DRM_FORMAT_GR88: - case DRM_FORMAT_R8: - case 
DRM_FORMAT_RG88: - case DRM_FORMAT_RGB332: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_RGB888: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_RGBA4444: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_RGBX1010102: - case DRM_FORMAT_RGBX4444: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_XBGR4444: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XRGB4444: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - return 1; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - return 2; - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YVU420_ANDROID: - return 3; - } - - fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format); - return 0; -} - uint32_t drv_num_buffers_per_bo(struct bo *bo) { uint32_t count = 0; size_t plane, p; - for (plane = 0; plane < bo->num_planes; plane++) { + if (bo->is_test_buffer) { + return 0; + } + + for (plane = 0; plane < bo->meta.num_planes; plane++) { for (p = 0; p < plane; p++) if (bo->handles[p].u32 == bo->handles[plane].u32) break; @@ -635,3 +699,40 @@ uint32_t drv_num_buffers_per_bo(struct bo *bo) return count; } + +void drv_log_prefix(const char *prefix, const char *file, int line, const char *format, ...) 
+{ + char buf[50]; + snprintf(buf, sizeof(buf), "[%s:%s(%d)]", prefix, basename(file), line); + + va_list args; + va_start(args, format); +#ifdef __ANDROID__ + __android_log_vprint(ANDROID_LOG_ERROR, buf, format, args); +#else + fprintf(stderr, "%s ", buf); + vfprintf(stderr, format, args); +#endif + va_end(args); +} + +int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]) +{ + for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) { + strides[plane] = bo->meta.strides[plane]; + offsets[plane] = bo->meta.offsets[plane]; + } + + if (bo->drv->backend->resource_info) + return bo->drv->backend->resource_info(bo, strides, offsets); + + return 0; +} + +#ifdef USE_GRALLOC1 +uint32_t drv_bo_get_stride_or_tiling(struct bo *bo) +{ + return bo->meta.tiling ? bo->meta.tiling : drv_bo_get_plane_stride(bo, 0); +} +#endif diff --git a/drv.h b/drv.h index 7abaf79..f1667bf 100644 --- a/drv.h +++ b/drv.h @@ -12,6 +12,7 @@ extern "C" { #endif #include +#include #include #define DRV_MAX_PLANES 4 @@ -23,20 +24,24 @@ extern "C" { #define BO_USE_CURSOR (1ull << 1) #define BO_USE_CURSOR_64X64 BO_USE_CURSOR #define BO_USE_RENDERING (1ull << 2) -#define BO_USE_LINEAR (1ull << 3) -#define BO_USE_SW_READ_NEVER (1ull << 4) -#define BO_USE_SW_READ_RARELY (1ull << 5) -#define BO_USE_SW_READ_OFTEN (1ull << 6) -#define BO_USE_SW_WRITE_NEVER (1ull << 7) -#define BO_USE_SW_WRITE_RARELY (1ull << 8) -#define BO_USE_SW_WRITE_OFTEN (1ull << 9) -#define BO_USE_EXTERNAL_DISP (1ull << 10) -#define BO_USE_PROTECTED (1ull << 11) -#define BO_USE_HW_VIDEO_ENCODER (1ull << 12) -#define BO_USE_CAMERA_WRITE (1ull << 13) -#define BO_USE_CAMERA_READ (1ull << 14) +/* Skip for GBM_BO_USE_WRITE */ +#define BO_USE_LINEAR (1ull << 4) +#define BO_USE_TEXTURE (1ull << 5) +#define BO_USE_CAMERA_WRITE (1ull << 6) +#define BO_USE_CAMERA_READ (1ull << 7) +#define BO_USE_PROTECTED (1ull << 8) +#define BO_USE_SW_READ_OFTEN (1ull << 9) +#define BO_USE_SW_READ_RARELY 
(1ull << 10) +#define BO_USE_SW_WRITE_OFTEN (1ull << 11) +#define BO_USE_SW_WRITE_RARELY (1ull << 12) +#define BO_USE_HW_VIDEO_DECODER (1ull << 13) +#define BO_USE_HW_VIDEO_ENCODER (1ull << 14) +#define BO_USE_TEST_ALLOC (1ull << 15) #define BO_USE_RENDERSCRIPT (1ull << 16) -#define BO_USE_TEXTURE (1ull << 17) + +/* Quirks for allocating a buffer. */ +#define BO_QUIRK_NONE 0 +#define BO_QUIRK_DUMB32BPP (1ull << 0) /* Map flags */ #define BO_MAP_NONE 0 @@ -48,12 +53,21 @@ extern "C" { * on the namespace of already defined formats, which can be done by using invalid * fourcc codes. */ - #define DRM_FORMAT_NONE fourcc_code('0', '0', '0', '0') #define DRM_FORMAT_YVU420_ANDROID fourcc_code('9', '9', '9', '7') #define DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED fourcc_code('9', '9', '9', '8') #define DRM_FORMAT_FLEX_YCbCr_420_888 fourcc_code('9', '9', '9', '9') +/* This is a 10-bit bayer format for private reprocessing on MediaTek ISP. It's + * a private RAW format that other DRM drivers will never support and thus + * making it not upstreamable (i.e., defined in official DRM headers). */ +#define DRM_FORMAT_MTISP_SXYZW10 fourcc_code('M', 'B', '1', '0') + +// TODO(crbug.com/958181): remove this definition once drm_fourcc.h contains it. 
+#ifndef DRM_FORMAT_P010 +#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') +#endif + // clang-format on struct driver; struct bo; @@ -78,15 +92,29 @@ struct drv_import_fd_data { uint64_t use_flags; }; -struct map_info { +struct vma { void *addr; size_t length; uint32_t handle; uint32_t map_flags; int32_t refcount; + uint32_t map_strides[DRV_MAX_PLANES]; void *priv; }; +struct rectangle { + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; +}; + +struct mapping { + struct vma *vma; + struct rectangle rect; + uint32_t refcount; +}; + struct driver *drv_create(int fd); void drv_destroy(struct driver *drv); @@ -98,7 +126,7 @@ const char *drv_get_name(struct driver *drv); struct combination *drv_get_combination(struct driver *drv, uint32_t format, uint64_t use_flags); struct bo *drv_bo_new(struct driver *drv, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags); + uint64_t use_flags, bool is_test_buffer); struct bo *drv_bo_create(struct driver *drv, uint32_t width, uint32_t height, uint32_t format, uint64_t use_flags); @@ -110,21 +138,21 @@ void drv_bo_destroy(struct bo *bo); struct bo *drv_bo_import(struct driver *drv, struct drv_import_fd_data *data); -void *drv_bo_map(struct bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, - uint32_t map_flags, struct map_info **map_data, size_t plane); +void *drv_bo_map(struct bo *bo, const struct rectangle *rect, uint32_t map_flags, + struct mapping **map_data, size_t plane); -int drv_bo_unmap(struct bo *bo, struct map_info *data); +int drv_bo_unmap(struct bo *bo, struct mapping *mapping); -int drv_bo_invalidate(struct bo *bo, struct map_info *data); +int drv_bo_invalidate(struct bo *bo, struct mapping *mapping); -int drv_bo_flush(struct bo *bo, struct map_info *data); +int drv_bo_flush(struct bo *bo, struct mapping *mapping); + +int drv_bo_flush_or_unmap(struct bo *bo, struct mapping *mapping); uint32_t drv_bo_get_width(struct bo *bo); uint32_t drv_bo_get_height(struct bo 
*bo); -uint32_t drv_bo_get_stride_or_tiling(struct bo *bo); - size_t drv_bo_get_num_planes(struct bo *bo); union bo_handle drv_bo_get_plane_handle(struct bo *bo, size_t plane); @@ -141,14 +169,33 @@ uint64_t drv_bo_get_plane_format_modifier(struct bo *bo, size_t plane); uint32_t drv_bo_get_format(struct bo *bo); -uint32_t drv_bo_get_stride_in_pixels(struct bo *bo); +uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane); + +uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane); uint32_t drv_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags); size_t drv_num_planes_from_format(uint32_t format); +size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier); + uint32_t drv_num_buffers_per_bo(struct bo *bo); +int drv_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]); + +#ifdef USE_GRALLOC1 +uint32_t drv_bo_get_stride_or_tiling(struct bo *bo); +#endif + +#define drv_log(format, ...) 
\ + do { \ + drv_log_prefix("minigbm", __FILE__, __LINE__, format, ##__VA_ARGS__); \ + } while (0) + +__attribute__((format(printf, 4, 5))) void drv_log_prefix(const char *prefix, const char *file, + int line, const char *format, ...); + #ifdef __cplusplus } #endif diff --git a/drv_priv.h b/drv_priv.h index 3399cc7..32c082d 100644 --- a/drv_priv.h +++ b/drv_priv.h @@ -8,33 +8,33 @@ #define DRV_PRIV_H #include +#include #include #include #include #include "drv.h" -struct bo { - struct driver *drv; +struct bo_metadata { uint32_t width; uint32_t height; uint32_t format; uint32_t tiling; size_t num_planes; - union bo_handle handles[DRV_MAX_PLANES]; uint32_t offsets[DRV_MAX_PLANES]; uint32_t sizes[DRV_MAX_PLANES]; uint32_t strides[DRV_MAX_PLANES]; uint64_t format_modifiers[DRV_MAX_PLANES]; uint64_t use_flags; size_t total_size; - void *priv; }; -struct kms_item { - uint32_t format; - uint64_t modifier; - uint64_t use_flags; +struct bo { + struct driver *drv; + struct bo_metadata meta; + bool is_test_buffer; + union bo_handle handles[DRV_MAX_PLANES]; + void *priv; }; struct format_metadata { @@ -49,19 +49,13 @@ struct combination { uint64_t use_flags; }; -struct combinations { - struct combination *data; - uint32_t size; - uint32_t allocations; -}; - struct driver { int fd; - struct backend *backend; + const struct backend *backend; void *priv; void *buffer_table; - void *map_table; - struct combinations combos; + struct drv_array *mappings; + struct drv_array *combos; pthread_mutex_t driver_lock; }; @@ -73,25 +67,43 @@ struct backend { uint64_t use_flags); int (*bo_create_with_modifiers)(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, const uint64_t *modifiers, uint32_t count); + // Either both or neither _metadata functions must be implemented. + // If the functions are implemented, bo_create and bo_create_with_modifiers must not be. 
+ int (*bo_compute_metadata)(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags, const uint64_t *modifiers, uint32_t count); + int (*bo_create_from_metadata)(struct bo *bo); int (*bo_destroy)(struct bo *bo); int (*bo_import)(struct bo *bo, struct drv_import_fd_data *data); - void *(*bo_map)(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags); - int (*bo_unmap)(struct bo *bo, struct map_info *data); - int (*bo_invalidate)(struct bo *bo, struct map_info *data); - int (*bo_flush)(struct bo *bo, struct map_info *data); - uint32_t (*resolve_format)(uint32_t format, uint64_t use_flags); + void *(*bo_map)(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags); + int (*bo_unmap)(struct bo *bo, struct vma *vma); + int (*bo_invalidate)(struct bo *bo, struct mapping *mapping); + int (*bo_flush)(struct bo *bo, struct mapping *mapping); + uint32_t (*resolve_format)(struct driver *drv, uint32_t format, uint64_t use_flags); + size_t (*num_planes_from_modifier)(struct driver *drv, uint32_t format, uint64_t modifier); + int (*resource_info)(struct bo *bo, uint32_t strides[DRV_MAX_PLANES], + uint32_t offsets[DRV_MAX_PLANES]); }; // clang-format off -#define BO_USE_RENDER_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \ +#define BO_USE_RENDER_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERING | \ BO_USE_RENDERSCRIPT | BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \ - BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE + BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE) -#define BO_USE_TEXTURE_MASK BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \ +#define BO_USE_TEXTURE_MASK (BO_USE_LINEAR | BO_USE_PROTECTED | BO_USE_RENDERSCRIPT | \ BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN | \ - BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE + BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY | BO_USE_TEXTURE) + +#define BO_USE_SW_MASK (BO_USE_SW_READ_OFTEN | 
BO_USE_SW_WRITE_OFTEN | \ + BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY) + +#define BO_USE_NON_GPU_HW (BO_USE_SCANOUT | BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ | \ + BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER) + +#ifndef DRM_FORMAT_MOD_LINEAR +#define DRM_FORMAT_MOD_LINEAR DRM_FORMAT_MOD_NONE +#endif -#define LINEAR_METADATA (struct format_metadata) { 0, 1, DRM_FORMAT_MOD_NONE } +#define LINEAR_METADATA (struct format_metadata) { 1, 0, DRM_FORMAT_MOD_LINEAR } // clang-format on #endif diff --git a/evdi.c b/evdi.c index 829d6ea..bfa62a0 100644 --- a/evdi.c +++ b/evdi.c @@ -12,16 +12,13 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int evdi_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_modify_linear_combinations(drv); } -struct backend backend_evdi = { +const struct backend backend_evdi = { .name = "evdi", .init = evdi_init, .bo_create = drv_dumb_bo_create, diff --git a/exynos.c b/exynos.c index 963f030..6a80107 100644 --- a/exynos.c +++ b/exynos.c @@ -25,16 +25,11 @@ static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12 }; static int exynos_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &LINEAR_METADATA, BO_USE_TEXTURE_MASK); - if (ret) - return ret; + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + 
&LINEAR_METADATA, BO_USE_TEXTURE_MASK); return drv_modify_linear_combinations(drv); } @@ -50,25 +45,25 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint width = ALIGN(width, 16); height = ALIGN(height, 32); chroma_height = ALIGN(height / 2, 32); - bo->strides[0] = bo->strides[1] = width; + bo->meta.strides[0] = bo->meta.strides[1] = width; /* MFC v8+ requires 64 byte padding in the end of luma and chroma buffers. */ - bo->sizes[0] = bo->strides[0] * height + 64; - bo->sizes[1] = bo->strides[1] * chroma_height + 64; - bo->offsets[0] = bo->offsets[1] = 0; - bo->total_size = bo->sizes[0] + bo->sizes[1]; + bo->meta.sizes[0] = bo->meta.strides[0] * height + 64; + bo->meta.sizes[1] = bo->meta.strides[1] * chroma_height + 64; + bo->meta.offsets[0] = bo->meta.offsets[1] = 0; + bo->meta.total_size = bo->meta.sizes[0] + bo->meta.sizes[1]; } else if (format == DRM_FORMAT_XRGB8888 || format == DRM_FORMAT_ARGB8888) { - bo->strides[0] = drv_stride_from_format(format, width, 0); - bo->total_size = bo->sizes[0] = height * bo->strides[0]; - bo->offsets[0] = 0; + bo->meta.strides[0] = drv_stride_from_format(format, width, 0); + bo->meta.total_size = bo->meta.sizes[0] = height * bo->meta.strides[0]; + bo->meta.offsets[0] = 0; } else { - fprintf(stderr, "drv: unsupported format %X\n", format); + drv_log("unsupported format %X\n", format); assert(0); return -EINVAL; } int ret; - for (plane = 0; plane < bo->num_planes; plane++) { - size_t size = bo->sizes[plane]; + for (plane = 0; plane < bo->meta.num_planes; plane++) { + size_t size = bo->meta.sizes[plane]; struct drm_exynos_gem_create gem_create; memset(&gem_create, 0, sizeof(gem_create)); @@ -77,8 +72,8 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint ret = drmIoctl(bo->drv->fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &gem_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_EXYNOS_GEM_CREATE failed (size=%zu)\n", - size); + drv_log("DRM_IOCTL_EXYNOS_GEM_CREATE failed 
(size=%zu)\n", size); + ret = -errno; goto cleanup_planes; } @@ -94,7 +89,7 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint gem_close.handle = bo->handles[plane - 1].u32; int gem_close_ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close); if (gem_close_ret) { - fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret); + drv_log("DRM_IOCTL_GEM_CLOSE failed: %d\n", gem_close_ret); } } @@ -105,7 +100,7 @@ static int exynos_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint * Use dumb mapping with exynos even though a GEM buffer is created. * libdrm does the same thing in exynos_drm.c */ -struct backend backend_exynos = { +const struct backend backend_exynos = { .name = "exynos", .init = exynos_init, .bo_create = exynos_bo_create, diff --git a/gbm.c b/gbm.c index a78921c..ab5b3f7 100644 --- a/gbm.c +++ b/gbm.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "drv.h" @@ -40,6 +41,12 @@ PUBLIC int gbm_device_is_format_supported(struct gbm_device *gbm, uint32_t forma return (drv_get_combination(gbm->drv, format, use_flags) != NULL); } +PUBLIC int gbm_device_get_format_modifier_plane_count(struct gbm_device *gbm, uint32_t format, + uint64_t modifier) +{ + return 0; +} + PUBLIC struct gbm_device *gbm_create_device(int fd) { struct gbm_device *gbm; @@ -75,9 +82,15 @@ PUBLIC struct gbm_surface *gbm_surface_create(struct gbm_device *gbm, uint32_t w return surface; } -PUBLIC void gbm_surface_destroy(struct gbm_surface *surface) +PUBLIC struct gbm_surface *gbm_surface_create_with_modifiers(struct gbm_device *gbm, uint32_t width, + uint32_t height, uint32_t format, + const uint64_t *modifiers, + const unsigned int count) { - free(surface); + if (count != 0 || modifiers != NULL) + return NULL; + + return gbm_surface_create(gbm, width, height, format, 0); } PUBLIC struct gbm_bo *gbm_surface_lock_front_buffer(struct gbm_surface *surface) @@ -89,6 +102,16 @@ PUBLIC void 
gbm_surface_release_buffer(struct gbm_surface *surface, struct gbm_b { } +PUBLIC int gbm_surface_has_free_buffers(struct gbm_surface *surface) +{ + return 0; +} + +PUBLIC void gbm_surface_destroy(struct gbm_surface *surface) +{ + free(surface); +} + static struct gbm_bo *gbm_bo_new(struct gbm_device *gbm, uint32_t format) { struct gbm_bo *bo; @@ -116,6 +139,14 @@ PUBLIC struct gbm_bo *gbm_bo_create(struct gbm_device *gbm, uint32_t width, uint if (!bo) return NULL; + /* + * HACK: This is for HAL_PIXEL_FORMAT_YV12 buffers allocated by arcvm. + * None of our platforms can display YV12, so we can treat as a SW buffer. + * Remove once this can be intelligently resolved in the guest. + */ + if (format == GBM_FORMAT_YVU420 && (usage & GBM_BO_USE_LINEAR)) + format = DRM_FORMAT_YVU420_ANDROID; + bo->bo = drv_bo_create(gbm->drv, width, height, format, gbm_convert_usage(usage)); if (!bo->bo) { @@ -165,9 +196,9 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void struct gbm_bo *bo; struct drv_import_fd_data drv_data; struct gbm_import_fd_data *fd_data = buffer; - struct gbm_import_fd_planar_data *fd_planar_data = buffer; + struct gbm_import_fd_modifier_data *fd_modifier_data = buffer; uint32_t gbm_format; - size_t num_planes, i; + size_t num_planes, i, num_fds; memset(&drv_data, 0, sizeof(drv_data)); drv_data.use_flags = gbm_convert_usage(usage); @@ -179,21 +210,31 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void drv_data.format = fd_data->format; drv_data.fds[0] = fd_data->fd; drv_data.strides[0] = fd_data->stride; - break; - case GBM_BO_IMPORT_FD_PLANAR: - gbm_format = fd_planar_data->format; - drv_data.width = fd_planar_data->width; - drv_data.height = fd_planar_data->height; - drv_data.format = fd_planar_data->format; - num_planes = drv_num_planes_from_format(drv_data.format); + for (i = 0; i < GBM_MAX_PLANES; ++i) + drv_data.format_modifiers[i] = DRM_FORMAT_MOD_INVALID; + break; + case 
GBM_BO_IMPORT_FD_MODIFIER: + gbm_format = fd_modifier_data->format; + drv_data.width = fd_modifier_data->width; + drv_data.height = fd_modifier_data->height; + drv_data.format = fd_modifier_data->format; + num_planes = drv_num_planes_from_modifier(gbm->drv, drv_data.format, + fd_modifier_data->modifier); assert(num_planes); + num_fds = fd_modifier_data->num_fds; + if (!num_fds || num_fds > num_planes) + return NULL; + for (i = 0; i < num_planes; i++) { - drv_data.fds[i] = fd_planar_data->fds[i]; - drv_data.offsets[i] = fd_planar_data->offsets[i]; - drv_data.strides[i] = fd_planar_data->strides[i]; - drv_data.format_modifiers[i] = fd_planar_data->format_modifiers[i]; + if (num_fds != num_planes) + drv_data.fds[i] = fd_modifier_data->fds[0]; + else + drv_data.fds[i] = fd_modifier_data->fds[i]; + drv_data.offsets[i] = fd_modifier_data->offsets[i]; + drv_data.strides[i] = fd_modifier_data->strides[i]; + drv_data.format_modifiers[i] = fd_modifier_data->modifier; } for (i = num_planes; i < GBM_MAX_PLANES; i++) @@ -222,24 +263,10 @@ PUBLIC struct gbm_bo *gbm_bo_import(struct gbm_device *gbm, uint32_t type, void return bo; } -PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, - uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane) -{ - uint32_t map_flags; - if (!bo || width == 0 || height == 0 || !stride || !map_data) - return NULL; - - *stride = gbm_bo_get_plane_stride(bo, plane); - map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE; - map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? 
BO_MAP_WRITE : BO_MAP_NONE; - return drv_bo_map(bo->bo, x, y, width, height, map_flags, (struct map_info **)map_data, - plane); -} - PUBLIC void gbm_bo_unmap(struct gbm_bo *bo, void *map_data) { assert(bo); - drv_bo_unmap(bo->bo, map_data); + drv_bo_flush_or_unmap(bo->bo, map_data); } PUBLIC uint32_t gbm_bo_get_width(struct gbm_bo *bo) @@ -254,22 +281,22 @@ PUBLIC uint32_t gbm_bo_get_height(struct gbm_bo *bo) PUBLIC uint32_t gbm_bo_get_stride(struct gbm_bo *bo) { - return gbm_bo_get_plane_stride(bo, 0); + return gbm_bo_get_stride_for_plane(bo, 0); } -PUBLIC uint32_t gbm_bo_get_stride_or_tiling(struct gbm_bo *bo) +PUBLIC uint32_t gbm_bo_get_format(struct gbm_bo *bo) { - return drv_bo_get_stride_or_tiling(bo->bo); + return bo->gbm_format; } -PUBLIC uint32_t gbm_bo_get_format(struct gbm_bo *bo) +PUBLIC uint32_t gbm_bo_get_bpp(struct gbm_bo *bo) { - return bo->gbm_format; + return drv_bytes_per_pixel_from_format(drv_bo_get_format(bo->bo), 0); } -PUBLIC uint64_t gbm_bo_get_format_modifier(struct gbm_bo *bo) +PUBLIC uint64_t gbm_bo_get_modifier(struct gbm_bo *bo) { - return gbm_bo_get_plane_format_modifier(bo, 0); + return drv_bo_get_plane_format_modifier(bo->bo, 0); } PUBLIC struct gbm_device *gbm_bo_get_device(struct gbm_bo *bo) @@ -279,7 +306,7 @@ PUBLIC struct gbm_device *gbm_bo_get_device(struct gbm_bo *bo) PUBLIC union gbm_bo_handle gbm_bo_get_handle(struct gbm_bo *bo) { - return gbm_bo_get_plane_handle(bo, 0); + return gbm_bo_get_handle_for_plane(bo, 0); } PUBLIC int gbm_bo_get_fd(struct gbm_bo *bo) @@ -287,49 +314,112 @@ PUBLIC int gbm_bo_get_fd(struct gbm_bo *bo) return gbm_bo_get_plane_fd(bo, 0); } -PUBLIC size_t gbm_bo_get_num_planes(struct gbm_bo *bo) +PUBLIC int gbm_bo_get_plane_count(struct gbm_bo *bo) { return drv_bo_get_num_planes(bo->bo); } -PUBLIC union gbm_bo_handle gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane) +PUBLIC union gbm_bo_handle gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane) { - return (union 
gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, plane).u64; + return (union gbm_bo_handle)drv_bo_get_plane_handle(bo->bo, (size_t)plane).u64; } -PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane) +PUBLIC uint32_t gbm_bo_get_offset(struct gbm_bo *bo, size_t plane) { - return drv_bo_get_plane_fd(bo->bo, plane); + return drv_bo_get_plane_offset(bo->bo, (size_t)plane); } -PUBLIC uint32_t gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane) +PUBLIC uint32_t gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane) { - return drv_bo_get_plane_offset(bo->bo, plane); + return drv_bo_get_plane_stride(bo->bo, (size_t)plane); } -PUBLIC uint32_t gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane) +PUBLIC void gbm_bo_set_user_data(struct gbm_bo *bo, void *data, + void (*destroy_user_data)(struct gbm_bo *, void *)) { - return drv_bo_get_plane_size(bo->bo, plane); + bo->user_data = data; + bo->destroy_user_data = destroy_user_data; } -PUBLIC uint32_t gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane) +PUBLIC void *gbm_bo_get_user_data(struct gbm_bo *bo) { - return drv_bo_get_plane_stride(bo->bo, plane); + return bo->user_data; } -PUBLIC uint64_t gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane) +/* The two GBM_BO_FORMAT_[XA]RGB8888 formats alias the GBM_FORMAT_* + * formats of the same name. We want to accept them whenever someone + * has a GBM format, but never return them to the user. + */ +static uint32_t gbm_format_canonicalize(uint32_t gbm_format) { - return drv_bo_get_plane_format_modifier(bo->bo, plane); + switch (gbm_format) { + case GBM_BO_FORMAT_XRGB8888: + return GBM_FORMAT_XRGB8888; + case GBM_BO_FORMAT_ARGB8888: + return GBM_FORMAT_ARGB8888; + default: + return gbm_format; + } } -PUBLIC void gbm_bo_set_user_data(struct gbm_bo *bo, void *data, - void (*destroy_user_data)(struct gbm_bo *, void *)) +/** + * Returns a string representing the fourcc format name. 
+ */ +PUBLIC char *gbm_format_get_name(uint32_t gbm_format, struct gbm_format_name_desc *desc) { - bo->user_data = data; - bo->destroy_user_data = destroy_user_data; + gbm_format = gbm_format_canonicalize(gbm_format); + + desc->name[0] = gbm_format; + desc->name[1] = gbm_format >> 8; + desc->name[2] = gbm_format >> 16; + desc->name[3] = gbm_format >> 24; + desc->name[4] = 0; + + return desc->name; } -PUBLIC void *gbm_bo_get_user_data(struct gbm_bo *bo) +/* + * The following functions are not deprecated, but not in the Mesa the gbm + * header. The main difference is minigbm allows for the possibility of + * disjoint YUV images, while Mesa GBM does not. + */ +PUBLIC uint32_t gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane) { - return bo->user_data; + return drv_bo_get_plane_size(bo->bo, plane); +} + +PUBLIC int gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane) +{ + return drv_bo_get_plane_fd(bo->bo, plane); +} + +PUBLIC void *gbm_bo_map(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, + uint32_t transfer_flags, uint32_t *stride, void **map_data, size_t plane) +{ + return gbm_bo_map2(bo, x, y, width, height, transfer_flags, stride, map_data, plane); +} + +PUBLIC void *gbm_bo_map2(struct gbm_bo *bo, uint32_t x, uint32_t y, uint32_t width, uint32_t height, + uint32_t transfer_flags, uint32_t *stride, void **map_data, int plane) +{ + void *addr; + off_t offset; + uint32_t map_flags; + plane = (size_t)plane; + struct rectangle rect = { .x = x, .y = y, .width = width, .height = height }; + if (!bo || width == 0 || height == 0 || !stride || !map_data) + return NULL; + + map_flags = (transfer_flags & GBM_BO_TRANSFER_READ) ? BO_MAP_READ : BO_MAP_NONE; + map_flags |= (transfer_flags & GBM_BO_TRANSFER_WRITE) ? 
BO_MAP_WRITE : BO_MAP_NONE; + + addr = drv_bo_map(bo->bo, &rect, map_flags, (struct mapping **)map_data, plane); + if (addr == MAP_FAILED) + return MAP_FAILED; + + *stride = ((struct mapping *)*map_data)->vma->map_strides[plane]; + + offset = *stride * rect.y; + offset += rect.x * drv_bytes_per_pixel_from_format(bo->gbm_format, plane); + return (void *)((uint8_t *)addr + offset); } diff --git a/gbm.h b/gbm.h index a58aadb..2492728 100644 --- a/gbm.h +++ b/gbm.h @@ -28,16 +28,16 @@ #ifndef _GBM_H_ #define _GBM_H_ -#ifdef __cplusplus -extern "C" { -#endif - - #define __GBM__ 1 #include #include +#ifdef __cplusplus +extern "C" { +#endif + + /** * \file gbm.h * \brief Generic Buffer Manager @@ -69,8 +69,20 @@ union gbm_bo_handle { uint64_t u64; }; -#define GBM_MAX_PLANES 4 +/** Format of the allocated buffer */ +enum gbm_bo_format { + /** RGB with 8 bits per channel in a 32 bit value */ + GBM_BO_FORMAT_XRGB8888, + /** ARGB with 8 bits per channel in a 32 bit value */ + GBM_BO_FORMAT_ARGB8888 +}; + +/** + * The FourCC format codes are taken from the drm_fourcc.h definition, and + * re-namespaced. New GBM formats must not be added, unless they are + * identical ports from drm_fourcc. 
+ */ #define __gbm_fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \ ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24)) @@ -83,7 +95,6 @@ union gbm_bo_handle { #define GBM_FORMAT_R8 __gbm_fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ /* 16 bpp RG */ -#define GBM_FORMAT_RG88 __gbm_fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ #define GBM_FORMAT_GR88 __gbm_fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ /* 8 bpp RGB */ @@ -139,6 +150,15 @@ union gbm_bo_handle { #define GBM_FORMAT_RGBA1010102 __gbm_fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ #define GBM_FORMAT_BGRA1010102 __gbm_fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ +/* + * Floating point 64bpp RGB + * IEEE 754-2008 binary16 half-precision float + * [15:0] sign:exponent:mantissa 1:5:10 + */ +#define GBM_FORMAT_XBGR16161616F __gbm_fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +#define GBM_FORMAT_ABGR16161616F __gbm_fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */ + /* packed YCbCr */ #define GBM_FORMAT_YUYV __gbm_fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */ #define GBM_FORMAT_YVYU __gbm_fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */ @@ -179,28 +199,9 @@ union gbm_bo_handle { #define GBM_FORMAT_YUV444 __gbm_fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ #define GBM_FORMAT_YVU444 __gbm_fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ -/* - * Format Modifiers: - * - * Format modifiers describe, typically, a re-ordering or modification - * of the data in a plane of an FB. This can be used to express tiled/ - * swizzled formats, or compression, or a combination of the two. - * - * The upper 8 bits of the format modifier are a vendor-id as assigned - * below. The lower 56 bits are assigned as vendor sees fit. 
- */ - -/* Vendor Ids: */ -#define GBM_FORMAT_MOD_NONE 0 -#define GBM_FORMAT_MOD_VENDOR_INTEL 0x01 -#define GBM_FORMAT_MOD_VENDOR_AMD 0x02 -#define GBM_FORMAT_MOD_VENDOR_NV 0x03 -#define GBM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 -#define GBM_FORMAT_MOD_VENDOR_QCOM 0x05 -/* add more to the end as needed */ - -#define gbm_fourcc_mod_code(vendor, val) \ - ((((__u64)GBM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) +struct gbm_format_name_desc { + char name[5]; +}; /** * Flags to indicate the intended use for the buffer - these are passed into @@ -229,16 +230,14 @@ enum gbm_bo_flags { */ GBM_BO_USE_RENDERING = (1 << 2), /** - * Deprecated + * Buffer can be used for gbm_bo_write. This is guaranteed to work + * with GBM_BO_USE_CURSOR, but may not work for other combinations. */ GBM_BO_USE_WRITE = (1 << 3), /** - * Buffer is guaranteed to be laid out linearly in memory. That is, the - * buffer is laid out as an array with 'height' blocks, each block with - * length 'stride'. Each stride is in the same order as the rows of the - * buffer. + * Buffer is linear, i.e. not tiled. */ - GBM_BO_USE_LINEAR = (1 << 4), + GBM_BO_USE_LINEAR = (1 << 4), /** * The buffer will be used as a texture that will be sampled from. */ @@ -255,6 +254,32 @@ enum gbm_bo_flags { * Buffer inaccessible to unprivileged users. */ GBM_BO_USE_PROTECTED = (1 << 8), + /** + * These flags specify the frequency of software access. These flags do not + * guarantee the buffer is linear, but do guarantee gbm_bo_map(..) will + * present a linear view. + */ + GBM_BO_USE_SW_READ_OFTEN = (1 << 9), + GBM_BO_USE_SW_READ_RARELY = (1 << 10), + GBM_BO_USE_SW_WRITE_OFTEN = (1 << 11), + GBM_BO_USE_SW_WRITE_RARELY = (1 << 12), + /** + * The buffer will be written by a video decode accelerator. + */ + GBM_BO_USE_HW_VIDEO_DECODER = (1 << 13), + /** + * The buffer will be read by a video encode accelerator. 
+ */ + GBM_BO_USE_HW_VIDEO_ENCODER = (1 << 14), + + /** + * If this flag is set, no backing memory will be allocated for the + * created buffer. The metadata of the buffer (e.g. size) can be + * queried, and the values will be equal to a buffer allocated with + * the same same arguments minus this flag. However, any methods + * which would otherwise access the underlying buffer will fail. + */ + GBM_TEST_ALLOC = (1 << 15), }; int @@ -267,6 +292,11 @@ int gbm_device_is_format_supported(struct gbm_device *gbm, uint32_t format, uint32_t usage); +int +gbm_device_get_format_modifier_plane_count(struct gbm_device *gbm, + uint32_t format, + uint64_t modifier); + void gbm_device_destroy(struct gbm_device *gbm); @@ -282,12 +312,14 @@ struct gbm_bo * gbm_bo_create_with_modifiers(struct gbm_device *gbm, uint32_t width, uint32_t height, uint32_t format, - const uint64_t *modifiers, uint32_t count); - + const uint64_t *modifiers, + const unsigned int count); #define GBM_BO_IMPORT_WL_BUFFER 0x5501 #define GBM_BO_IMPORT_EGL_IMAGE 0x5502 #define GBM_BO_IMPORT_FD 0x5503 +// Deprecated. Use GBM_BO_IMPORT_FD_MODIFIER instead. 
#define GBM_BO_IMPORT_FD_PLANAR 0x5504 +#define GBM_BO_IMPORT_FD_MODIFIER 0x5505 struct gbm_import_fd_data { int fd; @@ -297,14 +329,17 @@ struct gbm_import_fd_data { uint32_t format; }; -struct gbm_import_fd_planar_data { - int fds[GBM_MAX_PLANES]; +#define GBM_MAX_PLANES 4 + +struct gbm_import_fd_modifier_data { uint32_t width; uint32_t height; uint32_t format; - uint32_t strides[GBM_MAX_PLANES]; - uint32_t offsets[GBM_MAX_PLANES]; - uint64_t format_modifiers[GBM_MAX_PLANES]; + uint32_t num_fds; + int fds[GBM_MAX_PLANES]; + int strides[GBM_MAX_PLANES]; + int offsets[GBM_MAX_PLANES]; + uint64_t modifier; }; struct gbm_bo * @@ -338,11 +373,6 @@ enum gbm_bo_transfer_flags { GBM_BO_TRANSFER_READ_WRITE = (GBM_BO_TRANSFER_READ | GBM_BO_TRANSFER_WRITE), }; -void * -gbm_bo_map(struct gbm_bo *bo, - uint32_t x, uint32_t y, uint32_t width, uint32_t height, - uint32_t flags, uint32_t *stride, void **map_data, size_t plane); - void gbm_bo_unmap(struct gbm_bo *bo, void *map_data); @@ -355,15 +385,17 @@ gbm_bo_get_height(struct gbm_bo *bo); uint32_t gbm_bo_get_stride(struct gbm_bo *bo); -/* Tegra bringup hack to pass tiling parameters at EGLImage creation. 
*/ uint32_t -gbm_bo_get_stride_or_tiling(struct gbm_bo *bo); +gbm_bo_get_stride_for_plane(struct gbm_bo *bo, size_t plane); uint32_t gbm_bo_get_format(struct gbm_bo *bo); -uint64_t -gbm_bo_get_format_modifier(struct gbm_bo *bo); +uint32_t +gbm_bo_get_bpp(struct gbm_bo *bo); + +uint32_t +gbm_bo_get_offset(struct gbm_bo *bo, size_t plane); struct gbm_device * gbm_bo_get_device(struct gbm_bo *bo); @@ -374,26 +406,17 @@ gbm_bo_get_handle(struct gbm_bo *bo); int gbm_bo_get_fd(struct gbm_bo *bo); -size_t -gbm_bo_get_num_planes(struct gbm_bo *bo); - -union gbm_bo_handle -gbm_bo_get_plane_handle(struct gbm_bo *bo, size_t plane); +uint64_t +gbm_bo_get_modifier(struct gbm_bo *bo); int -gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane); - -uint32_t -gbm_bo_get_plane_offset(struct gbm_bo *bo, size_t plane); +gbm_bo_get_plane_count(struct gbm_bo *bo); -uint32_t -gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane); - -uint32_t -gbm_bo_get_plane_stride(struct gbm_bo *bo, size_t plane); +union gbm_bo_handle +gbm_bo_get_handle_for_plane(struct gbm_bo *bo, size_t plane); -uint64_t -gbm_bo_get_plane_format_modifier(struct gbm_bo *bo, size_t plane); +int +gbm_bo_write(struct gbm_bo *bo, const void *buf, size_t count); void gbm_bo_set_user_data(struct gbm_bo *bo, void *data, @@ -410,6 +433,13 @@ gbm_surface_create(struct gbm_device *gbm, uint32_t width, uint32_t height, uint32_t format, uint32_t flags); +struct gbm_surface * +gbm_surface_create_with_modifiers(struct gbm_device *gbm, + uint32_t width, uint32_t height, + uint32_t format, + const uint64_t *modifiers, + const unsigned int count); + struct gbm_bo * gbm_surface_lock_front_buffer(struct gbm_surface *surface); @@ -422,6 +452,33 @@ gbm_surface_has_free_buffers(struct gbm_surface *surface); void gbm_surface_destroy(struct gbm_surface *surface); +char * +gbm_format_get_name(uint32_t gbm_format, struct gbm_format_name_desc *desc); + + +#ifndef MINIGBM +#define MINIGBM +#endif +/* + * The following functions are not 
deprecated, but not in the Mesa the gbm + * header. The main difference is minigbm allows for the possibility of + * disjoint YUV images, while Mesa GBM does not. + */ +uint32_t +gbm_bo_get_plane_size(struct gbm_bo *bo, size_t plane); + +int +gbm_bo_get_plane_fd(struct gbm_bo *bo, size_t plane); + +void * +gbm_bo_map(struct gbm_bo *bo, + uint32_t x, uint32_t y, uint32_t width, uint32_t height, + uint32_t flags, uint32_t *stride, void **map_data, size_t plane); +void * +gbm_bo_map2(struct gbm_bo *bo, + uint32_t x, uint32_t y, uint32_t width, uint32_t height, + uint32_t flags, uint32_t *stride, void **map_data, int plane); + #ifdef __cplusplus } #endif diff --git a/gbm.pc b/gbm.pc index e99ac83..a7509fc 100644 --- a/gbm.pc +++ b/gbm.pc @@ -5,6 +5,6 @@ libdir=${exec_prefix}/lib Name: libgbm Description: A small gbm implementation -Version: 0 +Version: 18.0.0 Cflags: -I${includedir} Libs: -L${libdir} -lgbm diff --git a/gbm_helpers.c b/gbm_helpers.c index c22233a..0626a6d 100644 --- a/gbm_helpers.c +++ b/gbm_helpers.c @@ -32,6 +32,18 @@ uint64_t gbm_convert_usage(uint32_t usage) use_flags |= BO_USE_CAMERA_READ; if (usage & GBM_BO_USE_PROTECTED) use_flags |= BO_USE_PROTECTED; + if (usage & GBM_BO_USE_SW_READ_OFTEN) + use_flags |= BO_USE_SW_READ_OFTEN; + if (usage & GBM_BO_USE_SW_READ_RARELY) + use_flags |= BO_USE_SW_READ_RARELY; + if (usage & GBM_BO_USE_SW_WRITE_OFTEN) + use_flags |= BO_USE_SW_WRITE_OFTEN; + if (usage & GBM_BO_USE_SW_WRITE_RARELY) + use_flags |= BO_USE_SW_WRITE_RARELY; + if (usage & GBM_BO_USE_HW_VIDEO_DECODER) + use_flags |= BO_USE_HW_VIDEO_DECODER; + if (usage & GBM_BO_USE_HW_VIDEO_ENCODER) + use_flags |= BO_USE_HW_VIDEO_ENCODER; return use_flags; } diff --git a/gma500.c b/gma500.c deleted file mode 100644 index c3f3c12..0000000 --- a/gma500.c +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2014 The Chromium OS Authors. All rights reserved. - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. 
- */ - -#include "drv_priv.h" -#include "helpers.h" -#include "util.h" - -static const uint32_t render_target_formats[] = { DRM_FORMAT_RGBX8888 }; - -static int gma500_init(struct driver *drv) -{ - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; - - return drv_modify_linear_combinations(drv); -} - -struct backend backend_gma500 = { - .name = "gma500", - .init = gma500_init, - .bo_create = drv_dumb_bo_create, - .bo_destroy = drv_dumb_bo_destroy, - .bo_import = drv_prime_bo_import, - .bo_map = drv_dumb_bo_map, - .bo_unmap = drv_bo_munmap, -}; diff --git a/helpers.c b/helpers.c index bdae73d..f648d5a 100644 --- a/helpers.c +++ b/helpers.c @@ -6,49 +6,114 @@ #include #include -#include #include #include #include #include +#include +#include #include -#include #include "drv_priv.h" #include "helpers.h" #include "util.h" -static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane) -{ - - if (plane != 0) { - switch (format) { - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YVU420_ANDROID: - stride = DIV_ROUND_UP(stride, 2); - break; - } - } - - return stride; -} - -static uint32_t bpp_from_format(uint32_t format, size_t plane) +#ifdef USE_GRALLOC1 +#include "i915_private.h" +#endif + +struct planar_layout { + size_t num_planes; + int horizontal_subsampling[DRV_MAX_PLANES]; + int vertical_subsampling[DRV_MAX_PLANES]; + int bytes_per_pixel[DRV_MAX_PLANES]; +}; + +// clang-format off + +static const struct planar_layout packed_1bpp_layout = { + .num_planes = 1, + .horizontal_subsampling = { 1 }, + .vertical_subsampling = { 1 }, + .bytes_per_pixel = { 1 } +}; + +static const struct planar_layout packed_2bpp_layout = { + .num_planes = 1, + .horizontal_subsampling = { 1 }, + .vertical_subsampling = { 1 }, + .bytes_per_pixel = { 2 } +}; + +static const struct planar_layout packed_3bpp_layout = { + .num_planes = 1, + .horizontal_subsampling = { 1 
}, + .vertical_subsampling = { 1 }, + .bytes_per_pixel = { 3 } +}; + +static const struct planar_layout packed_4bpp_layout = { + .num_planes = 1, + .horizontal_subsampling = { 1 }, + .vertical_subsampling = { 1 }, + .bytes_per_pixel = { 4 } +}; + +static const struct planar_layout packed_8bpp_layout = { + .num_planes = 1, + .horizontal_subsampling = { 1 }, + .vertical_subsampling = { 1 }, + .bytes_per_pixel = { 8 } +}; + +static const struct planar_layout biplanar_yuv_420_layout = { + .num_planes = 2, + .horizontal_subsampling = { 1, 2 }, + .vertical_subsampling = { 1, 2 }, + .bytes_per_pixel = { 1, 2 } +}; + +static const struct planar_layout triplanar_yuv_420_layout = { + .num_planes = 3, + .horizontal_subsampling = { 1, 2, 2 }, + .vertical_subsampling = { 1, 2, 2 }, + .bytes_per_pixel = { 1, 1, 1 } +}; + +static const struct planar_layout biplanar_yuv_p010_layout = { + .num_planes = 2, + .horizontal_subsampling = { 1, 2 }, + .vertical_subsampling = { 1, 2 }, + .bytes_per_pixel = { 2, 4 } +}; + +// clang-format on + +static const struct planar_layout *layout_from_format(uint32_t format) { - assert(plane < drv_num_planes_from_format(format)); - switch (format) { case DRM_FORMAT_BGR233: case DRM_FORMAT_C8: case DRM_FORMAT_R8: case DRM_FORMAT_RGB332: + return &packed_1bpp_layout; + + case DRM_FORMAT_R16: + return &packed_2bpp_layout; + case DRM_FORMAT_YVU420: case DRM_FORMAT_YVU420_ANDROID: - return 8; + return &triplanar_yuv_420_layout; case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: - return (plane == 0) ? 
8 : 4; +#ifdef USE_GRALLOC1 + case DRM_FORMAT_NV12_Y_TILED_INTEL: +#endif + return &biplanar_yuv_420_layout; + + case DRM_FORMAT_P010: + return &biplanar_yuv_p010_layout; case DRM_FORMAT_ABGR1555: case DRM_FORMAT_ABGR4444: @@ -74,11 +139,12 @@ static uint32_t bpp_from_format(uint32_t format, size_t plane) case DRM_FORMAT_XRGB4444: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: - return 16; + case DRM_FORMAT_MTISP_SXYZW10: + return &packed_2bpp_layout; case DRM_FORMAT_BGR888: case DRM_FORMAT_RGB888: - return 24; + return &packed_3bpp_layout; case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_ABGR8888: @@ -97,17 +163,74 @@ static uint32_t bpp_from_format(uint32_t format, size_t plane) case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XRGB8888: - return 32; + return &packed_4bpp_layout; + + case DRM_FORMAT_ABGR16161616F: + return &packed_8bpp_layout; + + default: + drv_log("UNKNOWN FORMAT %d\n", format); + return NULL; } +} - fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format); - return 0; +size_t drv_num_planes_from_format(uint32_t format) +{ + const struct planar_layout *layout = layout_from_format(format); + + /* + * drv_bo_new calls this function early to query number of planes and + * considers 0 planes to mean unknown format, so we have to support + * that. All other layout_from_format() queries can assume that the + * format is supported and that the return value is non-NULL. + */ + +#ifdef USE_GRALLOC1 + return layout ? layout->num_planes : i915_private_num_planes_from_format(format); +#else + return layout ? layout->num_planes : 0; +#endif +} + +size_t drv_num_planes_from_modifier(struct driver *drv, uint32_t format, uint64_t modifier) +{ + size_t planes = drv_num_planes_from_format(format); + + /* Disallow unsupported formats. 
*/ + if (!planes) + return 0; + + if (drv->backend->num_planes_from_modifier && modifier != DRM_FORMAT_MOD_INVALID) + return drv->backend->num_planes_from_modifier(drv, format, modifier); + + return planes; +} + +uint32_t drv_height_from_format(uint32_t format, uint32_t height, size_t plane) +{ + const struct planar_layout *layout = layout_from_format(format); + + assert(plane < layout->num_planes); + + return DIV_ROUND_UP(height, layout->vertical_subsampling[plane]); +} + +uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane) +{ + const struct planar_layout *layout = layout_from_format(format); + + assert(plane < layout->num_planes); + + return layout->vertical_subsampling[plane]; } -uint32_t drv_bo_get_stride_in_pixels(struct bo *bo) +uint32_t drv_bytes_per_pixel_from_format(uint32_t format, size_t plane) { - uint32_t bytes_per_pixel = DIV_ROUND_UP(bpp_from_format(bo->format, 0), 8); - return DIV_ROUND_UP(bo->strides[0], bytes_per_pixel); + const struct planar_layout *layout = layout_from_format(format); + + assert(plane < layout->num_planes); + + return layout->bytes_per_pixel[plane]; } /* @@ -115,7 +238,11 @@ uint32_t drv_bo_get_stride_in_pixels(struct bo *bo) */ uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane) { - uint32_t stride = DIV_ROUND_UP(width * bpp_from_format(format, plane), 8); + const struct planar_layout *layout = layout_from_format(format); + assert(plane < layout->num_planes); + + uint32_t plane_width = DIV_ROUND_UP(width, layout->horizontal_subsampling[plane]); + uint32_t stride = plane_width * layout->bytes_per_pixel[plane]; /* * The stride of Android YV12 buffers is required to be aligned to 16 bytes @@ -129,20 +256,21 @@ uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane) uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane) { - assert(plane < drv_num_planes_from_format(format)); - uint32_t vertical_subsampling; + return 
stride * drv_height_from_format(format, height, plane); +} - switch (format) { - case DRM_FORMAT_NV12: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YVU420_ANDROID: - vertical_subsampling = (plane == 0) ? 1 : 2; - break; - default: - vertical_subsampling = 1; +static uint32_t subsample_stride(uint32_t stride, uint32_t format, size_t plane) +{ + if (plane != 0) { + switch (format) { + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU420_ANDROID: + stride = DIV_ROUND_UP(stride, 2); + break; + } } - return stride * DIV_ROUND_UP(height, vertical_subsampling); + return stride; } /* @@ -152,7 +280,13 @@ uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, */ int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format) { + uint32_t padding[DRV_MAX_PLANES] = { 0 }; + return drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding); +} +int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height, + uint32_t format, uint32_t padding[DRV_MAX_PLANES]) +{ size_t p, num_planes; uint32_t offset = 0; @@ -166,23 +300,25 @@ int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, * is 32 bytes aligned. 
*/ if (format == DRM_FORMAT_YVU420_ANDROID) { - assert(aligned_height == bo->height); + assert(aligned_height == bo->meta.height); assert(stride == ALIGN(stride, 32)); } for (p = 0; p < num_planes; p++) { - bo->strides[p] = subsample_stride(stride, format, p); - bo->sizes[p] = drv_size_from_format(format, bo->strides[p], aligned_height, p); - bo->offsets[p] = offset; - offset += bo->sizes[p]; + bo->meta.strides[p] = subsample_stride(stride, format, p); + bo->meta.sizes[p] = + drv_size_from_format(format, bo->meta.strides[p], aligned_height, p) + + padding[p]; + bo->meta.offsets[p] = offset; + offset += bo->meta.sizes[p]; } - bo->total_size = offset; + bo->meta.total_size = offset; return 0; } -int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) +int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags, uint64_t quirks) { int ret; size_t plane; @@ -191,39 +327,68 @@ int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t aligned_width = width; aligned_height = height; - if (format == DRM_FORMAT_YVU420_ANDROID) { - /* - * Align width to 32 pixels, so chroma strides are 16 bytes as - * Android requires. - */ + switch (format) { + case DRM_FORMAT_R16: + /* HAL_PIXEL_FORMAT_Y16 requires that the buffer's width be 16 pixel + * aligned. See hardware/interfaces/graphics/common/1.0/types.hal. */ + aligned_width = ALIGN(width, 16); + break; + case DRM_FORMAT_YVU420_ANDROID: + /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not + * be aligned. Update 'height' so that drv_bo_from_format below + * uses the non-aligned height. */ + height = bo->meta.height; + + /* Align width to 32 pixels, so chroma strides are 16 bytes as + * Android requires. */ aligned_width = ALIGN(width, 32); - } - if (format == DRM_FORMAT_YVU420_ANDROID || format == DRM_FORMAT_YVU420) { + /* Adjust the height to include room for chroma planes. 
*/ + aligned_height = 3 * DIV_ROUND_UP(height, 2); + break; + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + /* Adjust the height to include room for chroma planes */ aligned_height = 3 * DIV_ROUND_UP(height, 2); + break; + default: + break; } memset(&create_dumb, 0, sizeof(create_dumb)); - create_dumb.height = aligned_height; + if (quirks & BO_QUIRK_DUMB32BPP) { + aligned_width = + DIV_ROUND_UP(aligned_width * layout_from_format(format)->bytes_per_pixel[0], 4); + create_dumb.bpp = 32; + } else { + create_dumb.bpp = layout_from_format(format)->bytes_per_pixel[0] * 8; + } create_dumb.width = aligned_width; - create_dumb.bpp = bpp_from_format(format, 0); + create_dumb.height = aligned_height; create_dumb.flags = 0; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create_dumb); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_MODE_CREATE_DUMB failed\n"); - return ret; + drv_log("DRM_IOCTL_MODE_CREATE_DUMB failed (%d, %d)\n", bo->drv->fd, errno); + return -errno; } drv_bo_from_format(bo, create_dumb.pitch, height, format); - for (plane = 0; plane < bo->num_planes; plane++) + for (plane = 0; plane < bo->meta.num_planes; plane++) bo->handles[plane].u32 = create_dumb.handle; - bo->total_size = create_dumb.size; + bo->meta.total_size = create_dumb.size; return 0; } +int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) +{ + return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_NONE); +} + int drv_dumb_bo_destroy(struct bo *bo) { struct drm_mode_destroy_dumb destroy_dumb; @@ -234,9 +399,8 @@ int drv_dumb_bo_destroy(struct bo *bo) ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy_dumb); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", - bo->handles[0].u32); - return ret; + drv_log("DRM_IOCTL_MODE_DESTROY_DUMB failed (handle=%x)\n", bo->handles[0].u32); + return -errno; } return 0; @@ -248,7 +412,7 @@ int 
drv_gem_bo_destroy(struct bo *bo) int ret, error = 0; size_t plane, i; - for (plane = 0; plane < bo->num_planes; plane++) { + for (plane = 0; plane < bo->meta.num_planes; plane++) { for (i = 0; i < plane; i++) if (bo->handles[i].u32 == bo->handles[plane].u32) break; @@ -261,9 +425,9 @@ int drv_gem_bo_destroy(struct bo *bo) ret = drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", + drv_log("DRM_IOCTL_GEM_CLOSE failed (handle=%x) error %d\n", bo->handles[plane].u32, ret); - error = ret; + error = -errno; } } @@ -276,15 +440,14 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data) size_t plane; struct drm_prime_handle prime_handle; - for (plane = 0; plane < bo->num_planes; plane++) { + for (plane = 0; plane < bo->meta.num_planes; plane++) { memset(&prime_handle, 0, sizeof(prime_handle)); prime_handle.fd = data->fds[plane]; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &prime_handle); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", - prime_handle.fd); + drv_log("DRM_IOCTL_PRIME_FD_TO_HANDLE failed (fd=%u)\n", prime_handle.fd); /* * Need to call GEM close on planes that were opened, @@ -292,24 +455,18 @@ int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data) * plane that failed, so GEM close will be called on * planes before that plane. 
*/ - bo->num_planes = plane; + bo->meta.num_planes = plane; drv_gem_bo_destroy(bo); - return ret; + return -errno; } bo->handles[plane].u32 = prime_handle.handle; } - for (plane = 0; plane < bo->num_planes; plane++) { - pthread_mutex_lock(&bo->drv->driver_lock); - drv_increment_reference_count(bo->drv, bo, plane); - pthread_mutex_unlock(&bo->drv->driver_lock); - } - return 0; } -void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) { int ret; size_t i; @@ -320,46 +477,56 @@ void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MODE_MAP_DUMB, &map_dumb); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_MODE_MAP_DUMB failed \n"); + drv_log("DRM_IOCTL_MODE_MAP_DUMB failed\n"); return MAP_FAILED; } - for (i = 0; i < bo->num_planes; i++) + for (i = 0; i < bo->meta.num_planes; i++) if (bo->handles[i].u32 == bo->handles[plane].u32) - data->length += bo->sizes[i]; + vma->length += bo->meta.sizes[i]; - return mmap(0, data->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + return mmap(0, vma->length, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, map_dumb.offset); } -int drv_bo_munmap(struct bo *bo, struct map_info *data) +int drv_bo_munmap(struct bo *bo, struct vma *vma) { - return munmap(data->addr, data->length); + return munmap(vma->addr, vma->length); } -int drv_map_info_destroy(struct bo *bo) +int drv_mapping_destroy(struct bo *bo) { int ret; - void *ptr; size_t plane; - struct map_info *data; + struct mapping *mapping; + uint32_t idx; /* * This function is called right before the buffer is destroyed. It will free any mappings * associated with the buffer. 
*/ - for (plane = 0; plane < bo->num_planes; plane++) { - if (!drmHashLookup(bo->drv->map_table, bo->handles[plane].u32, &ptr)) { - data = (struct map_info *)ptr; - ret = bo->drv->backend->bo_unmap(bo, data); - if (ret) { - fprintf(stderr, "drv: munmap failed"); - return ret; + idx = 0; + for (plane = 0; plane < bo->meta.num_planes; plane++) { + while (idx < drv_array_size(bo->drv->mappings)) { + mapping = (struct mapping *)drv_array_at_idx(bo->drv->mappings, idx); + if (mapping->vma->handle != bo->handles[plane].u32) { + idx++; + continue; + } + + if (!--mapping->vma->refcount) { + ret = bo->drv->backend->bo_unmap(bo, mapping->vma); + if (ret) { + drv_log("munmap failed\n"); + return ret; + } + + free(mapping->vma); } - drmHashDelete(bo->drv->map_table, data->handle); - free(data); + /* This shrinks and shifts the array, so don't increment idx. */ + drv_array_remove(bo->drv->mappings, idx); } } @@ -401,51 +568,28 @@ void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t pla drmHashInsert(drv->buffer_table, bo->handles[plane].u32, (void *)(num - 1)); } -uint32_t drv_log_base2(uint32_t value) -{ - int ret = 0; - - while (value >>= 1) - ++ret; - - return ret; -} - -int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata, - uint64_t use_flags) +void drv_add_combination(struct driver *drv, const uint32_t format, + struct format_metadata *metadata, uint64_t use_flags) { - struct combinations *combos = &drv->combos; - if (combos->size >= combos->allocations) { - struct combination *new_data; - combos->allocations *= 2; - new_data = realloc(combos->data, combos->allocations * sizeof(*combos->data)); - if (!new_data) - return -ENOMEM; - - combos->data = new_data; - } + struct combination combo = { .format = format, + .metadata = *metadata, + .use_flags = use_flags }; - combos->data[combos->size].format = format; - combos->data[combos->size].metadata.priority = metadata->priority; - 
combos->data[combos->size].metadata.tiling = metadata->tiling; - combos->data[combos->size].metadata.modifier = metadata->modifier; - combos->data[combos->size].use_flags = use_flags; - combos->size++; - return 0; + drv_array_append(drv->combos, &combo); } -int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats, - struct format_metadata *metadata, uint64_t use_flags) +void drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats, + struct format_metadata *metadata, uint64_t use_flags) { - int ret; uint32_t i; + for (i = 0; i < num_formats; i++) { - ret = drv_add_combination(drv, formats[i], metadata, use_flags); - if (ret) - return ret; - } + struct combination combo = { .format = formats[i], + .metadata = *metadata, + .use_flags = use_flags }; - return 0; + drv_array_append(drv->combos, &combo); + } } void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata, @@ -454,155 +598,24 @@ void drv_modify_combination(struct driver *drv, uint32_t format, struct format_m uint32_t i; struct combination *combo; /* Attempts to add the specified flags to an existing combination. */ - for (i = 0; i < drv->combos.size; i++) { - combo = &drv->combos.data[i]; + for (i = 0; i < drv_array_size(drv->combos); i++) { + combo = (struct combination *)drv_array_at_idx(drv->combos, i); if (combo->format == format && combo->metadata.tiling == metadata->tiling && combo->metadata.modifier == metadata->modifier) combo->use_flags |= use_flags; } } -struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items) -{ - struct kms_item *items; - uint64_t plane_type, use_flag; - uint32_t i, j, k, allocations, item_size; - - drmModePlanePtr plane; - drmModePropertyPtr prop; - drmModePlaneResPtr resources; - drmModeObjectPropertiesPtr props; - - /* Start with a power of 2 number of allocations. 
*/ - allocations = 2; - item_size = 0; - items = calloc(allocations, sizeof(*items)); - if (!items) - goto out; - - /* - * The ability to return universal planes is only complete on - * ChromeOS kernel versions >= v3.18. The SET_CLIENT_CAP ioctl - * therefore might return an error code, so don't check it. If it - * fails, it'll just return the plane list as overlay planes, which is - * fine in our case (our drivers already have cursor bits set). - * modetest in libdrm does the same thing. - */ - drmSetClientCap(drv->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1); - - resources = drmModeGetPlaneResources(drv->fd); - if (!resources) - goto out; - - for (i = 0; i < resources->count_planes; i++) { - plane = drmModeGetPlane(drv->fd, resources->planes[i]); - if (!plane) - goto out; - - props = drmModeObjectGetProperties(drv->fd, plane->plane_id, DRM_MODE_OBJECT_PLANE); - if (!props) - goto out; - - for (j = 0; j < props->count_props; j++) { - prop = drmModeGetProperty(drv->fd, props->props[j]); - if (prop) { - if (strcmp(prop->name, "type") == 0) { - plane_type = props->prop_values[j]; - } - - drmModeFreeProperty(prop); - } - } - - switch (plane_type) { - case DRM_PLANE_TYPE_OVERLAY: - case DRM_PLANE_TYPE_PRIMARY: - use_flag = BO_USE_SCANOUT; - break; - case DRM_PLANE_TYPE_CURSOR: - use_flag = BO_USE_CURSOR; - break; - default: - assert(0); - } - - for (j = 0; j < plane->count_formats; j++) { - bool found = false; - for (k = 0; k < item_size; k++) { - if (items[k].format == plane->formats[j] && - items[k].modifier == DRM_FORMAT_MOD_INVALID) { - items[k].use_flags |= use_flag; - found = true; - break; - } - } - - if (!found && item_size >= allocations) { - struct kms_item *new_data = NULL; - allocations *= 2; - new_data = realloc(items, allocations * sizeof(*items)); - if (!new_data) { - item_size = 0; - goto out; - } - - items = new_data; - } - - if (!found) { - items[item_size].format = plane->formats[j]; - items[item_size].modifier = DRM_FORMAT_MOD_INVALID; - 
items[item_size].use_flags = use_flag; - item_size++; - } - } - - drmModeFreeObjectProperties(props); - drmModeFreePlane(plane); - } - - drmModeFreePlaneResources(resources); -out: - if (items && item_size == 0) { - free(items); - items = NULL; - } - - *num_items = item_size; - return items; -} - int drv_modify_linear_combinations(struct driver *drv) { - uint32_t i, j, num_items; - struct kms_item *items; - struct combination *combo; - /* * All current drivers can scanout linear XRGB8888/ARGB8888 as a primary - * plane and as a cursor. Some drivers don't support - * drmModeGetPlaneResources, so add the combination here. Note that the - * kernel disregards the alpha component of ARGB unless it's an overlay - * plane. + * plane and as a cursor. */ drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA, BO_USE_CURSOR | BO_USE_SCANOUT); drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA, BO_USE_CURSOR | BO_USE_SCANOUT); - - items = drv_query_kms(drv, &num_items); - if (!items || !num_items) - return 0; - - for (i = 0; i < num_items; i++) { - for (j = 0; j < drv->combos.size; j++) { - combo = &drv->combos.data[j]; - if (items[i].format == combo->format) - combo->use_flags |= BO_USE_SCANOUT; - } - } - - free(items); return 0; } @@ -625,3 +638,16 @@ uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count, return DRM_FORMAT_MOD_LINEAR; } + +/* + * Search a list of modifiers to see if a given modifier is present + */ +bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier) +{ + uint32_t i; + for (i = 0; i < count; i++) + if (list[i] == modifier) + return true; + + return false; +} diff --git a/helpers.h b/helpers.h index 0e3fd14..19d0fd7 100644 --- a/helpers.h +++ b/helpers.h @@ -7,33 +7,39 @@ #ifndef HELPERS_H #define HELPERS_H +#include + #include "drv.h" +#include "helpers_array.h" -uint32_t drv_stride_from_format(uint32_t format, uint32_t width, size_t plane); +uint32_t drv_height_from_format(uint32_t format, 
uint32_t height, size_t plane); +uint32_t drv_vertical_subsampling_from_format(uint32_t format, size_t plane); uint32_t drv_size_from_format(uint32_t format, uint32_t stride, uint32_t height, size_t plane); int drv_bo_from_format(struct bo *bo, uint32_t stride, uint32_t aligned_height, uint32_t format); +int drv_bo_from_format_and_padding(struct bo *bo, uint32_t stride, uint32_t aligned_height, + uint32_t format, uint32_t padding[DRV_MAX_PLANES]); int drv_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, uint64_t use_flags); +int drv_dumb_bo_create_ex(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags, uint64_t quirks); int drv_dumb_bo_destroy(struct bo *bo); int drv_gem_bo_destroy(struct bo *bo); int drv_prime_bo_import(struct bo *bo, struct drv_import_fd_data *data); -void *drv_dumb_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags); -int drv_bo_munmap(struct bo *bo, struct map_info *data); -int drv_map_info_destroy(struct bo *bo); +void *drv_dumb_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags); +int drv_bo_munmap(struct bo *bo, struct vma *vma); +int drv_mapping_destroy(struct bo *bo); int drv_get_prot(uint32_t map_flags); uintptr_t drv_get_reference_count(struct driver *drv, struct bo *bo, size_t plane); void drv_increment_reference_count(struct driver *drv, struct bo *bo, size_t plane); void drv_decrement_reference_count(struct driver *drv, struct bo *bo, size_t plane); -uint32_t drv_log_base2(uint32_t value); -int drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata, - uint64_t usage); -int drv_add_combinations(struct driver *drv, const uint32_t *formats, uint32_t num_formats, - struct format_metadata *metadata, uint64_t usage); +void drv_add_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata, + uint64_t usage); +void drv_add_combinations(struct driver *drv, const uint32_t 
*formats, uint32_t num_formats, + struct format_metadata *metadata, uint64_t usage); void drv_modify_combination(struct driver *drv, uint32_t format, struct format_metadata *metadata, uint64_t usage); -struct kms_item *drv_query_kms(struct driver *drv, uint32_t *num_items); int drv_modify_linear_combinations(struct driver *drv); uint64_t drv_pick_modifier(const uint64_t *modifiers, uint32_t count, const uint64_t *modifier_order, uint32_t order_count); - +bool drv_has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier); #endif diff --git a/helpers_array.c b/helpers_array.c new file mode 100644 index 0000000..20b43e2 --- /dev/null +++ b/helpers_array.c @@ -0,0 +1,96 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include +#include +#include +#include + +#include "util.h" + +struct drv_array { + void **items; + uint32_t size; + uint32_t item_size; + uint32_t allocations; +}; + +struct drv_array *drv_array_init(uint32_t item_size) +{ + struct drv_array *array; + + array = calloc(1, sizeof(*array)); + + /* Start with a power of 2 number of allocations. 
*/ + array->allocations = 2; + array->items = calloc(array->allocations, sizeof(*array->items)); + array->item_size = item_size; + return array; +} + +void *drv_array_append(struct drv_array *array, void *data) +{ + void *item; + + if (array->size >= array->allocations) { + void **new_items = NULL; + array->allocations *= 2; + new_items = realloc(array->items, array->allocations * sizeof(*array->items)); + assert(new_items); + array->items = new_items; + } + + item = calloc(1, array->item_size); + memcpy(item, data, array->item_size); + array->items[array->size] = item; + array->size++; + return item; +} + +void drv_array_remove(struct drv_array *array, uint32_t idx) +{ + uint32_t i; + + assert(array); + assert(idx < array->size); + + free(array->items[idx]); + array->items[idx] = NULL; + + for (i = idx + 1; i < array->size; i++) + array->items[i - 1] = array->items[i]; + + array->size--; + if ((DIV_ROUND_UP(array->allocations, 2) > array->size) && array->allocations > 2) { + void **new_items = NULL; + array->allocations = DIV_ROUND_UP(array->allocations, 2); + new_items = realloc(array->items, array->allocations * sizeof(*array->items)); + assert(new_items); + array->items = new_items; + } +} + +void *drv_array_at_idx(struct drv_array *array, uint32_t idx) +{ + assert(idx < array->size); + return array->items[idx]; +} + +uint32_t drv_array_size(struct drv_array *array) +{ + return array->size; +} + +void drv_array_destroy(struct drv_array *array) +{ + uint32_t i; + + for (i = 0; i < array->size; i++) + free(array->items[i]); + + free(array->items); + free(array); +} diff --git a/helpers_array.h b/helpers_array.h new file mode 100644 index 0000000..2893976 --- /dev/null +++ b/helpers_array.h @@ -0,0 +1,22 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +struct drv_array; + +struct drv_array *drv_array_init(uint32_t item_size); + +/* The data will be copied and appended to the array. */ +void *drv_array_append(struct drv_array *array, void *data); + +/* The data at the specified index will be freed -- the array will shrink. */ +void drv_array_remove(struct drv_array *array, uint32_t idx); + +void *drv_array_at_idx(struct drv_array *array, uint32_t idx); + +uint32_t drv_array_size(struct drv_array *array); + +/* The array and all associated data will be freed. */ +void drv_array_destroy(struct drv_array *array); diff --git a/i915.c b/i915.c index e25b068..7787d48 100644 --- a/i915.c +++ b/i915.c @@ -6,35 +6,50 @@ #ifdef DRV_I915 +#include #include #include +#include #include #include #include +#include #include #include "drv_priv.h" #include "helpers.h" #include "util.h" +#ifdef USE_GRALLOC1 +#include "i915_private.h" +#endif + #define I915_CACHELINE_SIZE 64 #define I915_CACHELINE_MASK (I915_CACHELINE_SIZE - 1) -static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB1555, - DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565, - DRM_FORMAT_XBGR2101010, DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB1555, DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XRGB8888 }; +static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR2101010, DRM_FORMAT_ABGR8888, + DRM_FORMAT_ARGB2101010, DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB565, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XRGB8888 }; -static const uint32_t tileable_texture_source_formats[] = { DRM_FORMAT_GR88, DRM_FORMAT_NV12, - DRM_FORMAT_R8, DRM_FORMAT_UYVY, - DRM_FORMAT_YUYV }; +static const uint32_t render_formats[] = { DRM_FORMAT_ABGR16161616F }; -static const uint32_t texture_source_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; +static const uint32_t texture_only_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, DRM_FORMAT_P010, +#ifdef USE_GRALLOC1 + DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID, + 
DRM_FORMAT_YUYV }; +#else + DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; +#endif struct i915_device { uint32_t gen; int32_t has_llc; +#ifdef USE_GRALLOC1 + uint64_t cursor_width; + uint64_t cursor_height; +#endif }; static uint32_t i915_get_gen(int device_id) @@ -49,75 +64,57 @@ static uint32_t i915_get_gen(int device_id) return 4; } -static int i915_add_kms_item(struct driver *drv, const struct kms_item *item) +static uint64_t unset_flags(uint64_t current_flags, uint64_t mask) { - uint32_t i; - struct combination *combo; - - /* - * Older hardware can't scanout Y-tiled formats. Newer devices can, and - * report this functionality via format modifiers. - */ - for (i = 0; i < drv->combos.size; i++) { - combo = &drv->combos.data[i]; - if (combo->format != item->format) - continue; - - if (item->modifier == DRM_FORMAT_MOD_INVALID && - combo->metadata.tiling == I915_TILING_X) { - /* - * FIXME: drv_query_kms() does not report the available modifiers - * yet, but we know that all hardware can scanout from X-tiled - * buffers, so let's add this to our combinations, except for - * cursor, which must not be tiled. 
- */ - combo->use_flags |= item->use_flags & ~BO_USE_CURSOR; - } - - if (combo->metadata.modifier == item->modifier) - combo->use_flags |= item->use_flags; - } - - return 0; + uint64_t value = current_flags & ~mask; + return value; } static int i915_add_combinations(struct driver *drv) { - int ret; - uint32_t i, num_items; - struct kms_item *items; struct format_metadata metadata; - uint64_t render_use_flags, texture_use_flags; + uint64_t render, scanout_and_render, texture_only; - render_use_flags = BO_USE_RENDER_MASK; - texture_use_flags = BO_USE_TEXTURE_MASK; + scanout_and_render = BO_USE_RENDER_MASK | BO_USE_SCANOUT; +#ifdef USE_GRALLOC1 + render = BO_USE_RENDER_MASK & ~(BO_USE_RENDERING | BO_USE_TEXTURE); +#else + render = BO_USE_RENDER_MASK; +#endif + texture_only = BO_USE_TEXTURE_MASK; + uint64_t linear_mask = BO_USE_RENDERSCRIPT | BO_USE_LINEAR | BO_USE_PROTECTED | + BO_USE_SW_READ_OFTEN | BO_USE_SW_WRITE_OFTEN; metadata.tiling = I915_TILING_NONE; metadata.priority = 1; metadata.modifier = DRM_FORMAT_MOD_LINEAR; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, render_use_flags); - if (ret) - return ret; + drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats), + &metadata, scanout_and_render); - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &metadata, texture_use_flags); - if (ret) - return ret; + drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render); - ret = drv_add_combinations(drv, tileable_texture_source_formats, - ARRAY_SIZE(tileable_texture_source_formats), &metadata, - texture_use_flags); - if (ret) - return ret; - - drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), 
&metadata, + texture_only); + drv_modify_linear_combinations(drv); + /* + * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the + * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future. + */ + drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER); /* IPU3 camera ISP supports only NV12 output. */ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, - BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER | + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT); + + /* Android CTS tests require this. */ + drv_add_combination(drv, DRM_FORMAT_BGR888, &metadata, BO_USE_SW_MASK); +#ifdef USE_GRALLOC1 + drv_modify_combination(drv, DRM_FORMAT_ABGR2101010, &metadata, BO_USE_SW_MASK); + drv_add_combination(drv, DRM_FORMAT_RGB888, &metadata, BO_USE_SW_MASK); +#endif + /* * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots * from camera. 
@@ -125,59 +122,41 @@ static int i915_add_combinations(struct driver *drv) drv_modify_combination(drv, DRM_FORMAT_R8, &metadata, BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); - render_use_flags &= ~BO_USE_RENDERSCRIPT; - render_use_flags &= ~BO_USE_SW_WRITE_OFTEN; - render_use_flags &= ~BO_USE_SW_READ_OFTEN; - render_use_flags &= ~BO_USE_LINEAR; - - texture_use_flags &= ~BO_USE_RENDERSCRIPT; - texture_use_flags &= ~BO_USE_SW_WRITE_OFTEN; - texture_use_flags &= ~BO_USE_SW_READ_OFTEN; - texture_use_flags &= ~BO_USE_LINEAR; + render = unset_flags(render, linear_mask); + scanout_and_render = unset_flags(scanout_and_render, linear_mask); metadata.tiling = I915_TILING_X; metadata.priority = 2; metadata.modifier = I915_FORMAT_MOD_X_TILED; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, render_use_flags); - if (ret) - return ret; - - ret = drv_add_combinations(drv, tileable_texture_source_formats, - ARRAY_SIZE(tileable_texture_source_formats), &metadata, - texture_use_flags); - if (ret) - return ret; + drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render); + drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats), + &metadata, scanout_and_render); metadata.tiling = I915_TILING_Y; metadata.priority = 3; metadata.modifier = I915_FORMAT_MOD_Y_TILED; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, render_use_flags); - if (ret) - return ret; - - ret = drv_add_combinations(drv, tileable_texture_source_formats, - ARRAY_SIZE(tileable_texture_source_formats), &metadata, - texture_use_flags); - if (ret) - return ret; - - items = drv_query_kms(drv, &num_items); - if (!items || !num_items) - return 0; - - for (i = 0; i < num_items; i++) { - ret = i915_add_kms_item(drv, &items[i]); - if (ret) { - free(items); - return ret; - } - } - - free(items); + scanout_and_render = + unset_flags(scanout_and_render, 
BO_USE_SW_READ_RARELY | BO_USE_SW_WRITE_RARELY); +/* Support y-tiled NV12 and P010 for libva */ +#ifdef I915_SCANOUT_Y_TILED + drv_add_combination(drv, DRM_FORMAT_NV12, &metadata, + BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER | BO_USE_SCANOUT); +#else + drv_add_combination(drv, DRM_FORMAT_NV12, &metadata, + BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER); +#endif + scanout_and_render = unset_flags(scanout_and_render, BO_USE_SCANOUT); + drv_add_combination(drv, DRM_FORMAT_P010, &metadata, + BO_USE_TEXTURE | BO_USE_HW_VIDEO_DECODER); + + drv_add_combinations(drv, render_formats, ARRAY_SIZE(render_formats), &metadata, render); + drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats), + &metadata, scanout_and_render); +#ifdef USE_GRALLOC1 + i915_private_add_combinations(drv); +#endif return 0; } @@ -185,13 +164,21 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid uint32_t *aligned_height) { struct i915_device *i915 = bo->drv->priv; - uint32_t horizontal_alignment = 4; - uint32_t vertical_alignment = 4; + uint32_t horizontal_alignment; + uint32_t vertical_alignment; switch (tiling) { default: case I915_TILING_NONE: + /* + * The Intel GPU doesn't need any alignment in linear mode, + * but libva requires the allocation stride to be aligned to + * 16 bytes and height to 4 rows. Further, we round up the + * horizontal alignment so that row start on a cache line (64 + * bytes). + */ horizontal_alignment = 64; + vertical_alignment = 4; break; case I915_TILING_X: @@ -210,23 +197,11 @@ static int i915_align_dimensions(struct bo *bo, uint32_t tiling, uint32_t *strid break; } - /* - * The alignment calculated above is based on the full size luma plane and to have chroma - * planes properly aligned with subsampled formats, we need to multiply luma alignment by - * subsampling factor. 
- */ - switch (bo->format) { - case DRM_FORMAT_YVU420_ANDROID: - case DRM_FORMAT_YVU420: - horizontal_alignment *= 2; - /* Fall through */ - case DRM_FORMAT_NV12: - vertical_alignment *= 2; - break; - } - - *aligned_height = ALIGN(bo->height, vertical_alignment); + *aligned_height = ALIGN(*aligned_height, vertical_alignment); if (i915->gen > 3) { +#ifdef USE_GRALLOC1 + if(DRM_FORMAT_R8 != bo->meta.format) +#endif *stride = ALIGN(*stride, horizontal_alignment); } else { while (*stride > horizontal_alignment) @@ -269,7 +244,7 @@ static int i915_init(struct driver *drv) get_param.value = &device_id; ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param); if (ret) { - fprintf(stderr, "drv: Failed to get I915_PARAM_CHIPSET_ID\n"); + drv_log("Failed to get I915_PARAM_CHIPSET_ID\n"); free(i915); return -EINVAL; } @@ -281,98 +256,170 @@ static int i915_init(struct driver *drv) get_param.value = &i915->has_llc; ret = drmIoctl(drv->fd, DRM_IOCTL_I915_GETPARAM, &get_param); if (ret) { - fprintf(stderr, "drv: Failed to get I915_PARAM_HAS_LLC\n"); + drv_log("Failed to get I915_PARAM_HAS_LLC\n"); free(i915); return -EINVAL; } drv->priv = i915; +#ifdef USE_GRALLOC1 + i915_private_init(drv, &i915->cursor_width, &i915->cursor_height); +#endif + return i915_add_combinations(drv); } -static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height, - uint32_t format, uint64_t modifier) +static int i915_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format) { - int ret; + uint32_t offset; size_t plane; - uint32_t stride; - struct drm_i915_gem_create gem_create; - struct drm_i915_gem_set_tiling gem_set_tiling; + int ret, pagesize; + + offset = 0; + pagesize = getpagesize(); + for (plane = 0; plane < drv_num_planes_from_format(format); plane++) { + uint32_t stride = drv_stride_from_format(format, width, plane); + uint32_t plane_height = drv_height_from_format(format, height, plane); + + if (bo->meta.tiling != I915_TILING_NONE) + 
assert(IS_ALIGNED(offset, pagesize)); + + ret = i915_align_dimensions(bo, bo->meta.tiling, &stride, &plane_height); + if (ret) + return ret; + + bo->meta.strides[plane] = stride; + bo->meta.sizes[plane] = stride * plane_height; + bo->meta.offsets[plane] = offset; + offset += bo->meta.sizes[plane]; + } + + bo->meta.total_size = ALIGN(offset, pagesize); + + return 0; +} + +static int i915_bo_compute_metadata(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags, const uint64_t *modifiers, uint32_t count) +{ + static const uint64_t modifier_order[] = { + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + }; + uint64_t modifier; + + if (modifiers) { + modifier = + drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order)); + } else { + struct combination *combo = drv_get_combination(bo->drv, format, use_flags); + if (!combo) + return -EINVAL; + modifier = combo->metadata.modifier; + } switch (modifier) { case DRM_FORMAT_MOD_LINEAR: - bo->tiling = I915_TILING_NONE; + bo->meta.tiling = I915_TILING_NONE; break; case I915_FORMAT_MOD_X_TILED: - bo->tiling = I915_TILING_X; + bo->meta.tiling = I915_TILING_X; break; case I915_FORMAT_MOD_Y_TILED: - bo->tiling = I915_TILING_Y; + case I915_FORMAT_MOD_Y_TILED_CCS: +#ifdef USE_GRALLOC1 + case I915_FORMAT_MOD_Yf_TILED: + case I915_FORMAT_MOD_Yf_TILED_CCS: +#endif + bo->meta.tiling = I915_TILING_Y; break; } - stride = drv_stride_from_format(format, width, 0); + bo->meta.format_modifiers[0] = modifier; - ret = i915_align_dimensions(bo, bo->tiling, &stride, &height); - if (ret) - return ret; - - /* - * HAL_PIXEL_FORMAT_YV12 requires the buffer height not be aligned, but we need to keep - * total size as with aligned height to ensure enough padding space after each plane to - * satisfy GPU alignment requirements. 
- * - * We do it by first calling drv_bo_from_format() with aligned height and - * DRM_FORMAT_YVU420, which allows height alignment, saving the total size it calculates - * and then calling it again with requested parameters. - * - * This relies on the fact that i965 driver uses separate surfaces for each plane and - * contents of padding bytes is not affected, as it is only used to satisfy GPU cache - * requests. - * - * This is enforced by Mesa in src/intel/isl/isl_gen8.c, inside - * isl_gen8_choose_image_alignment_el(), which is used for GEN9 and GEN8. - */ if (format == DRM_FORMAT_YVU420_ANDROID) { - uint32_t unaligned_height = bo->height; - size_t total_size; - - drv_bo_from_format(bo, stride, height, DRM_FORMAT_YVU420); - total_size = bo->total_size; - drv_bo_from_format(bo, stride, unaligned_height, format); - bo->total_size = total_size; - } else { + /* + * We only need to be able to use this as a linear texture, + * which doesn't put any HW restrictions on how we lay it + * out. The Android format does require the stride to be a + * multiple of 16 and expects the Cr and Cb stride to be + * ALIGN(Y_stride / 2, 16), which we can make happen by + * aligning to 32 bytes here. + */ + uint32_t stride = ALIGN(width, 32); drv_bo_from_format(bo, stride, height, format); + } else if (modifier == I915_FORMAT_MOD_Y_TILED_CCS) { + /* + * For compressed surfaces, we need a color control surface + * (CCS). Color compression is only supported for Y tiled + * surfaces, and for each 32x16 tiles in the main surface we + * need a tile in the control surface. Y tiles are 128 bytes + * wide and 32 lines tall and we use that to first compute the + * width and height in tiles of the main surface. 
stride and + * height are already multiples of 128 and 32, respectively: + */ + uint32_t stride = drv_stride_from_format(format, width, 0); + uint32_t width_in_tiles = DIV_ROUND_UP(stride, 128); + uint32_t height_in_tiles = DIV_ROUND_UP(height, 32); + uint32_t size = width_in_tiles * height_in_tiles * 4096; + uint32_t offset = 0; + + bo->meta.strides[0] = width_in_tiles * 128; + bo->meta.sizes[0] = size; + bo->meta.offsets[0] = offset; + offset += size; + + /* + * Now, compute the width and height in tiles of the control + * surface by dividing and rounding up. + */ + uint32_t ccs_width_in_tiles = DIV_ROUND_UP(width_in_tiles, 32); + uint32_t ccs_height_in_tiles = DIV_ROUND_UP(height_in_tiles, 16); + uint32_t ccs_size = ccs_width_in_tiles * ccs_height_in_tiles * 4096; + + /* + * With stride and height aligned to y tiles, offset is + * already a multiple of 4096, which is the required alignment + * of the CCS. + */ + bo->meta.strides[1] = ccs_width_in_tiles * 128; + bo->meta.sizes[1] = ccs_size; + bo->meta.offsets[1] = offset; + offset += ccs_size; + + bo->meta.num_planes = 2; + bo->meta.total_size = offset; + } else { + i915_bo_from_format(bo, width, height, format); } + return 0; +} - /* - * Quoting Mesa ISL library: - * - * - For linear surfaces, additional padding of 64 bytes is required at - * the bottom of the surface. This is in addition to the padding - * required above. 
- */ - if (bo->tiling == I915_TILING_NONE) - bo->total_size += 64; +static int i915_bo_create_from_metadata(struct bo *bo) +{ + int ret; + size_t plane; + struct drm_i915_gem_create gem_create; + struct drm_i915_gem_set_tiling gem_set_tiling; memset(&gem_create, 0, sizeof(gem_create)); - gem_create.size = bo->total_size; + gem_create.size = bo->meta.total_size; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", - gem_create.size); - return ret; + drv_log("DRM_IOCTL_I915_GEM_CREATE failed (size=%llu)\n", gem_create.size); + return -errno; } - for (plane = 0; plane < bo->num_planes; plane++) + for (plane = 0; plane < bo->meta.num_planes; plane++) bo->handles[plane].u32 = gem_create.handle; memset(&gem_set_tiling, 0, sizeof(gem_set_tiling)); gem_set_tiling.handle = bo->handles[0].u32; - gem_set_tiling.tiling_mode = bo->tiling; - gem_set_tiling.stride = bo->strides[0]; + gem_set_tiling.tiling_mode = bo->meta.tiling; + gem_set_tiling.stride = bo->meta.strides[0]; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_TILING, &gem_set_tiling); if (ret) { @@ -381,40 +428,13 @@ static int i915_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t h gem_close.handle = bo->handles[0].u32; drmIoctl(bo->drv->fd, DRM_IOCTL_GEM_CLOSE, &gem_close); - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_TILING failed with %d", errno); + drv_log("DRM_IOCTL_I915_GEM_SET_TILING failed with %d\n", errno); return -errno; } return 0; } -static int i915_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) -{ - struct combination *combo; - - combo = drv_get_combination(bo->drv, format, use_flags); - if (!combo) - return -EINVAL; - - return i915_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier); -} - -static int i915_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, - uint32_t format, const uint64_t 
*modifiers, uint32_t count) -{ - static const uint64_t modifier_order[] = { - I915_FORMAT_MOD_Y_TILED, I915_FORMAT_MOD_X_TILED, DRM_FORMAT_MOD_LINEAR, - }; - uint64_t modifier; - - modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order)); - - bo->format_modifiers[0] = modifier; - - return i915_bo_create_for_modifier(bo, width, height, format, modifier); -} - static void i915_close(struct driver *drv) { free(drv->priv); @@ -437,33 +457,46 @@ static int i915_bo_import(struct bo *bo, struct drv_import_fd_data *data) ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_GET_TILING, &gem_get_tiling); if (ret) { drv_gem_bo_destroy(bo); - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_GET_TILING failed."); + drv_log("DRM_IOCTL_I915_GEM_GET_TILING failed.\n"); return ret; } - bo->tiling = gem_get_tiling.tiling_mode; + bo->meta.tiling = gem_get_tiling.tiling_mode; return 0; } -static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static void *i915_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) { int ret; void *addr; - if (bo->tiling == I915_TILING_NONE) { + if (bo->meta.format_modifiers[0] == I915_FORMAT_MOD_Y_TILED_CCS) + return MAP_FAILED; + + if (bo->meta.tiling == I915_TILING_NONE) { struct drm_i915_gem_mmap gem_map; memset(&gem_map, 0, sizeof(gem_map)); - if ((bo->use_flags & BO_USE_SCANOUT) && !(bo->use_flags & BO_USE_RENDERSCRIPT)) + /* TODO(b/118799155): We don't seem to have a good way to + * detect the use cases for which WC mapping is really needed. + * The current heuristic seems overly coarse and may be slowing + * down some other use cases unnecessarily. + * + * For now, care must be taken not to use WC mappings for + * Renderscript and camera use cases, as they're + * performance-sensitive. 
*/ + if ((bo->meta.use_flags & BO_USE_SCANOUT) && + !(bo->meta.use_flags & + (BO_USE_RENDERSCRIPT | BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))) gem_map.flags = I915_MMAP_WC; gem_map.handle = bo->handles[0].u32; gem_map.offset = 0; - gem_map.size = bo->total_size; + gem_map.size = bo->meta.total_size; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_map); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP failed\n"); + drv_log("DRM_IOCTL_I915_GEM_MMAP failed\n"); return MAP_FAILED; } @@ -476,60 +509,66 @@ static void *i915_bo_map(struct bo *bo, struct map_info *data, size_t plane, uin ret = drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &gem_map); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_MMAP_GTT failed\n"); + drv_log("DRM_IOCTL_I915_GEM_MMAP_GTT failed\n"); return MAP_FAILED; } - addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, - gem_map.offset); + addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, + bo->drv->fd, gem_map.offset); } if (addr == MAP_FAILED) { - fprintf(stderr, "drv: i915 GEM mmap failed\n"); + drv_log("i915 GEM mmap failed\n"); return addr; } - data->length = bo->total_size; + vma->length = bo->meta.total_size; return addr; } -static int i915_bo_invalidate(struct bo *bo, struct map_info *data) +static int i915_bo_invalidate(struct bo *bo, struct mapping *mapping) { int ret; struct drm_i915_gem_set_domain set_domain; memset(&set_domain, 0, sizeof(set_domain)); set_domain.handle = bo->handles[0].u32; - if (bo->tiling == I915_TILING_NONE) { + if (bo->meta.tiling == I915_TILING_NONE) { set_domain.read_domains = I915_GEM_DOMAIN_CPU; - if (data->map_flags & BO_MAP_WRITE) + if (mapping->vma->map_flags & BO_MAP_WRITE) set_domain.write_domain = I915_GEM_DOMAIN_CPU; } else { set_domain.read_domains = I915_GEM_DOMAIN_GTT; - if (data->map_flags & BO_MAP_WRITE) + if (mapping->vma->map_flags & BO_MAP_WRITE) set_domain.write_domain = I915_GEM_DOMAIN_GTT; } ret = 
drmIoctl(bo->drv->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_I915_GEM_SET_DOMAIN with %d\n", ret); + drv_log("DRM_IOCTL_I915_GEM_SET_DOMAIN with %d\n", ret); return ret; } return 0; } -static int i915_bo_flush(struct bo *bo, struct map_info *data) +static int i915_bo_flush(struct bo *bo, struct mapping *mapping) { struct i915_device *i915 = bo->drv->priv; - if (!i915->has_llc && bo->tiling == I915_TILING_NONE) - i915_clflush(data->addr, data->length); + if (!i915->has_llc && bo->meta.tiling == I915_TILING_NONE) + i915_clflush(mapping->vma->addr, mapping->vma->length); return 0; } -static uint32_t i915_resolve_format(uint32_t format, uint64_t use_flags) +static uint32_t i915_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) { +#ifdef USE_GRALLOC1 + uint32_t resolved_format; + if (i915_private_resolve_format(format, use_flags, &resolved_format)) { + return resolved_format; + } +#endif switch (format) { case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: /* KBL camera subsystem requires NV12. */ @@ -538,21 +577,27 @@ static uint32_t i915_resolve_format(uint32_t format, uint64_t use_flags) /*HACK: See b/28671744 */ return DRM_FORMAT_XBGR8888; case DRM_FORMAT_FLEX_YCbCr_420_888: - /* KBL camera subsystem requires NV12. */ - if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) - return DRM_FORMAT_NV12; - return DRM_FORMAT_YVU420; + /* + * KBL camera subsystem requires NV12. Our other use cases + * don't care: + * - Hardware video supports NV12, + * - USB Camera HALv3 supports NV12, + * - USB Camera HALv1 doesn't use this format. + * Moreover, NV12 is preferred for video, due to overlay + * support on SKL+. 
+ */ + return DRM_FORMAT_NV12; default: return format; } } -struct backend backend_i915 = { +const struct backend backend_i915 = { .name = "i915", .init = i915_init, .close = i915_close, - .bo_create = i915_bo_create, - .bo_create_with_modifiers = i915_bo_create_with_modifiers, + .bo_compute_metadata = i915_bo_compute_metadata, + .bo_create_from_metadata = i915_bo_create_from_metadata, .bo_destroy = drv_gem_bo_destroy, .bo_import = i915_bo_import, .bo_map = i915_bo_map, diff --git a/i915_private.c b/i915_private.c new file mode 100644 index 0000000..b800d2b --- /dev/null +++ b/i915_private.c @@ -0,0 +1,204 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drv_priv.h" +#include "helpers.h" +#include "util.h" +#include "i915_private.h" + +static const uint32_t private_linear_source_formats[] = { DRM_FORMAT_R16, DRM_FORMAT_NV16, + DRM_FORMAT_YUV420, DRM_FORMAT_YUV422, + DRM_FORMAT_YUV444, DRM_FORMAT_NV21, + DRM_FORMAT_P010 }; + +static const uint32_t private_source_formats[] = { DRM_FORMAT_P010, DRM_FORMAT_NV12_Y_TILED_INTEL }; + +#if !defined(DRM_CAP_CURSOR_WIDTH) +#define DRM_CAP_CURSOR_WIDTH 0x8 +#endif + +#if !defined(DRM_CAP_CURSOR_HEIGHT) +#define DRM_CAP_CURSOR_HEIGHT 0x9 +#endif + +static const uint32_t kDefaultCursorWidth = 64; +static const uint32_t kDefaultCursorHeight = 64; + +#define BO_USE_CAMERA_MASK BO_USE_CAMERA_READ | BO_USE_SCANOUT | BO_USE_CAMERA_WRITE + +static void get_preferred_cursor_attributes(uint32_t drm_fd, uint64_t *cursor_width, + uint64_t *cursor_height) +{ + uint64_t width = 0, height = 0; + if (drmGetCap(drm_fd, DRM_CAP_CURSOR_WIDTH, &width)) { + fprintf(stderr, "cannot get cursor width. \n"); + } else if (drmGetCap(drm_fd, DRM_CAP_CURSOR_HEIGHT, &height)) { + fprintf(stderr, "cannot get cursor height. 
\n"); + } + + if (!width) + width = kDefaultCursorWidth; + + *cursor_width = width; + + if (!height) + height = kDefaultCursorHeight; + + *cursor_height = height; +} + +int i915_private_init(struct driver *drv, uint64_t *cursor_width, uint64_t *cursor_height) +{ + get_preferred_cursor_attributes(drv->fd, cursor_width, cursor_height); + return 0; +} + +int i915_private_add_combinations(struct driver *drv) +{ + struct format_metadata metadata; + uint64_t render_flags, texture_flags; + + render_flags = BO_USE_RENDER_MASK; + texture_flags = BO_USE_TEXTURE_MASK; + + metadata.tiling = I915_TILING_NONE; + metadata.priority = 1; + metadata.modifier = DRM_FORMAT_MOD_NONE; + + drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, + BO_USE_RENDERING | BO_USE_TEXTURE | BO_USE_CAMERA_MASK); + drv_modify_combination(drv, DRM_FORMAT_YUYV, &metadata, + BO_USE_TEXTURE | BO_USE_CAMERA_MASK | BO_USE_RENDERING); + drv_modify_combination(drv, DRM_FORMAT_VYUY, &metadata, + BO_USE_TEXTURE | BO_USE_CAMERA_MASK | BO_USE_RENDERING); + drv_modify_combination(drv, DRM_FORMAT_UYVY, &metadata, + BO_USE_TEXTURE | BO_USE_CAMERA_MASK | BO_USE_RENDERING); + drv_modify_combination(drv, DRM_FORMAT_YVYU, &metadata, + BO_USE_TEXTURE | BO_USE_CAMERA_MASK | BO_USE_RENDERING); + drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, + BO_USE_TEXTURE | BO_USE_CAMERA_MASK); + + /* Media/Camera expect these formats support. 
*/ + metadata.tiling = I915_TILING_NONE; + metadata.priority = 1; + metadata.modifier = DRM_FORMAT_MOD_NONE; + drv_add_combinations(drv, private_linear_source_formats, + ARRAY_SIZE(private_linear_source_formats), &metadata, + texture_flags | BO_USE_CAMERA_MASK); + + metadata.tiling = I915_TILING_Y; + metadata.priority = 3; + metadata.modifier = I915_FORMAT_MOD_Y_TILED; + drv_add_combinations(drv, private_source_formats, ARRAY_SIZE(private_source_formats), + &metadata, texture_flags | BO_USE_CAMERA_MASK); + + texture_flags &= ~BO_USE_RENDERSCRIPT; + texture_flags &= ~BO_USE_SW_WRITE_OFTEN; + texture_flags &= ~BO_USE_SW_READ_OFTEN; + texture_flags &= ~BO_USE_LINEAR; + + metadata.tiling = I915_TILING_X; + metadata.priority = 2; + metadata.modifier = I915_FORMAT_MOD_X_TILED; + + drv_add_combinations(drv, private_linear_source_formats, + ARRAY_SIZE(private_linear_source_formats), &metadata, + texture_flags | BO_USE_CAMERA_MASK); + + return 0; +} + +void i915_private_align_dimensions(uint32_t format, uint32_t *vertical_alignment) +{ + switch (format) { + case DRM_FORMAT_NV12_Y_TILED_INTEL: + *vertical_alignment = 64; + break; + } +} + +uint32_t i915_private_bpp_from_format(uint32_t format, size_t plane) +{ + assert(plane < drv_num_planes_from_format(format)); + + switch (format) { + case DRM_FORMAT_NV12_Y_TILED_INTEL: + return (plane == 0) ? 8 : 4; + case DRM_FORMAT_P010: + return (plane == 0) ? 16 : 8; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_NV16: + return 8; + case DRM_FORMAT_R16: + return 16; + } + + fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format); + return 0; +} + +void i915_private_vertical_subsampling_from_format(uint32_t *vertical_subsampling, uint32_t format, + size_t plane) +{ + switch (format) { + case DRM_FORMAT_NV12_Y_TILED_INTEL: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_P010: + *vertical_subsampling = (plane == 0) ? 
1 : 2; + break; + default: + *vertical_subsampling = 1; + } +} + +size_t i915_private_num_planes_from_format(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_R16: + return 1; + case DRM_FORMAT_NV12_Y_TILED_INTEL: + case DRM_FORMAT_NV16: + case DRM_FORMAT_P010: + return 2; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YUV444: + return 3; + } + + fprintf(stderr, "drv: UNKNOWN FORMAT %d\n", format); + return 0; +} + +uint32_t i915_private_resolve_format(uint32_t format, uint64_t usage, uint32_t *resolved_format) +{ + switch (format) { + case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: + /* KBL camera subsystem requires NV12. */ + if (usage & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE)) { + *resolved_format = DRM_FORMAT_NV12; + return 1; + } + + if (usage & BO_USE_TEXTURE) { + *resolved_format = DRM_FORMAT_ABGR8888; + return 1; + } + } + + return 0; +} diff --git a/i915_private.h b/i915_private.h new file mode 100644 index 0000000..3eb89f6 --- /dev/null +++ b/i915_private.h @@ -0,0 +1,57 @@ +/* + * Copyright 2017 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ +#ifndef I915_PRIVATE +#define I915_PRIVATE + +#include + +#include "i915_private_types.h" + +struct driver; + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [10:6] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian + */ +#ifndef DRM_FORMAT_P010 +#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */ +#endif + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y:x [12:4] little endian + * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian + */ +#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */ + +/* + * 2 plane YCbCr MSB aligned + * index 0 = Y plane, [15:0] Y little endian + * index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian + */ +#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */ + +/* 64 bpp RGB */ +#define DRM_FORMAT_XRGB161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */ +#define DRM_FORMAT_XBGR161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */ + +int i915_private_init(struct driver *drv, uint64_t *cursor_width, uint64_t *cursor_height); + +int i915_private_add_combinations(struct driver *drv); + +void i915_private_align_dimensions(uint32_t format, uint32_t *vertical_alignment); + +uint32_t i915_private_bpp_from_format(uint32_t format, size_t plane); + +void i915_private_vertical_subsampling_from_format(uint32_t *vertical_subsampling, uint32_t format, + size_t plane); + +size_t i915_private_num_planes_from_format(uint32_t format); + +uint32_t i915_private_resolve_format(uint32_t format, uint64_t usage, uint32_t *resolved_format); + +#endif diff --git a/i915_private_types.h b/i915_private_types.h new file mode 100644 index 0000000..57d3ec4 --- /dev/null +++ b/i915_private_types.h @@ -0,0 +1,11 @@ +/* + * Copyright 2017 The Chromium 
OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ +#ifndef I915_PRIVATE_TYPES +#define I915_PRIVATE_TYPES + +#define DRM_FORMAT_NV12_Y_TILED_INTEL fourcc_code('9', '9', '9', '6') + +#endif diff --git a/marvell.c b/marvell.c index 455b033..c0b600b 100644 --- a/marvell.c +++ b/marvell.c @@ -14,17 +14,14 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int marvell_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_add_linear_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats)); } -struct backend backend_marvell = { +const struct backend backend_marvell = { .name = "marvell", .init = marvell_init, .bo_create = drv_dumb_bo_create, diff --git a/mediatek.c b/mediatek.c index 1a1061c..cdfc9ab 100644 --- a/mediatek.c +++ b/mediatek.c @@ -7,9 +7,14 @@ #ifdef DRV_MEDIATEK // clang-format off +#include +#include +#include +#include #include #include #include +#include #include #include // clang-format on @@ -18,69 +23,149 @@ #include "helpers.h" #include "util.h" +#define TILE_TYPE_LINEAR 0 + struct mediatek_private_map_data { void *cached_addr; void *gem_addr; + int prime_fd; }; static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 }; -static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420, +#ifdef MTK_MT8183 +static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV21, DRM_FORMAT_NV12, + DRM_FORMAT_YUYV, DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; +#else +static const uint32_t texture_source_formats[] = { 
DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID, + DRM_FORMAT_NV12 }; +#endif static int mediatek_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + struct format_metadata metadata; + + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT); + + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &LINEAR_METADATA, BO_USE_TEXTURE_MASK); + + drv_add_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA, + BO_USE_SW_MASK | BO_USE_LINEAR | BO_USE_PROTECTED); + /* + * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the + * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future. + */ + drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER); + drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER); + + /* Android CTS tests require this. */ + drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK); - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &LINEAR_METADATA, BO_USE_TEXTURE_MASK); - if (ret) - return ret; + /* Support BO_USE_HW_VIDEO_DECODER for protected content minigbm allocations. 
*/ + metadata.tiling = TILE_TYPE_LINEAR; + metadata.priority = 1; + metadata.modifier = DRM_FORMAT_MOD_LINEAR; + drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_DECODER); + drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &metadata, BO_USE_HW_VIDEO_DECODER); + drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, BO_USE_HW_VIDEO_DECODER); + +#ifdef MTK_MT8183 + /* Only for MT8183 Camera subsystem */ + drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + drv_modify_combination(drv, DRM_FORMAT_NV21, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + drv_modify_combination(drv, DRM_FORMAT_YUYV, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + drv_modify_combination(drv, DRM_FORMAT_R8, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + /* Private formats for private reprocessing in camera */ + drv_add_combination(drv, DRM_FORMAT_MTISP_SXYZW10, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK); +#endif return drv_modify_linear_combinations(drv); } -static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) +static int mediatek_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, const uint64_t *modifiers, + uint32_t count) { int ret; size_t plane; uint32_t stride; struct drm_mtk_gem_create gem_create; + if (!drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) { + errno = EINVAL; + drv_log("no usable modifier found\n"); + return -EINVAL; + } + /* * Since the ARM L1 cache line size is 64 bytes, align to that as a * performance optimization. 
*/ stride = drv_stride_from_format(format, width, 0); stride = ALIGN(stride, 64); - drv_bo_from_format(bo, stride, height, format); + + if (bo->meta.use_flags & BO_USE_HW_VIDEO_ENCODER) { + uint32_t aligned_height = ALIGN(height, 32); + uint32_t padding[DRV_MAX_PLANES] = { 0 }; + + for (plane = 0; plane < bo->meta.num_planes; ++plane) { + uint32_t plane_stride = drv_stride_from_format(format, stride, plane); + padding[plane] = plane_stride * + (32 / drv_vertical_subsampling_from_format(format, plane)); + } + + drv_bo_from_format_and_padding(bo, stride, aligned_height, format, padding); + } else { +#ifdef MTK_MT8183 + /* + * JPEG Encoder Accelerator requires 16x16 alignment. We want the buffer + * from camera can be put in JEA directly so align the height to 16 + * bytes. + */ + if (format == DRM_FORMAT_NV12) + height = ALIGN(height, 16); +#endif + drv_bo_from_format(bo, stride, height, format); + } memset(&gem_create, 0, sizeof(gem_create)); - gem_create.size = bo->total_size; + gem_create.size = bo->meta.total_size; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_CREATE, &gem_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_CREATE failed (size=%llu)\n", - gem_create.size); - return ret; + drv_log("DRM_IOCTL_MTK_GEM_CREATE failed (size=%" PRIu64 ")\n", gem_create.size); + return -errno; } - for (plane = 0; plane < bo->num_planes; plane++) + for (plane = 0; plane < bo->meta.num_planes; plane++) bo->handles[plane].u32 = gem_create.handle; return 0; } -static void *mediatek_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static int mediatek_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) { - int ret; + uint64_t modifiers[] = { DRM_FORMAT_MOD_LINEAR }; + return mediatek_bo_create_with_modifiers(bo, width, height, format, modifiers, + ARRAY_SIZE(modifiers)); +} + +static void *mediatek_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) +{ + int ret, 
prime_fd; struct drm_mtk_gem_map_off gem_map; struct mediatek_private_map_data *priv; @@ -89,70 +174,128 @@ static void *mediatek_bo_map(struct bo *bo, struct map_info *data, size_t plane, ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MTK_GEM_MAP_OFFSET, &gem_map); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n"); + drv_log("DRM_IOCTL_MTK_GEM_MAP_OFFSET failed\n"); + return MAP_FAILED; + } + + prime_fd = drv_bo_get_plane_fd(bo, 0); + if (prime_fd < 0) { + drv_log("Failed to get a prime fd\n"); return MAP_FAILED; } - void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, gem_map.offset); - data->length = bo->total_size; + vma->length = bo->meta.total_size; - if (bo->use_flags & BO_USE_RENDERSCRIPT) { - priv = calloc(1, sizeof(*priv)); - priv->cached_addr = calloc(1, bo->total_size); + priv = calloc(1, sizeof(*priv)); + priv->prime_fd = prime_fd; + vma->priv = priv; + + if (bo->meta.use_flags & BO_USE_RENDERSCRIPT) { + priv->cached_addr = calloc(1, bo->meta.total_size); priv->gem_addr = addr; - memcpy(priv->cached_addr, priv->gem_addr, bo->total_size); - data->priv = priv; addr = priv->cached_addr; } return addr; } -static int mediatek_bo_unmap(struct bo *bo, struct map_info *data) +static int mediatek_bo_unmap(struct bo *bo, struct vma *vma) { - if (data->priv) { - struct mediatek_private_map_data *priv = data->priv; - data->addr = priv->gem_addr; - free(priv->cached_addr); + if (vma->priv) { + struct mediatek_private_map_data *priv = vma->priv; + + if (priv->cached_addr) { + vma->addr = priv->gem_addr; + free(priv->cached_addr); + } + + close(priv->prime_fd); free(priv); - data->priv = NULL; + vma->priv = NULL; + } + + return munmap(vma->addr, vma->length); +} + +static int mediatek_bo_invalidate(struct bo *bo, struct mapping *mapping) +{ + struct mediatek_private_map_data *priv = mapping->vma->priv; + + if (priv) { + 
struct pollfd fds = { + .fd = priv->prime_fd, + }; + + if (mapping->vma->map_flags & BO_MAP_WRITE) + fds.events |= POLLOUT; + + if (mapping->vma->map_flags & BO_MAP_READ) + fds.events |= POLLIN; + + poll(&fds, 1, -1); + if (fds.revents != fds.events) + drv_log("poll prime_fd failed\n"); + + if (priv->cached_addr) + memcpy(priv->cached_addr, priv->gem_addr, bo->meta.total_size); } - return munmap(data->addr, data->length); + return 0; } -static int mediatek_bo_flush(struct bo *bo, struct map_info *data) +static int mediatek_bo_flush(struct bo *bo, struct mapping *mapping) { - struct mediatek_private_map_data *priv = data->priv; - if (priv && (data->map_flags & BO_MAP_WRITE)) - memcpy(priv->gem_addr, priv->cached_addr, bo->total_size); + struct mediatek_private_map_data *priv = mapping->vma->priv; + if (priv && priv->cached_addr && (mapping->vma->map_flags & BO_MAP_WRITE)) + memcpy(priv->gem_addr, priv->cached_addr, bo->meta.total_size); return 0; } -static uint32_t mediatek_resolve_format(uint32_t format, uint64_t use_flags) +static uint32_t mediatek_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) { switch (format) { case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: +#ifdef MTK_MT8183 + /* Only MT8183 Camera subsystem offers private reprocessing + * capability. CAMERA_READ indicates the buffer is intended for + * reprocessing and hence given the private format for MTK. */ + if (use_flags & BO_USE_CAMERA_READ) + return DRM_FORMAT_MTISP_SXYZW10; + /* For non-reprocessing uses, only MT8183 Camera subsystem + * requires NV12. */ + else if (use_flags & BO_USE_CAMERA_WRITE) + return DRM_FORMAT_NV12; +#endif /*HACK: See b/28671744 */ return DRM_FORMAT_XBGR8888; case DRM_FORMAT_FLEX_YCbCr_420_888: +#ifdef MTK_MT8183 + /* MT8183 camera and decoder subsystems require NV12. 
*/ + if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | + BO_USE_HW_VIDEO_DECODER | BO_USE_HW_VIDEO_ENCODER)) { + return DRM_FORMAT_NV12; + } +#endif return DRM_FORMAT_YVU420; default: return format; } } -struct backend backend_mediatek = { +const struct backend backend_mediatek = { .name = "mediatek", .init = mediatek_init, .bo_create = mediatek_bo_create, + .bo_create_with_modifiers = mediatek_bo_create_with_modifiers, .bo_destroy = drv_gem_bo_destroy, .bo_import = drv_prime_bo_import, .bo_map = mediatek_bo_map, .bo_unmap = mediatek_bo_unmap, + .bo_invalidate = mediatek_bo_invalidate, .bo_flush = mediatek_bo_flush, .resolve_format = mediatek_resolve_format, }; diff --git a/meson.c b/meson.c new file mode 100644 index 0000000..f82c57a --- /dev/null +++ b/meson.c @@ -0,0 +1,35 @@ +/* + * Copyright 2018 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifdef DRV_MESON + +#include "drv_priv.h" +#include "helpers.h" +#include "util.h" + +static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_BGR888, DRM_FORMAT_BGR565}; + +static int meson_init(struct driver *drv) +{ + drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT); + + return drv_modify_linear_combinations(drv); +} + +const struct backend backend_meson = { + .name = "meson", + .init = meson_init, + .bo_create = drv_dumb_bo_create, + .bo_destroy = drv_dumb_bo_destroy, + .bo_import = drv_prime_bo_import, + .bo_map = drv_dumb_bo_map, + .bo_unmap = drv_bo_munmap, +}; + +#endif diff --git a/msm.c b/msm.c new file mode 100644 index 0000000..fac1fd0 --- /dev/null +++ b/msm.c @@ -0,0 +1,313 @@ +/* + * Copyright 2018 The Chromium OS Authors. All rights reserved. 
+ * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifdef DRV_MSM + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "drv_priv.h" +#include "helpers.h" +#include "util.h" + +/* Alignment values are based on SDM845 Gfx IP */ +#define DEFAULT_ALIGNMENT 64 +#define BUFFER_SIZE_ALIGN 4096 + +#define VENUS_STRIDE_ALIGN 128 +#define VENUS_SCANLINE_ALIGN 16 +#define NV12_LINEAR_PADDING (12 * 1024) +#define NV12_UBWC_PADDING(y_stride) (MAX(16 * 1024, y_stride * 48)) +#define MACROTILE_WIDTH_ALIGN 64 +#define MACROTILE_HEIGHT_ALIGN 16 +#define PLANE_SIZE_ALIGN 4096 + +#define MSM_UBWC_TILING 1 + +static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB8888 }; + +static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_R8, + DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; + +/* + * Each macrotile consists of m x n (mostly 4 x 4) tiles. + * Pixel data pitch/stride is aligned with macrotile width. + * Pixel data height is aligned with macrotile height. + * Entire pixel data buffer is aligned with 4k(bytes). 
+ */ +static uint32_t get_ubwc_meta_size(uint32_t width, uint32_t height, uint32_t tile_width, + uint32_t tile_height) +{ + uint32_t macrotile_width, macrotile_height; + + macrotile_width = DIV_ROUND_UP(width, tile_width); + macrotile_height = DIV_ROUND_UP(height, tile_height); + + // Align meta buffer width to 64 blocks + macrotile_width = ALIGN(macrotile_width, MACROTILE_WIDTH_ALIGN); + + // Align meta buffer height to 16 blocks + macrotile_height = ALIGN(macrotile_height, MACROTILE_HEIGHT_ALIGN); + + return ALIGN(macrotile_width * macrotile_height, PLANE_SIZE_ALIGN); +} + +static void msm_calculate_layout(struct bo *bo) +{ + uint32_t width, height; + + width = bo->meta.width; + height = bo->meta.height; + + /* NV12 format requires extra padding with platform + * specific alignments for venus driver + */ + if (bo->meta.format == DRM_FORMAT_NV12) { + uint32_t y_stride, uv_stride, y_scanline, uv_scanline, y_plane, uv_plane, size, + extra_padding; + + y_stride = ALIGN(width, VENUS_STRIDE_ALIGN); + uv_stride = ALIGN(width, VENUS_STRIDE_ALIGN); + y_scanline = ALIGN(height, VENUS_SCANLINE_ALIGN * 2); + uv_scanline = ALIGN(DIV_ROUND_UP(height, 2), VENUS_SCANLINE_ALIGN); + y_plane = y_stride * y_scanline; + uv_plane = uv_stride * uv_scanline; + + if (bo->meta.tiling == MSM_UBWC_TILING) { + y_plane += get_ubwc_meta_size(width, height, 32, 8); + uv_plane += get_ubwc_meta_size(width >> 1, height >> 1, 16, 8); + extra_padding = NV12_UBWC_PADDING(y_stride); + } else { + extra_padding = NV12_LINEAR_PADDING; + } + + bo->meta.strides[0] = y_stride; + bo->meta.sizes[0] = y_plane; + bo->meta.offsets[1] = y_plane; + bo->meta.strides[1] = uv_stride; + size = y_plane + uv_plane + extra_padding; + bo->meta.total_size = ALIGN(size, BUFFER_SIZE_ALIGN); + bo->meta.sizes[1] = bo->meta.total_size - bo->meta.sizes[0]; + } else { + uint32_t stride, alignw, alignh; + + alignw = ALIGN(width, DEFAULT_ALIGNMENT); + /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. 
+ DRM_FORMAT_R8 of height one is used for JPEG camera output, so don't + height align that. */ + if (bo->meta.format == DRM_FORMAT_YVU420_ANDROID || + (bo->meta.format == DRM_FORMAT_R8 && height == 1)) { + alignh = height; + } else { + alignh = ALIGN(height, DEFAULT_ALIGNMENT); + } + + stride = drv_stride_from_format(bo->meta.format, alignw, 0); + + /* Calculate size and assign stride, size, offset to each plane based on format */ + drv_bo_from_format(bo, stride, alignh, bo->meta.format); + + /* For all RGB UBWC formats */ + if (bo->meta.tiling == MSM_UBWC_TILING) { + bo->meta.sizes[0] += get_ubwc_meta_size(width, height, 16, 4); + bo->meta.total_size = bo->meta.sizes[0]; + assert(IS_ALIGNED(bo->meta.total_size, BUFFER_SIZE_ALIGN)); + } + } +} + +static bool is_ubwc_fmt(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_NV12: + return 1; + default: + return 0; + } +} + +static void msm_add_ubwc_combinations(struct driver *drv, const uint32_t *formats, + uint32_t num_formats, struct format_metadata *metadata, + uint64_t use_flags) +{ + for (uint32_t i = 0; i < num_formats; i++) { + if (is_ubwc_fmt(formats[i])) { + struct combination combo = { .format = formats[i], + .metadata = *metadata, + .use_flags = use_flags }; + drv_array_append(drv->combos, &combo); + } + } +} + +static int msm_init(struct driver *drv) +{ + struct format_metadata metadata; + uint64_t render_use_flags = BO_USE_RENDER_MASK | BO_USE_SCANOUT; + uint64_t texture_use_flags = BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_DECODER; + uint64_t sw_flags = (BO_USE_RENDERSCRIPT | BO_USE_SW_WRITE_OFTEN | BO_USE_SW_READ_OFTEN | + BO_USE_LINEAR | BO_USE_PROTECTED); + + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, render_use_flags); + + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &LINEAR_METADATA, 
texture_use_flags); + + /* + * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the + * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future. + */ + drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER); + drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, BO_USE_HW_VIDEO_ENCODER); + + /* The camera stack standardizes on NV12 for YUV buffers. */ + drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SCANOUT); + /* + * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots + * from camera. + */ + drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + + /* Android CTS tests require this. */ + drv_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK); + + drv_modify_linear_combinations(drv); + + metadata.tiling = MSM_UBWC_TILING; + metadata.priority = 2; + metadata.modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED; + + render_use_flags &= ~sw_flags; + texture_use_flags &= ~sw_flags; + + msm_add_ubwc_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &metadata, render_use_flags); + + msm_add_ubwc_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &metadata, texture_use_flags); + + return 0; +} + +static int msm_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, const uint64_t modifier) +{ + struct drm_msm_gem_new req; + int ret; + size_t i; + + bo->meta.tiling = (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) ? 
MSM_UBWC_TILING : 0; + + msm_calculate_layout(bo); + + memset(&req, 0, sizeof(req)); + req.flags = MSM_BO_WC | MSM_BO_SCANOUT; + req.size = bo->meta.total_size; + + ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_NEW, &req); + if (ret) { + drv_log("DRM_IOCTL_MSM_GEM_NEW failed with %s\n", strerror(errno)); + return -errno; + } + + /* + * Though we use only one plane, we need to set handle for + * all planes to pass kernel checks + */ + for (i = 0; i < bo->meta.num_planes; i++) { + bo->handles[i].u32 = req.handle; + bo->meta.format_modifiers[i] = modifier; + } + + return 0; +} + +static int msm_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, const uint64_t *modifiers, uint32_t count) +{ + static const uint64_t modifier_order[] = { + DRM_FORMAT_MOD_QCOM_COMPRESSED, + DRM_FORMAT_MOD_LINEAR, + }; + + uint64_t modifier = + drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order)); + + return msm_bo_create_for_modifier(bo, width, height, format, modifier); +} + +/* msm_bo_create will create linear buffers for now */ +static int msm_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t flags) +{ + struct combination *combo = drv_get_combination(bo->drv, format, flags); + + if (!combo) { + drv_log("invalid format = %d, flags = %" PRIx64 " combination\n", format, flags); + return -EINVAL; + } + + return msm_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier); +} + +static void *msm_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) +{ + int ret; + struct drm_msm_gem_info req; + + memset(&req, 0, sizeof(req)); + req.handle = bo->handles[0].u32; + + ret = drmIoctl(bo->drv->fd, DRM_IOCTL_MSM_GEM_INFO, &req); + if (ret) { + drv_log("DRM_IOCLT_MSM_GEM_INFO failed with %s\n", strerror(errno)); + return MAP_FAILED; + } + vma->length = bo->meta.total_size; + + return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, 
bo->drv->fd, + req.offset); +} + +static uint32_t msm_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) +{ + switch (format) { + case DRM_FORMAT_FLEX_YCbCr_420_888: + return DRM_FORMAT_NV12; + default: + return format; + } +} + +const struct backend backend_msm = { + .name = "msm", + .init = msm_init, + .bo_create = msm_bo_create, + .bo_create_with_modifiers = msm_bo_create_with_modifiers, + .bo_destroy = drv_gem_bo_destroy, + .bo_import = drv_prime_bo_import, + .bo_map = msm_bo_map, + .bo_unmap = drv_bo_munmap, + .resolve_format = msm_resolve_format, +}; +#endif /* DRV_MSM */ diff --git a/nouveau.c b/nouveau.c index e8a02e3..d0f25d4 100644 --- a/nouveau.c +++ b/nouveau.c @@ -12,16 +12,13 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int nouveau_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_modify_linear_combinations(drv); } -struct backend backend_nouveau = { +const struct backend backend_nouveau = { .name = "nouveau", .init = nouveau_init, .bo_create = drv_dumb_bo_create, diff --git a/presubmit.sh b/presubmit.sh index 6d55f2a..5e8a32a 100755 --- a/presubmit.sh +++ b/presubmit.sh @@ -4,5 +4,6 @@ # found in the LICENSE file. 
find \ '(' -name '*.[ch]' -or -name '*.cc' ')' \ - -not -name 'gbm.h' \ + -not -name 'virtgpu_drm.h' \ + -not -name 'gbm.h' -not -name 'virgl_hw.h' \ -exec clang-format -style=file -i {} + diff --git a/radeon.c b/radeon.c index 3af0be1..68445c1 100644 --- a/radeon.c +++ b/radeon.c @@ -12,16 +12,13 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int radeon_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_modify_linear_combinations(drv); } -struct backend backend_radeon = { +const struct backend backend_radeon = { .name = "radeon", .init = radeon_init, .bo_create = drv_dumb_bo_create, diff --git a/rockchip.c b/rockchip.c index 5fb0be4..25f16ab 100644 --- a/rockchip.c +++ b/rockchip.c @@ -7,8 +7,8 @@ #ifdef DRV_ROCKCHIP #include +#include #include -#include #include #include #include @@ -23,12 +23,12 @@ struct rockchip_private_map_data { void *gem_addr; }; -static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, - DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB8888 }; +static const uint32_t scanout_render_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, + DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 }; -static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, - DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; +static const uint32_t texture_only_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_YVU420, + DRM_FORMAT_YVU420_ANDROID }; static int afbc_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, uint32_t format) { @@ -63,112 +63,53 @@ static int afbc_bo_from_format(struct bo *bo, uint32_t width, uint32_t height, u const uint32_t 
body_plane_offset = ALIGN(header_plane_size, body_plane_alignment); const uint32_t total_size = body_plane_offset + body_plane_size; - bo->strides[0] = width_in_blocks * block_width * pixel_size; - bo->sizes[0] = total_size; - bo->offsets[0] = 0; + bo->meta.strides[0] = width_in_blocks * block_width * pixel_size; + bo->meta.sizes[0] = total_size; + bo->meta.offsets[0] = 0; - bo->total_size = total_size; + bo->meta.total_size = total_size; - bo->format_modifiers[0] = DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC; - - return 0; -} - -static int rockchip_add_kms_item(struct driver *drv, const struct kms_item *item) -{ - int ret; - uint32_t i, j; - uint64_t use_flags; - struct combination *combo; - struct format_metadata metadata; - - for (i = 0; i < drv->combos.size; i++) { - combo = &drv->combos.data[i]; - if (combo->format == item->format) { - if (item->modifier == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) { - use_flags = BO_USE_RENDERING | BO_USE_SCANOUT | BO_USE_TEXTURE; - metadata.modifier = item->modifier; - metadata.tiling = 0; - metadata.priority = 2; - - for (j = 0; j < ARRAY_SIZE(texture_source_formats); j++) { - if (item->format == texture_source_formats[j]) - use_flags &= ~BO_USE_RENDERING; - } - - ret = - drv_add_combination(drv, item[i].format, &metadata, use_flags); - if (ret) - return ret; - } else { - combo->use_flags |= item->use_flags; - } - } - } + bo->meta.format_modifiers[0] = DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC; return 0; } static int rockchip_init(struct driver *drv) { - int ret; - uint32_t i, num_items; - struct kms_item *items; struct format_metadata metadata; metadata.tiling = 0; metadata.priority = 1; metadata.modifier = DRM_FORMAT_MOD_LINEAR; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, BO_USE_RENDER_MASK); - if (ret) - return ret; - - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &metadata, BO_USE_TEXTURE_MASK); - if (ret) - return ret; + 
drv_add_combinations(drv, scanout_render_formats, ARRAY_SIZE(scanout_render_formats), + &metadata, BO_USE_RENDER_MASK | BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); - drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); + drv_add_combinations(drv, texture_only_formats, ARRAY_SIZE(texture_only_formats), &metadata, + BO_USE_TEXTURE_MASK); + /* + * Chrome uses DMA-buf mmap to write to YV12 buffers, which are then accessed by the + * Video Encoder Accelerator (VEA). It could also support NV12 potentially in the future. + */ + drv_modify_combination(drv, DRM_FORMAT_YVU420, &metadata, BO_USE_HW_VIDEO_ENCODER); /* Camera ISP supports only NV12 output. */ drv_modify_combination(drv, DRM_FORMAT_NV12, &metadata, - BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER | + BO_USE_HW_VIDEO_ENCODER | BO_USE_SCANOUT); + + drv_modify_linear_combinations(drv); /* * R8 format is used for Android's HAL_PIXEL_FORMAT_BLOB and is used for JPEG snapshots * from camera. 
*/ - drv_modify_combination(drv, DRM_FORMAT_R8, &metadata, - BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE); - - items = drv_query_kms(drv, &num_items); - if (!items || !num_items) - return 0; - - for (i = 0; i < num_items; i++) { - ret = rockchip_add_kms_item(drv, &items[i]); - if (ret) { - free(items); - return ret; - } - } + drv_add_combination(drv, DRM_FORMAT_R8, &metadata, + BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_SW_MASK | + BO_USE_LINEAR | BO_USE_PROTECTED); - free(items); return 0; } -static bool has_modifier(const uint64_t *list, uint32_t count, uint64_t modifier) -{ - uint32_t i; - for (i = 0; i < count; i++) - if (list[i] == modifier) - return true; - - return false; -} - static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, const uint64_t *modifiers, uint32_t count) @@ -178,23 +119,27 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint struct drm_rockchip_gem_create gem_create; if (format == DRM_FORMAT_NV12) { - uint32_t w_mbs = DIV_ROUND_UP(ALIGN(width, 16), 16); - uint32_t h_mbs = DIV_ROUND_UP(ALIGN(height, 16), 16); + uint32_t w_mbs = DIV_ROUND_UP(width, 16); + uint32_t h_mbs = DIV_ROUND_UP(height, 16); uint32_t aligned_width = w_mbs * 16; - uint32_t aligned_height = DIV_ROUND_UP(h_mbs * 16 * 3, 2); + uint32_t aligned_height = h_mbs * 16; - drv_bo_from_format(bo, aligned_width, height, format); - bo->total_size = bo->strides[0] * aligned_height + w_mbs * h_mbs * 128; + drv_bo_from_format(bo, aligned_width, aligned_height, format); + /* + * drv_bo_from_format updates total_size. Add an extra data space for rockchip video + * driver to store motion vectors. 
+ */ + bo->meta.total_size += w_mbs * h_mbs * 128; } else if (width <= 2560 && - has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)) { + drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC)) { /* If the caller has decided they can use AFBC, always * pick that */ afbc_bo_from_format(bo, width, height, format); } else { - if (!has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) { + if (!drv_has_modifier(modifiers, count, DRM_FORMAT_MOD_LINEAR)) { errno = EINVAL; - fprintf(stderr, "no usable modifier found\n"); + drv_log("no usable modifier found\n"); return -1; } @@ -215,17 +160,17 @@ static int rockchip_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint } memset(&gem_create, 0, sizeof(gem_create)); - gem_create.size = bo->total_size; + gem_create.size = bo->meta.total_size; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &gem_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%llu)\n", + drv_log("DRM_IOCTL_ROCKCHIP_GEM_CREATE failed (size=%" PRIu64 ")\n", gem_create.size); - return ret; + return -errno; } - for (plane = 0; plane < bo->num_planes; plane++) + for (plane = 0; plane < bo->meta.num_planes; plane++) bo->handles[plane].u32 = gem_create.handle; return 0; @@ -239,7 +184,7 @@ static int rockchip_bo_create(struct bo *bo, uint32_t width, uint32_t height, ui ARRAY_SIZE(modifiers)); } -static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static void *rockchip_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) { int ret; struct drm_rockchip_gem_map_off gem_map; @@ -247,7 +192,7 @@ static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane, /* We can only map buffers created with SW access flags, which should * have no modifiers (ie, not AFBC). 
*/ - if (bo->format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) + if (bo->meta.format_modifiers[0] == DRM_FORMAT_MOD_CHROMEOS_ROCKCHIP_AFBC) return MAP_FAILED; memset(&gem_map, 0, sizeof(gem_map)); @@ -255,50 +200,59 @@ static void *rockchip_bo_map(struct bo *bo, struct map_info *data, size_t plane, ret = drmIoctl(bo->drv->fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &gem_map); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n"); + drv_log("DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET failed\n"); return MAP_FAILED; } - void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, gem_map.offset); - data->length = bo->total_size; + vma->length = bo->meta.total_size; - if (bo->use_flags & BO_USE_RENDERSCRIPT) { + if (bo->meta.use_flags & BO_USE_RENDERSCRIPT) { priv = calloc(1, sizeof(*priv)); - priv->cached_addr = calloc(1, bo->total_size); + priv->cached_addr = calloc(1, bo->meta.total_size); priv->gem_addr = addr; - memcpy(priv->cached_addr, priv->gem_addr, bo->total_size); - data->priv = priv; + vma->priv = priv; addr = priv->cached_addr; } return addr; } -static int rockchip_bo_unmap(struct bo *bo, struct map_info *data) +static int rockchip_bo_unmap(struct bo *bo, struct vma *vma) { - if (data->priv) { - struct rockchip_private_map_data *priv = data->priv; - data->addr = priv->gem_addr; + if (vma->priv) { + struct rockchip_private_map_data *priv = vma->priv; + vma->addr = priv->gem_addr; free(priv->cached_addr); free(priv); - data->priv = NULL; + vma->priv = NULL; } - return munmap(data->addr, data->length); + return munmap(vma->addr, vma->length); +} + +static int rockchip_bo_invalidate(struct bo *bo, struct mapping *mapping) +{ + if (mapping->vma->priv) { + struct rockchip_private_map_data *priv = mapping->vma->priv; + memcpy(priv->cached_addr, priv->gem_addr, bo->meta.total_size); + } + + return 0; } -static int 
rockchip_bo_flush(struct bo *bo, struct map_info *data) +static int rockchip_bo_flush(struct bo *bo, struct mapping *mapping) { - struct rockchip_private_map_data *priv = data->priv; - if (priv && (data->map_flags & BO_MAP_WRITE)) - memcpy(priv->gem_addr, priv->cached_addr, bo->total_size); + struct rockchip_private_map_data *priv = mapping->vma->priv; + if (priv && (mapping->vma->map_flags & BO_MAP_WRITE)) + memcpy(priv->gem_addr, priv->cached_addr, bo->meta.total_size); return 0; } -static uint32_t rockchip_resolve_format(uint32_t format, uint64_t use_flags) +static uint32_t rockchip_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags) { switch (format) { case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: @@ -314,7 +268,7 @@ static uint32_t rockchip_resolve_format(uint32_t format, uint64_t use_flags) } } -struct backend backend_rockchip = { +const struct backend backend_rockchip = { .name = "rockchip", .init = rockchip_init, .bo_create = rockchip_bo_create, @@ -323,6 +277,7 @@ struct backend backend_rockchip = { .bo_import = drv_prime_bo_import, .bo_map = rockchip_bo_map, .bo_unmap = rockchip_bo_unmap, + .bo_invalidate = rockchip_bo_invalidate, .bo_flush = rockchip_bo_flush, .resolve_format = rockchip_resolve_format, }; diff --git a/synaptics.c b/synaptics.c new file mode 100644 index 0000000..28cb518 --- /dev/null +++ b/synaptics.c @@ -0,0 +1,39 @@ +/* + * Copyright 2020 The Chromium OS Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ +#ifdef DRV_SYNAPTICS + +#include "drv_priv.h" +#include "helpers.h" +#include "util.h" + +static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB8888 }; + +static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_NV12, + DRM_FORMAT_YVU420, DRM_FORMAT_YVU420_ANDROID }; + +static int synaptics_init(struct driver *drv) +{ + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK | BO_USE_SCANOUT); + + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &LINEAR_METADATA, BO_USE_TEXTURE_MASK | BO_USE_HW_VIDEO_ENCODER); + + return drv_modify_linear_combinations(drv); +} + +const struct backend backend_synaptics = { + .name = "synaptics", + .init = synaptics_init, + .bo_create = drv_dumb_bo_create, + .bo_destroy = drv_dumb_bo_destroy, + .bo_import = drv_prime_bo_import, + .bo_map = drv_dumb_bo_map, + .bo_unmap = drv_bo_munmap, +}; + +#endif diff --git a/tegra.c b/tegra.c index d16a182..df97461 100644 --- a/tegra.c +++ b/tegra.c @@ -7,6 +7,7 @@ #ifdef DRV_TEGRA #include +#include #include #include #include @@ -118,12 +119,12 @@ static void transfer_tile(struct bo *bo, uint8_t *tiled, uint8_t *untiled, enum if (tiled >= tiled_last) return; - if (x >= bo->width || y >= bo->height) { + if (x >= bo->meta.width || y >= bo->meta.height) { tiled += bytes_per_pixel; continue; } - tmp = untiled + y * bo->strides[0] + x * bytes_per_pixel; + tmp = untiled + y * bo->meta.strides[0] + x * bytes_per_pixel; if (type == TEGRA_READ_TILED_BUFFER) memcpy(tmp, tiled, bytes_per_pixel); @@ -142,7 +143,7 @@ static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untile gob_top, gob_left; uint32_t i, j, offset; uint8_t *tmp, *tiled_last; - uint32_t bytes_per_pixel = drv_stride_from_format(bo->format, 1, 0); + uint32_t bytes_per_pixel = drv_stride_from_format(bo->meta.format, 1, 0); /* * The 
blocklinear format consists of 8*(2^n) x 64 byte sized tiles, @@ -151,16 +152,16 @@ static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untile gob_width = DIV_ROUND_UP(NV_BLOCKLINEAR_GOB_WIDTH, bytes_per_pixel); gob_height = NV_BLOCKLINEAR_GOB_HEIGHT * (1 << NV_DEFAULT_BLOCK_HEIGHT_LOG2); /* Calculate the height from maximum possible gob height */ - while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->height) + while (gob_height > NV_BLOCKLINEAR_GOB_HEIGHT && gob_height >= 2 * bo->meta.height) gob_height /= 2; gob_size_bytes = gob_height * NV_BLOCKLINEAR_GOB_WIDTH; gob_size_pixels = gob_height * gob_width; - gob_count_x = DIV_ROUND_UP(bo->strides[0], NV_BLOCKLINEAR_GOB_WIDTH); - gob_count_y = DIV_ROUND_UP(bo->height, gob_height); + gob_count_x = DIV_ROUND_UP(bo->meta.strides[0], NV_BLOCKLINEAR_GOB_WIDTH); + gob_count_y = DIV_ROUND_UP(bo->meta.height, gob_height); - tiled_last = tiled + bo->total_size; + tiled_last = tiled + bo->meta.total_size; offset = 0; for (j = 0; j < gob_count_y; j++) { @@ -179,7 +180,6 @@ static void transfer_tiled_memory(struct bo *bo, uint8_t *tiled, uint8_t *untile static int tegra_init(struct driver *drv) { - int ret; struct format_metadata metadata; uint64_t use_flags = BO_USE_RENDER_MASK; @@ -187,10 +187,8 @@ static int tegra_init(struct driver *drv) metadata.priority = 1; metadata.modifier = DRM_FORMAT_MOD_LINEAR; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &metadata, use_flags); drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_CURSOR | BO_USE_SCANOUT); @@ -202,10 +200,8 @@ static int tegra_init(struct driver *drv) metadata.tiling = NV_MEM_KIND_C32_2CRA; metadata.priority = 2; - ret = 
drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &metadata, use_flags); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &metadata, use_flags); drv_modify_combination(drv, DRM_FORMAT_XRGB8888, &metadata, BO_USE_SCANOUT); drv_modify_combination(drv, DRM_FORMAT_ARGB8888, &metadata, BO_USE_SCANOUT); @@ -233,14 +229,14 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3 ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_CREATE, &gem_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size); - return ret; + drv_log("DRM_IOCTL_TEGRA_GEM_CREATE failed (size=%zu)\n", size); + return -errno; } bo->handles[0].u32 = gem_create.handle; - bo->offsets[0] = 0; - bo->total_size = bo->sizes[0] = size; - bo->strides[0] = stride; + bo->meta.offsets[0] = 0; + bo->meta.total_size = bo->meta.sizes[0] = size; + bo->meta.strides[0] = stride; if (kind != NV_MEM_KIND_PITCH) { struct drm_tegra_gem_set_tiling gem_tile; @@ -258,8 +254,8 @@ static int tegra_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint3 } /* Encode blocklinear parameters for EGLImage creation. */ - bo->tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8); - bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling); + bo->meta.tiling = (kind & 0xff) | ((block_height_log2 & 0xf) << 8); + bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling); } return 0; @@ -281,26 +277,26 @@ static int tegra_bo_import(struct bo *bo, struct drv_import_fd_data *data) ret = drmIoctl(bo->drv->fd, DRM_IOCTL_TEGRA_GEM_GET_TILING, &gem_get_tiling); if (ret) { drv_gem_bo_destroy(bo); - return ret; + return -errno; } /* NOTE(djmk): we only know about one tiled format, so if our drmIoctl call tells us we are tiled, assume it is this format (NV_MEM_KIND_C32_2CRA) otherwise linear (KIND_PITCH). 
*/ if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_PITCH) { - bo->tiling = NV_MEM_KIND_PITCH; + bo->meta.tiling = NV_MEM_KIND_PITCH; } else if (gem_get_tiling.mode == DRM_TEGRA_GEM_TILING_MODE_BLOCK) { - bo->tiling = NV_MEM_KIND_C32_2CRA; + bo->meta.tiling = NV_MEM_KIND_C32_2CRA; } else { - fprintf(stderr, "tegra_bo_import: unknown tile format %d", gem_get_tiling.mode); + drv_log("%s: unknown tile format %d\n", __func__, gem_get_tiling.mode); drv_gem_bo_destroy(bo); assert(0); } - bo->format_modifiers[0] = fourcc_mod_code(NV, bo->tiling); + bo->meta.format_modifiers[0] = fourcc_mod_code(NV, bo->meta.tiling); return 0; } -static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static void *tegra_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) { int ret; struct drm_tegra_gem_mmap gem_map; @@ -311,18 +307,18 @@ static void *tegra_bo_map(struct bo *bo, struct map_info *data, size_t plane, ui ret = drmCommandWriteRead(bo->drv->fd, DRM_TEGRA_GEM_MMAP, &gem_map, sizeof(gem_map)); if (ret < 0) { - fprintf(stderr, "drv: DRM_TEGRA_GEM_MMAP failed\n"); + drv_log("DRM_TEGRA_GEM_MMAP failed\n"); return MAP_FAILED; } - void *addr = mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + void *addr = mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, gem_map.offset); - data->length = bo->total_size; - if ((bo->tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) { + vma->length = bo->meta.total_size; + if ((bo->meta.tiling & 0xFF) == NV_MEM_KIND_C32_2CRA && addr != MAP_FAILED) { priv = calloc(1, sizeof(*priv)); - priv->untiled = calloc(1, bo->total_size); + priv->untiled = calloc(1, bo->meta.total_size); priv->tiled = addr; - data->priv = priv; + vma->priv = priv; transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_READ_TILED_BUFFER); addr = priv->untiled; } @@ -330,30 +326,30 @@ static void *tegra_bo_map(struct bo *bo, struct map_info *data, 
size_t plane, ui return addr; } -static int tegra_bo_unmap(struct bo *bo, struct map_info *data) +static int tegra_bo_unmap(struct bo *bo, struct vma *vma) { - if (data->priv) { - struct tegra_private_map_data *priv = data->priv; - data->addr = priv->tiled; + if (vma->priv) { + struct tegra_private_map_data *priv = vma->priv; + vma->addr = priv->tiled; free(priv->untiled); free(priv); - data->priv = NULL; + vma->priv = NULL; } - return munmap(data->addr, data->length); + return munmap(vma->addr, vma->length); } -static int tegra_bo_flush(struct bo *bo, struct map_info *data) +static int tegra_bo_flush(struct bo *bo, struct mapping *mapping) { - struct tegra_private_map_data *priv = data->priv; + struct tegra_private_map_data *priv = mapping->vma->priv; - if (priv && (data->map_flags & BO_MAP_WRITE)) + if (priv && (mapping->vma->map_flags & BO_MAP_WRITE)) transfer_tiled_memory(bo, priv->tiled, priv->untiled, TEGRA_WRITE_TILED_BUFFER); return 0; } -struct backend backend_tegra = { +const struct backend backend_tegra = { .name = "tegra", .init = tegra_init, .bo_create = tegra_bo_create, diff --git a/udl.c b/udl.c index dc3c4eb..12dc967 100644 --- a/udl.c +++ b/udl.c @@ -12,16 +12,13 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int udl_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_modify_linear_combinations(drv); } -struct backend backend_udl = { +const struct backend backend_udl = { .name = "udl", .init = udl_init, .bo_create = drv_dumb_bo_create, diff --git a/util.h b/util.h index fd61d9b..8f8bb0d 100644 --- a/util.h +++ b/util.h @@ -10,7 +10,10 @@ #define MAX(A, B) ((A) > (B) ? 
(A) : (B)) #define ARRAY_SIZE(A) (sizeof(A) / sizeof(*(A))) #define PUBLIC __attribute__((visibility("default"))) -#define ALIGN(A, B) (((A) + (B)-1) / (B) * (B)) +#define ALIGN(A, B) (((A) + (B)-1) & ~((B)-1)) +#define IS_ALIGNED(A, B) (ALIGN((A), (B)) == (A)) #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) +#define STRINGIZE_NO_EXPANSION(x) #x +#define STRINGIZE(x) STRINGIZE_NO_EXPANSION(x) #endif diff --git a/vc4.c b/vc4.c index 20431d9..06b3ed7 100644 --- a/vc4.c +++ b/vc4.c @@ -6,6 +6,7 @@ #ifdef DRV_VC4 +#include #include #include #include @@ -21,23 +22,30 @@ static const uint32_t render_target_formats[] = { DRM_FORMAT_ARGB8888, DRM_FORMA static int vc4_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); return drv_modify_linear_combinations(drv); } -static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, - uint64_t use_flags) +static int vc4_bo_create_for_modifier(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, uint64_t modifier) { int ret; size_t plane; uint32_t stride; struct drm_vc4_create_bo bo_create; + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + break; + case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: + drv_log("DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED not supported yet\n"); + return -EINVAL; + default: + return -EINVAL; + } + /* * Since the ARM L1 cache line size is 64 bytes, align to that as a * performance optimization. 
@@ -47,22 +55,46 @@ static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_ drv_bo_from_format(bo, stride, height, format); memset(&bo_create, 0, sizeof(bo_create)); - bo_create.size = bo->total_size; + bo_create.size = bo->meta.total_size; ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VC4_CREATE_BO, &bo_create); if (ret) { - fprintf(stderr, "drv: DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", - bo->total_size); - return ret; + drv_log("DRM_IOCTL_VC4_GEM_CREATE failed (size=%zu)\n", bo->meta.total_size); + return -errno; } - for (plane = 0; plane < bo->num_planes; plane++) + for (plane = 0; plane < bo->meta.num_planes; plane++) bo->handles[plane].u32 = bo_create.handle; return 0; } -static void *vc4_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint32_t map_flags) +static int vc4_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format, + uint64_t use_flags) +{ + struct combination *combo; + + combo = drv_get_combination(bo->drv, format, use_flags); + if (!combo) + return -EINVAL; + + return vc4_bo_create_for_modifier(bo, width, height, format, combo->metadata.modifier); +} + +static int vc4_bo_create_with_modifiers(struct bo *bo, uint32_t width, uint32_t height, + uint32_t format, const uint64_t *modifiers, uint32_t count) +{ + static const uint64_t modifier_order[] = { + DRM_FORMAT_MOD_LINEAR, + }; + uint64_t modifier; + + modifier = drv_pick_modifier(modifiers, count, modifier_order, ARRAY_SIZE(modifier_order)); + + return vc4_bo_create_for_modifier(bo, width, height, format, modifier); +} + +static void *vc4_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags) { int ret; struct drm_vc4_mmap_bo bo_map; @@ -72,19 +104,20 @@ static void *vc4_bo_map(struct bo *bo, struct map_info *data, size_t plane, uint ret = drmCommandWriteRead(bo->drv->fd, DRM_VC4_MMAP_BO, &bo_map, sizeof(bo_map)); if (ret) { - fprintf(stderr, "drv: DRM_VC4_MMAP_BO failed\n"); + drv_log("DRM_VC4_MMAP_BO failed\n"); return 
MAP_FAILED; } - data->length = bo->total_size; - return mmap(0, bo->total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, + vma->length = bo->meta.total_size; + return mmap(NULL, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd, bo_map.offset); } -struct backend backend_vc4 = { +const struct backend backend_vc4 = { .name = "vc4", .init = vc4_init, .bo_create = vc4_bo_create, + .bo_create_with_modifiers = vc4_bo_create_with_modifiers, .bo_import = drv_prime_bo_import, .bo_destroy = drv_gem_bo_destroy, .bo_map = vc4_bo_map, diff --git a/vgem.c b/vgem.c index 867b439..0d0371c 100644 --- a/vgem.c +++ b/vgem.c @@ -20,16 +20,11 @@ static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU static int vgem_init(struct driver *drv) { - int ret; - ret = drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), - &LINEAR_METADATA, BO_USE_RENDER_MASK); - if (ret) - return ret; + drv_add_combinations(drv, render_target_formats, ARRAY_SIZE(render_target_formats), + &LINEAR_METADATA, BO_USE_RENDER_MASK); - ret = drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), - &LINEAR_METADATA, BO_USE_TEXTURE_MASK); - if (ret) - return ret; + drv_add_combinations(drv, texture_source_formats, ARRAY_SIZE(texture_source_formats), + &LINEAR_METADATA, BO_USE_TEXTURE_MASK); return drv_modify_linear_combinations(drv); } @@ -40,14 +35,10 @@ static int vgem_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32 width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE); height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE); - /* HAL_PIXEL_FORMAT_YV12 requires that the buffer's height not be aligned. 
*/ - if (bo->format == DRM_FORMAT_YVU420_ANDROID) - height = bo->height; - return drv_dumb_bo_create(bo, width, height, format, flags); } -static uint32_t vgem_resolve_format(uint32_t format, uint64_t flags) +static uint32_t vgem_resolve_format(struct driver *drv, uint32_t format, uint64_t flags) { switch (format) { case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED: @@ -60,7 +51,7 @@ static uint32_t vgem_resolve_format(uint32_t format, uint64_t flags) } } -struct backend backend_vgem = { +const struct backend backend_vgem = { .name = "vgem", .init = vgem_init, .bo_create = vgem_bo_create, diff --git a/virgl_hw.h b/virgl_hw.h new file mode 100644 index 0000000..1c493d1 --- /dev/null +++ b/virgl_hw.h @@ -0,0 +1,432 @@ +/* + * Copyright 2014, 2015 Red Hat. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef VIRGL_HW_H +#define VIRGL_HW_H + +#include + +struct virgl_box { + uint32_t x, y, z; + uint32_t w, h, d; +}; + +/* formats known by the HW device - based on gallium subset */ +enum virgl_formats { + VIRGL_FORMAT_B8G8R8A8_UNORM = 1, + VIRGL_FORMAT_B8G8R8X8_UNORM = 2, + VIRGL_FORMAT_A8R8G8B8_UNORM = 3, + VIRGL_FORMAT_X8R8G8B8_UNORM = 4, + VIRGL_FORMAT_B5G5R5A1_UNORM = 5, + VIRGL_FORMAT_B4G4R4A4_UNORM = 6, + VIRGL_FORMAT_B5G6R5_UNORM = 7, + VIRGL_FORMAT_R10G10B10A2_UNORM = 8, + VIRGL_FORMAT_L8_UNORM = 9, /**< ubyte luminance */ + VIRGL_FORMAT_A8_UNORM = 10, /**< ubyte alpha */ + VIRGL_FORMAT_L8A8_UNORM = 12, /**< ubyte alpha, luminance */ + VIRGL_FORMAT_L16_UNORM = 13, /**< ushort luminance */ + + VIRGL_FORMAT_Z16_UNORM = 16, + VIRGL_FORMAT_Z32_UNORM = 17, + VIRGL_FORMAT_Z32_FLOAT = 18, + VIRGL_FORMAT_Z24_UNORM_S8_UINT = 19, + VIRGL_FORMAT_S8_UINT_Z24_UNORM = 20, + VIRGL_FORMAT_Z24X8_UNORM = 21, + VIRGL_FORMAT_S8_UINT = 23, /**< ubyte stencil */ + + VIRGL_FORMAT_R32_FLOAT = 28, + VIRGL_FORMAT_R32G32_FLOAT = 29, + VIRGL_FORMAT_R32G32B32_FLOAT = 30, + VIRGL_FORMAT_R32G32B32A32_FLOAT = 31, + + VIRGL_FORMAT_R16_UNORM = 48, + VIRGL_FORMAT_R16G16_UNORM = 49, + + VIRGL_FORMAT_R16G16B16A16_UNORM = 51, + + VIRGL_FORMAT_R16_SNORM = 56, + VIRGL_FORMAT_R16G16_SNORM = 57, + VIRGL_FORMAT_R16G16B16A16_SNORM = 59, + + VIRGL_FORMAT_R8_UNORM = 64, + VIRGL_FORMAT_R8G8_UNORM = 65, + VIRGL_FORMAT_R8G8B8_UNORM = 66, + VIRGL_FORMAT_R8G8B8A8_UNORM = 67, + + VIRGL_FORMAT_R8_SNORM = 74, + VIRGL_FORMAT_R8G8_SNORM = 75, + VIRGL_FORMAT_R8G8B8_SNORM = 76, + VIRGL_FORMAT_R8G8B8A8_SNORM = 77, + + VIRGL_FORMAT_R16_FLOAT = 91, + VIRGL_FORMAT_R16G16_FLOAT = 92, + VIRGL_FORMAT_R16G16B16_FLOAT = 93, + VIRGL_FORMAT_R16G16B16A16_FLOAT = 94, + + VIRGL_FORMAT_L8_SRGB = 95, + VIRGL_FORMAT_L8A8_SRGB = 96, + VIRGL_FORMAT_B8G8R8A8_SRGB = 100, + VIRGL_FORMAT_B8G8R8X8_SRGB = 101, + VIRGL_FORMAT_R8G8B8A8_SRGB = 104, + + /* compressed formats */ + VIRGL_FORMAT_DXT1_RGB = 105, + VIRGL_FORMAT_DXT1_RGBA = 
106, + VIRGL_FORMAT_DXT3_RGBA = 107, + VIRGL_FORMAT_DXT5_RGBA = 108, + + /* sRGB, compressed */ + VIRGL_FORMAT_DXT1_SRGB = 109, + VIRGL_FORMAT_DXT1_SRGBA = 110, + VIRGL_FORMAT_DXT3_SRGBA = 111, + VIRGL_FORMAT_DXT5_SRGBA = 112, + + /* rgtc compressed */ + VIRGL_FORMAT_RGTC1_UNORM = 113, + VIRGL_FORMAT_RGTC1_SNORM = 114, + VIRGL_FORMAT_RGTC2_UNORM = 115, + VIRGL_FORMAT_RGTC2_SNORM = 116, + + VIRGL_FORMAT_A8B8G8R8_UNORM = 121, + VIRGL_FORMAT_B5G5R5X1_UNORM = 122, + VIRGL_FORMAT_R11G11B10_FLOAT = 124, + VIRGL_FORMAT_R9G9B9E5_FLOAT = 125, + VIRGL_FORMAT_Z32_FLOAT_S8X24_UINT = 126, + + VIRGL_FORMAT_B10G10R10A2_UNORM = 131, + VIRGL_FORMAT_R8G8B8X8_UNORM = 134, + VIRGL_FORMAT_B4G4R4X4_UNORM = 135, + VIRGL_FORMAT_X24S8_UINT = 136, + VIRGL_FORMAT_S8X24_UINT = 137, + VIRGL_FORMAT_B2G3R3_UNORM = 139, + + VIRGL_FORMAT_L16A16_UNORM = 140, + VIRGL_FORMAT_A16_UNORM = 141, + + VIRGL_FORMAT_A8_SNORM = 147, + VIRGL_FORMAT_L8_SNORM = 148, + VIRGL_FORMAT_L8A8_SNORM = 149, + + VIRGL_FORMAT_A16_SNORM = 151, + VIRGL_FORMAT_L16_SNORM = 152, + VIRGL_FORMAT_L16A16_SNORM = 153, + + VIRGL_FORMAT_A16_FLOAT = 155, + VIRGL_FORMAT_L16_FLOAT = 156, + VIRGL_FORMAT_L16A16_FLOAT = 157, + + VIRGL_FORMAT_A32_FLOAT = 159, + VIRGL_FORMAT_L32_FLOAT = 160, + VIRGL_FORMAT_L32A32_FLOAT = 161, + + VIRGL_FORMAT_YV12 = 163, + VIRGL_FORMAT_YV16 = 164, + VIRGL_FORMAT_IYUV = 165, /**< aka I420 */ + VIRGL_FORMAT_NV12 = 166, + VIRGL_FORMAT_NV21 = 167, + + VIRGL_FORMAT_R8_UINT = 177, + VIRGL_FORMAT_R8G8_UINT = 178, + VIRGL_FORMAT_R8G8B8_UINT = 179, + VIRGL_FORMAT_R8G8B8A8_UINT = 180, + + VIRGL_FORMAT_R8_SINT = 181, + VIRGL_FORMAT_R8G8_SINT = 182, + VIRGL_FORMAT_R8G8B8_SINT = 183, + VIRGL_FORMAT_R8G8B8A8_SINT = 184, + + VIRGL_FORMAT_R16_UINT = 185, + VIRGL_FORMAT_R16G16_UINT = 186, + VIRGL_FORMAT_R16G16B16_UINT = 187, + VIRGL_FORMAT_R16G16B16A16_UINT = 188, + + VIRGL_FORMAT_R16_SINT = 189, + VIRGL_FORMAT_R16G16_SINT = 190, + VIRGL_FORMAT_R16G16B16_SINT = 191, + VIRGL_FORMAT_R16G16B16A16_SINT = 192, + 
VIRGL_FORMAT_R32_UINT = 193, + VIRGL_FORMAT_R32G32_UINT = 194, + VIRGL_FORMAT_R32G32B32_UINT = 195, + VIRGL_FORMAT_R32G32B32A32_UINT = 196, + + VIRGL_FORMAT_R32_SINT = 197, + VIRGL_FORMAT_R32G32_SINT = 198, + VIRGL_FORMAT_R32G32B32_SINT = 199, + VIRGL_FORMAT_R32G32B32A32_SINT = 200, + + VIRGL_FORMAT_A8_UINT = 201, + VIRGL_FORMAT_L8_UINT = 203, + VIRGL_FORMAT_L8A8_UINT = 204, + + VIRGL_FORMAT_A8_SINT = 205, + VIRGL_FORMAT_L8_SINT = 207, + VIRGL_FORMAT_L8A8_SINT = 208, + + VIRGL_FORMAT_A16_UINT = 209, + VIRGL_FORMAT_L16_UINT = 211, + VIRGL_FORMAT_L16A16_UINT = 212, + + VIRGL_FORMAT_A16_SINT = 213, + VIRGL_FORMAT_L16_SINT = 215, + VIRGL_FORMAT_L16A16_SINT = 216, + + VIRGL_FORMAT_A32_UINT = 217, + VIRGL_FORMAT_L32_UINT = 219, + VIRGL_FORMAT_L32A32_UINT = 220, + + VIRGL_FORMAT_A32_SINT = 221, + VIRGL_FORMAT_L32_SINT = 223, + VIRGL_FORMAT_L32A32_SINT = 224, + + VIRGL_FORMAT_B10G10R10A2_UINT = 225, + VIRGL_FORMAT_R8G8B8X8_SNORM = 229, + + VIRGL_FORMAT_R8G8B8X8_SRGB = 230, + + VIRGL_FORMAT_R8G8B8X8_UINT = 231, + VIRGL_FORMAT_R8G8B8X8_SINT = 232, + VIRGL_FORMAT_B10G10R10X2_UNORM = 233, + VIRGL_FORMAT_R16G16B16X16_UNORM = 234, + VIRGL_FORMAT_R16G16B16X16_SNORM = 235, + VIRGL_FORMAT_R16G16B16X16_FLOAT = 236, + VIRGL_FORMAT_R16G16B16X16_UINT = 237, + VIRGL_FORMAT_R16G16B16X16_SINT = 238, + + VIRGL_FORMAT_R10G10B10A2_UINT = 253, + + VIRGL_FORMAT_BPTC_RGBA_UNORM = 255, + VIRGL_FORMAT_BPTC_SRGBA = 256, + VIRGL_FORMAT_BPTC_RGB_FLOAT = 257, + VIRGL_FORMAT_BPTC_RGB_UFLOAT = 258, + + VIRGL_FORMAT_R10G10B10X2_UNORM = 308, + VIRGL_FORMAT_A4B4G4R4_UNORM = 311, + + VIRGL_FORMAT_R8_SRGB = 312, + VIRGL_FORMAT_MAX /* = PIPE_FORMAT_COUNT */, + + /* Below formats must not be used in the guest. */ + VIRGL_FORMAT_B8G8R8X8_UNORM_EMULATED, + VIRGL_FORMAT_B8G8R8A8_UNORM_EMULATED, + VIRGL_FORMAT_MAX_EXTENDED +}; + +/* These are used by the capability_bits field in virgl_caps_v2. 
*/ +#define VIRGL_CAP_NONE 0 +#define VIRGL_CAP_TGSI_INVARIANT (1 << 0) +#define VIRGL_CAP_TEXTURE_VIEW (1 << 1) +#define VIRGL_CAP_SET_MIN_SAMPLES (1 << 2) +#define VIRGL_CAP_COPY_IMAGE (1 << 3) +#define VIRGL_CAP_TGSI_PRECISE (1 << 4) +#define VIRGL_CAP_TXQS (1 << 5) +#define VIRGL_CAP_MEMORY_BARRIER (1 << 6) +#define VIRGL_CAP_COMPUTE_SHADER (1 << 7) +#define VIRGL_CAP_FB_NO_ATTACH (1 << 8) +#define VIRGL_CAP_ROBUST_BUFFER_ACCESS (1 << 9) +#define VIRGL_CAP_TGSI_FBFETCH (1 << 10) +#define VIRGL_CAP_SHADER_CLOCK (1 << 11) +#define VIRGL_CAP_TEXTURE_BARRIER (1 << 12) +#define VIRGL_CAP_TGSI_COMPONENTS (1 << 13) +#define VIRGL_CAP_GUEST_MAY_INIT_LOG (1 << 14) +#define VIRGL_CAP_SRGB_WRITE_CONTROL (1 << 15) +#define VIRGL_CAP_QBO (1 << 16) +#define VIRGL_CAP_TRANSFER (1 << 17) +#define VIRGL_CAP_FBO_MIXED_COLOR_FORMATS (1 << 18) +#define VIRGL_CAP_FAKE_FP64 (1 << 19) +#define VIRGL_CAP_BIND_COMMAND_ARGS (1 << 20) +#define VIRGL_CAP_MULTI_DRAW_INDIRECT (1 << 21) +#define VIRGL_CAP_INDIRECT_PARAMS (1 << 22) +#define VIRGL_CAP_TRANSFORM_FEEDBACK3 (1 << 23) +#define VIRGL_CAP_3D_ASTC (1 << 24) +#define VIRGL_CAP_INDIRECT_INPUT_ADDR (1 << 25) +#define VIRGL_CAP_COPY_TRANSFER (1 << 26) +#define VIRGL_CAP_CLIP_HALFZ (1 << 27) +#define VIRGL_CAP_APP_TWEAK_SUPPORT (1 << 28) +#define VIRGL_CAP_BGRA_SRGB_IS_EMULATED (1 << 29) + +/* virgl bind flags - these are compatible with mesa 10.5 gallium. + * but are fixed, no other should be passed to virgl either. 
+ */ +#define VIRGL_BIND_DEPTH_STENCIL (1 << 0) +#define VIRGL_BIND_RENDER_TARGET (1 << 1) +#define VIRGL_BIND_SAMPLER_VIEW (1 << 3) +#define VIRGL_BIND_VERTEX_BUFFER (1 << 4) +#define VIRGL_BIND_INDEX_BUFFER (1 << 5) +#define VIRGL_BIND_CONSTANT_BUFFER (1 << 6) +#define VIRGL_BIND_DISPLAY_TARGET (1 << 7) +#define VIRGL_BIND_COMMAND_ARGS (1 << 8) +#define VIRGL_BIND_STREAM_OUTPUT (1 << 11) +#define VIRGL_BIND_SHADER_BUFFER (1 << 14) +#define VIRGL_BIND_QUERY_BUFFER (1 << 15) +#define VIRGL_BIND_CURSOR (1 << 16) +#define VIRGL_BIND_CUSTOM (1 << 17) +#define VIRGL_BIND_SCANOUT (1 << 18) +/* Used for buffers that are backed by guest storage and + * are only read by the host. + */ +#define VIRGL_BIND_STAGING (1 << 19) +#define VIRGL_BIND_SHARED (1 << 20) + +#define VIRGL_BIND_PREFER_EMULATED_BGRA (1 << 21) + +#define VIRGL_BIND_LINEAR (1 << 22) + +struct virgl_caps_bool_set1 { + unsigned indep_blend_enable:1; + unsigned indep_blend_func:1; + unsigned cube_map_array:1; + unsigned shader_stencil_export:1; + unsigned conditional_render:1; + unsigned start_instance:1; + unsigned primitive_restart:1; + unsigned blend_eq_sep:1; + unsigned instanceid:1; + unsigned vertex_element_instance_divisor:1; + unsigned seamless_cube_map:1; + unsigned occlusion_query:1; + unsigned timer_query:1; + unsigned streamout_pause_resume:1; + unsigned texture_multisample:1; + unsigned fragment_coord_conventions:1; + unsigned depth_clip_disable:1; + unsigned seamless_cube_map_per_texture:1; + unsigned ubo:1; + unsigned color_clamping:1; /* not in GL 3.1 core profile */ + unsigned poly_stipple:1; /* not in GL 3.1 core profile */ + unsigned mirror_clamp:1; + unsigned texture_query_lod:1; + unsigned has_fp64:1; + unsigned has_tessellation_shaders:1; + unsigned has_indirect_draw:1; + unsigned has_sample_shading:1; + unsigned has_cull:1; + unsigned conditional_render_inverted:1; + unsigned derivative_control:1; + unsigned polygon_offset_clamp:1; + unsigned transform_feedback_overflow_query:1; + /* DO 
NOT ADD ANYMORE MEMBERS - need to add another 32-bit to v2 caps */ +}; + +/* endless expansion capabilites - current gallium has 252 formats */ +struct virgl_supported_format_mask { + uint32_t bitmask[16]; +}; +/* capabilities set 2 - version 1 - 32-bit and float values */ +struct virgl_caps_v1 { + uint32_t max_version; + struct virgl_supported_format_mask sampler; + struct virgl_supported_format_mask render; + struct virgl_supported_format_mask depthstencil; + struct virgl_supported_format_mask vertexbuffer; + struct virgl_caps_bool_set1 bset; + uint32_t glsl_level; + uint32_t max_texture_array_layers; + uint32_t max_streamout_buffers; + uint32_t max_dual_source_render_targets; + uint32_t max_render_targets; + uint32_t max_samples; + uint32_t prim_mask; + uint32_t max_tbo_size; + uint32_t max_uniform_blocks; + uint32_t max_viewports; + uint32_t max_texture_gather_components; +}; + +/* + * This struct should be growable when used in capset 2, + * so we shouldn't have to add a v3 ever. + */ +struct virgl_caps_v2 { + struct virgl_caps_v1 v1; + float min_aliased_point_size; + float max_aliased_point_size; + float min_smooth_point_size; + float max_smooth_point_size; + float min_aliased_line_width; + float max_aliased_line_width; + float min_smooth_line_width; + float max_smooth_line_width; + float max_texture_lod_bias; + uint32_t max_geom_output_vertices; + uint32_t max_geom_total_output_components; + uint32_t max_vertex_outputs; + uint32_t max_vertex_attribs; + uint32_t max_shader_patch_varyings; + int32_t min_texel_offset; + int32_t max_texel_offset; + int32_t min_texture_gather_offset; + int32_t max_texture_gather_offset; + uint32_t texture_buffer_offset_alignment; + uint32_t uniform_buffer_offset_alignment; + uint32_t shader_buffer_offset_alignment; + uint32_t capability_bits; + uint32_t sample_locations[8]; + uint32_t max_vertex_attrib_stride; + uint32_t max_shader_buffer_frag_compute; + uint32_t max_shader_buffer_other_stages; + uint32_t 
max_shader_image_frag_compute; + uint32_t max_shader_image_other_stages; + uint32_t max_image_samples; + uint32_t max_compute_work_group_invocations; + uint32_t max_compute_shared_memory_size; + uint32_t max_compute_grid_size[3]; + uint32_t max_compute_block_size[3]; + uint32_t max_texture_2d_size; + uint32_t max_texture_3d_size; + uint32_t max_texture_cube_size; + uint32_t max_combined_shader_buffers; + uint32_t max_atomic_counters[6]; + uint32_t max_atomic_counter_buffers[6]; + uint32_t max_combined_atomic_counters; + uint32_t max_combined_atomic_counter_buffers; + uint32_t host_feature_check_version; + struct virgl_supported_format_mask supported_readback_formats; + struct virgl_supported_format_mask scanout; +}; + +union virgl_caps { + uint32_t max_version; + struct virgl_caps_v1 v1; + struct virgl_caps_v2 v2; +}; + +enum virgl_errors { + VIRGL_ERROR_NONE, + VIRGL_ERROR_UNKNOWN, + VIRGL_ERROR_UNKNOWN_RESOURCE_FORMAT, +}; + +enum virgl_ctx_errors { + VIRGL_ERROR_CTX_NONE, + VIRGL_ERROR_CTX_UNKNOWN, + VIRGL_ERROR_CTX_ILLEGAL_SHADER, + VIRGL_ERROR_CTX_ILLEGAL_HANDLE, + VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, + VIRGL_ERROR_CTX_ILLEGAL_SURFACE, + VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, + VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, + VIRGL_ERROR_CTX_GLES_HAVE_TES_BUT_MISS_TCS, + VIRGL_ERROR_GL_ANY_SAMPLES_PASSED, +}; + +#define VIRGL_RESOURCE_Y_0_TOP (1 << 0) +#endif diff --git a/virtgpu_drm.h b/virtgpu_drm.h new file mode 100644 index 0000000..a92d764 --- /dev/null +++ b/virtgpu_drm.h @@ -0,0 +1,188 @@ +/* + * Copyright 2013 Red Hat + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef VIRTGPU_DRM_H +#define VIRTGPU_DRM_H + +#include "drm.h" + +#if defined(__cplusplus) +extern "C" { +#endif + +/* Please note that modifications to all structs defined here are + * subject to backwards-compatibility constraints. 
+ * + * Do not use pointers, use __u64 instead for 32 bit / 64 bit user/kernel + * compatibility Keep fields aligned to their size + */ + +#define DRM_VIRTGPU_MAP 0x01 +#define DRM_VIRTGPU_EXECBUFFER 0x02 +#define DRM_VIRTGPU_GETPARAM 0x03 +#define DRM_VIRTGPU_RESOURCE_CREATE 0x04 +#define DRM_VIRTGPU_RESOURCE_INFO 0x05 +#define DRM_VIRTGPU_TRANSFER_FROM_HOST 0x06 +#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07 +#define DRM_VIRTGPU_WAIT 0x08 +#define DRM_VIRTGPU_GET_CAPS 0x09 + +#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01 +#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02 +#define VIRTGPU_EXECBUF_FLAGS (\ + VIRTGPU_EXECBUF_FENCE_FD_IN |\ + VIRTGPU_EXECBUF_FENCE_FD_OUT |\ + 0) + +struct drm_virtgpu_map { + __u64 offset; /* use for mmap system call */ + __u32 handle; + __u32 pad; +}; + +struct drm_virtgpu_execbuffer { + __u32 flags; + __u32 size; + __u64 command; /* void* */ + __u64 bo_handles; + __u32 num_bo_handles; + __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */ +}; + +#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */ +#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */ + +struct drm_virtgpu_getparam { + __u64 param; + __u64 value; +}; + +/* NO_BO flags? NO resource flag? */ +/* resource flag for y_0_top */ +struct drm_virtgpu_resource_create { + __u32 target; + __u32 format; + __u32 bind; + __u32 width; + __u32 height; + __u32 depth; + __u32 array_size; + __u32 last_level; + __u32 nr_samples; + __u32 flags; + __u32 bo_handle; /* if this is set - recreate a new resource attached to this bo ? */ + __u32 res_handle; /* returned by kernel */ + __u32 size; /* validate transfer in the host */ + __u32 stride; /* validate transfer in the host */ +}; + +struct drm_virtgpu_resource_info { + __u32 bo_handle; + __u32 res_handle; + __u32 size; + union { + __u32 stride; + __u32 strides[4]; /* strides[0] is accessible with stride. 
*/ + }; + __u32 num_planes; + __u32 offsets[4]; + __u64 format_modifier; +}; + +struct drm_virtgpu_3d_box { + __u32 x; + __u32 y; + __u32 z; + __u32 w; + __u32 h; + __u32 d; +}; + +struct drm_virtgpu_3d_transfer_to_host { + __u32 bo_handle; + struct drm_virtgpu_3d_box box; + __u32 level; + __u32 offset; +}; + +struct drm_virtgpu_3d_transfer_from_host { + __u32 bo_handle; + struct drm_virtgpu_3d_box box; + __u32 level; + __u32 offset; +}; + +#define VIRTGPU_WAIT_NOWAIT 1 /* like it */ +struct drm_virtgpu_3d_wait { + __u32 handle; /* 0 is an invalid handle */ + __u32 flags; +}; + +struct drm_virtgpu_get_caps { + __u32 cap_set_id; + __u32 cap_set_ver; + __u64 addr; + __u32 size; + __u32 pad; +}; + +#define DRM_IOCTL_VIRTGPU_MAP \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map) + +#define DRM_IOCTL_VIRTGPU_EXECBUFFER \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\ + struct drm_virtgpu_execbuffer) + +#define DRM_IOCTL_VIRTGPU_GETPARAM \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GETPARAM,\ + struct drm_virtgpu_getparam) + +#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE, \ + struct drm_virtgpu_resource_create) + +#define DRM_IOCTL_VIRTGPU_RESOURCE_INFO \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_INFO, \ + struct drm_virtgpu_resource_info) + +#define DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_FROM_HOST, \ + struct drm_virtgpu_3d_transfer_from_host) + +#define DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_TRANSFER_TO_HOST, \ + struct drm_virtgpu_3d_transfer_to_host) + +#define DRM_IOCTL_VIRTGPU_WAIT \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_WAIT, \ + struct drm_virtgpu_3d_wait) + +#define DRM_IOCTL_VIRTGPU_GET_CAPS \ + DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \ + struct drm_virtgpu_get_caps) + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/virtio_gpu.c b/virtio_gpu.c index 
6548e59..aae2458 100644 --- a/virtio_gpu.c +++ b/virtio_gpu.c @@ -1,72 +1,889 @@ /* - * Copyright 2016 The Chromium OS Authors. All rights reserved. + * Copyright 2017 The Chromium OS Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ +#include +#include +#include +#include +#include +#include +#include + #include "drv_priv.h" #include "helpers.h" #include "util.h" +#include "virgl_hw.h" +#include "virtgpu_drm.h" + +#ifndef PAGE_SIZE +#define PAGE_SIZE 0x1000 +#endif +#define PIPE_TEXTURE_2D 2 #define MESA_LLVMPIPE_TILE_ORDER 6 #define MESA_LLVMPIPE_TILE_SIZE (1 << MESA_LLVMPIPE_TILE_ORDER) +struct feature { + uint64_t feature; + const char *name; + uint32_t enabled; +}; + +enum feature_id { + feat_3d, + feat_capset_fix, + feat_max, +}; + +#define FEATURE(x) \ + (struct feature) \ + { \ + x, #x, 0 \ + } + +static struct feature features[] = { FEATURE(VIRTGPU_PARAM_3D_FEATURES), + FEATURE(VIRTGPU_PARAM_CAPSET_QUERY_FIX) }; + static const uint32_t render_target_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888 }; -static const uint32_t texture_source_formats[] = { DRM_FORMAT_R8, DRM_FORMAT_YVU420, - DRM_FORMAT_YVU420_ANDROID }; +static const uint32_t dumb_texture_source_formats[] = { + DRM_FORMAT_R8, DRM_FORMAT_R16, DRM_FORMAT_YVU420, + DRM_FORMAT_NV12, DRM_FORMAT_NV21, DRM_FORMAT_YVU420_ANDROID +}; -static int virtio_gpu_init(struct driver *drv) +static const uint32_t texture_source_formats[] = { DRM_FORMAT_NV12, DRM_FORMAT_NV21, + DRM_FORMAT_R8, DRM_FORMAT_R16, + DRM_FORMAT_RG88, DRM_FORMAT_YVU420_ANDROID }; + +struct virtio_gpu_priv { + int caps_is_v2; + union virgl_caps caps; + int host_gbm_enabled; +}; + +static uint32_t translate_format(uint32_t drm_fourcc) +{ + switch (drm_fourcc) { + case DRM_FORMAT_BGR888: + case DRM_FORMAT_RGB888: + return VIRGL_FORMAT_R8G8B8_UNORM; + case DRM_FORMAT_XRGB8888: + return 
VIRGL_FORMAT_B8G8R8X8_UNORM; + case DRM_FORMAT_ARGB8888: + return VIRGL_FORMAT_B8G8R8A8_UNORM; + case DRM_FORMAT_XBGR8888: + return VIRGL_FORMAT_R8G8B8X8_UNORM; + case DRM_FORMAT_ABGR8888: + return VIRGL_FORMAT_R8G8B8A8_UNORM; + case DRM_FORMAT_ABGR16161616F: + return VIRGL_FORMAT_R16G16B16A16_UNORM; + case DRM_FORMAT_RGB565: + return VIRGL_FORMAT_B5G6R5_UNORM; + case DRM_FORMAT_R8: + return VIRGL_FORMAT_R8_UNORM; + case DRM_FORMAT_RG88: + return VIRGL_FORMAT_R8G8_UNORM; + case DRM_FORMAT_NV12: + return VIRGL_FORMAT_NV12; + case DRM_FORMAT_NV21: + return VIRGL_FORMAT_NV21; + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU420_ANDROID: + return VIRGL_FORMAT_YV12; + default: + return 0; + } +} + +static bool virtio_gpu_bitmask_supports_format(struct virgl_supported_format_mask *supported, + uint32_t drm_format) +{ + uint32_t virgl_format = translate_format(drm_format); + if (!virgl_format) { + return false; + } + + uint32_t bitmask_index = virgl_format / 32; + uint32_t bit_index = virgl_format % 32; + return supported->bitmask[bitmask_index] & (1 << bit_index); +} + +// The metadata generated here for emulated buffers is slightly different than the metadata +// generated by drv_bo_from_format. In order to simplify transfers in the flush and invalidate +// functions below, the emulated buffers are oversized. For example, ignoring stride alignment +// requirements to demonstrate, a 6x6 YUV420 image buffer might have the following layout from +// drv_bo_from_format: +// +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | U | U | U | U | U | U | +// | U | U | U | V | V | V | +// | V | V | V | V | V | V | +// +// where each plane immediately follows the previous plane in memory. 
This layout makes it +// difficult to compute the transfers needed for example when the middle 2x2 region of the +// image is locked and needs to be flushed/invalidated. +// +// Emulated multi-plane buffers instead have a layout of: +// +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | Y | Y | Y | Y | Y | Y | +// | U | U | U | | | | +// | U | U | U | | | | +// | U | U | U | | | | +// | V | V | V | | | | +// | V | V | V | | | | +// | V | V | V | | | | +// +// where each plane is placed as a sub-image (albeit with a very large stride) in order to +// simplify transfers into 3 sub-image transfers for the above example. +// +// Additional note: the V-plane is not placed to the right of the U-plane due to some +// observed failures in media framework code which assumes the V-plane is not +// "row-interlaced" with the U-plane. +static void virtio_gpu_get_emulated_metadata(const struct bo *bo, struct bo_metadata *metadata) +{ + uint32_t y_plane_height; + uint32_t c_plane_height; + uint32_t original_width = bo->meta.width; + uint32_t original_height = bo->meta.height; + + metadata->format = DRM_FORMAT_R8; + switch (bo->meta.format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + // Bi-planar + metadata->num_planes = 2; + + y_plane_height = original_height; + c_plane_height = DIV_ROUND_UP(original_height, 2); + + metadata->width = original_width; + metadata->height = y_plane_height + c_plane_height; + + // Y-plane (full resolution) + metadata->strides[0] = metadata->width; + metadata->offsets[0] = 0; + metadata->sizes[0] = metadata->width * y_plane_height; + + // CbCr-plane (half resolution, interleaved, placed below Y-plane) + metadata->strides[1] = metadata->width; + metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0]; + metadata->sizes[1] = metadata->width * c_plane_height; + + metadata->total_size = metadata->width * metadata->height; + break; + 
case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU420_ANDROID: + // Tri-planar + metadata->num_planes = 3; + + y_plane_height = original_height; + c_plane_height = DIV_ROUND_UP(original_height, 2); + + metadata->width = ALIGN(original_width, 32); + metadata->height = y_plane_height + (2 * c_plane_height); + + // Y-plane (full resolution) + metadata->strides[0] = metadata->width; + metadata->offsets[0] = 0; + metadata->sizes[0] = metadata->width * original_height; + + // Cb-plane (half resolution, placed below Y-plane) + metadata->strides[1] = metadata->width; + metadata->offsets[1] = metadata->offsets[0] + metadata->sizes[0]; + metadata->sizes[1] = metadata->width * c_plane_height; + + // Cr-plane (half resolution, placed below Cb-plane) + metadata->strides[2] = metadata->width; + metadata->offsets[2] = metadata->offsets[1] + metadata->sizes[1]; + metadata->sizes[2] = metadata->width * c_plane_height; + + metadata->total_size = metadata->width * metadata->height; + break; + default: + break; + } +} + +struct virtio_transfers_params { + size_t xfers_needed; + struct rectangle xfer_boxes[DRV_MAX_PLANES]; +}; + +static void virtio_gpu_get_emulated_transfers_params(const struct bo *bo, + const struct rectangle *transfer_box, + struct virtio_transfers_params *xfer_params) +{ + uint32_t y_plane_height; + uint32_t c_plane_height; + struct bo_metadata emulated_metadata; + + if (transfer_box->x == 0 && transfer_box->y == 0 && transfer_box->width == bo->meta.width && + transfer_box->height == bo->meta.height) { + virtio_gpu_get_emulated_metadata(bo, &emulated_metadata); + + xfer_params->xfers_needed = 1; + xfer_params->xfer_boxes[0].x = 0; + xfer_params->xfer_boxes[0].y = 0; + xfer_params->xfer_boxes[0].width = emulated_metadata.width; + xfer_params->xfer_boxes[0].height = emulated_metadata.height; + + return; + } + + switch (bo->meta.format) { + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + // Bi-planar + xfer_params->xfers_needed = 2; + + y_plane_height = bo->meta.height; + 
c_plane_height = DIV_ROUND_UP(bo->meta.height, 2); + + // Y-plane (full resolution) + xfer_params->xfer_boxes[0].x = transfer_box->x; + xfer_params->xfer_boxes[0].y = transfer_box->y; + xfer_params->xfer_boxes[0].width = transfer_box->width; + xfer_params->xfer_boxes[0].height = transfer_box->height; + + // CbCr-plane (half resolution, interleaved, placed below Y-plane) + xfer_params->xfer_boxes[1].x = transfer_box->x; + xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height; + xfer_params->xfer_boxes[1].width = transfer_box->width; + xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2); + + break; + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YVU420_ANDROID: + // Tri-planar + xfer_params->xfers_needed = 3; + + y_plane_height = bo->meta.height; + c_plane_height = DIV_ROUND_UP(bo->meta.height, 2); + + // Y-plane (full resolution) + xfer_params->xfer_boxes[0].x = transfer_box->x; + xfer_params->xfer_boxes[0].y = transfer_box->y; + xfer_params->xfer_boxes[0].width = transfer_box->width; + xfer_params->xfer_boxes[0].height = transfer_box->height; + + // Cb-plane (half resolution, placed below Y-plane) + xfer_params->xfer_boxes[1].x = transfer_box->x; + xfer_params->xfer_boxes[1].y = transfer_box->y + y_plane_height; + xfer_params->xfer_boxes[1].width = DIV_ROUND_UP(transfer_box->width, 2); + xfer_params->xfer_boxes[1].height = DIV_ROUND_UP(transfer_box->height, 2); + + // Cr-plane (half resolution, placed below Cb-plane) + xfer_params->xfer_boxes[2].x = transfer_box->x; + xfer_params->xfer_boxes[2].y = transfer_box->y + y_plane_height + c_plane_height; + xfer_params->xfer_boxes[2].width = DIV_ROUND_UP(transfer_box->width, 2); + xfer_params->xfer_boxes[2].height = DIV_ROUND_UP(transfer_box->height, 2); + + break; + } +} + +static bool virtio_gpu_supports_combination_natively(struct driver *drv, uint32_t drm_format, + uint64_t use_flags) +{ + struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv; + + if (priv->caps.max_version == 
0) { + return true; + } + + if ((use_flags & BO_USE_RENDERING) && + !virtio_gpu_bitmask_supports_format(&priv->caps.v1.render, drm_format)) { + return false; + } + + if ((use_flags & BO_USE_TEXTURE) && + !virtio_gpu_bitmask_supports_format(&priv->caps.v1.sampler, drm_format)) { + return false; + } + + if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 && + !virtio_gpu_bitmask_supports_format(&priv->caps.v2.scanout, drm_format)) { + return false; + } + + return true; +} + +// For virtio backends that do not support formats natively (e.g. multi-planar formats are not +// supported in virglrenderer when gbm is unavailable on the host machine), whether or not the +// format and usage combination can be handled as a blob (byte buffer). +static bool virtio_gpu_supports_combination_through_emulation(struct driver *drv, + uint32_t drm_format, + uint64_t use_flags) +{ + struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv; + + // Only enable emulation on non-gbm virtio backends. + if (priv->host_gbm_enabled) { + return false; + } + + if (use_flags & (BO_USE_RENDERING | BO_USE_SCANOUT)) { + return false; + } + + if (!virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_R8, use_flags)) { + return false; + } + + return drm_format == DRM_FORMAT_NV12 || drm_format == DRM_FORMAT_NV21 || + drm_format == DRM_FORMAT_YVU420 || drm_format == DRM_FORMAT_YVU420_ANDROID; +} + +// Adds the given buffer combination to the list of supported buffer combinations if the +// combination is supported by the virtio backend. 
static void virtio_gpu_add_combination(struct driver *drv, uint32_t drm_format,
				       struct format_metadata *metadata, uint64_t use_flags)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	if (features[feat_3d].enabled && priv->caps.max_version >= 1) {
		// Scanout not natively supported (per capset v2): drop the flag rather
		// than rejecting the whole combination.
		if ((use_flags & BO_USE_SCANOUT) && priv->caps_is_v2 &&
		    !virtio_gpu_supports_combination_natively(drv, drm_format, use_flags)) {
			drv_log("Scanout format: %d\n", drm_format);
			use_flags &= ~BO_USE_SCANOUT;
		}

		if (!virtio_gpu_supports_combination_natively(drv, drm_format, use_flags) &&
		    !virtio_gpu_supports_combination_through_emulation(drv, drm_format,
								       use_flags)) {
			drv_log("Skipping unsupported combination format:%d\n", drm_format);
			return;
		}
	}

	drv_add_combination(drv, drm_format, metadata, use_flags);
}

// Adds each given buffer combination to the list of supported buffer combinations if the
// combination supported by the virtio backend.
static void virtio_gpu_add_combinations(struct driver *drv, const uint32_t *drm_formats,
					uint32_t num_formats, struct format_metadata *metadata,
					uint64_t use_flags)
{
	uint32_t i;

	for (i = 0; i < num_formats; i++) {
		virtio_gpu_add_combination(drv, drm_formats[i], metadata, use_flags);
	}
}

// Allocation path when the host lacks 3D support: a kernel dumb buffer.
// Non-R8 formats are padded to llvmpipe's tile size for the host renderer.
static int virtio_dumb_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				 uint64_t use_flags)
{
	if (bo->meta.format != DRM_FORMAT_R8) {
		width = ALIGN(width, MESA_LLVMPIPE_TILE_SIZE);
		height = ALIGN(height, MESA_LLVMPIPE_TILE_SIZE);
	}

	return drv_dumb_bo_create_ex(bo, width, height, format, use_flags, BO_QUIRK_DUMB32BPP);
}

// If *flag contains check_flag, clear it from *flag and accumulate virgl_bind
// into *bind.
static inline void handle_flag(uint64_t *flag, uint64_t check_flag, uint32_t *bind,
			       uint32_t virgl_bind)
{
	if ((*flag) & check_flag) {
		(*flag) &= ~check_flag;
		(*bind) |= virgl_bind;
	}
}

// Translate minigbm BO_USE_* flags into VIRGL_BIND_* flags for resource
// creation; unhandled bits are logged.
static uint32_t use_flags_to_bind(uint64_t use_flags)
{
	/* In crosvm, VIRGL_BIND_SHARED means minigbm will allocate, not virglrenderer. */
	uint32_t bind = VIRGL_BIND_SHARED;

	handle_flag(&use_flags, BO_USE_TEXTURE, &bind, VIRGL_BIND_SAMPLER_VIEW);
	handle_flag(&use_flags, BO_USE_RENDERING, &bind, VIRGL_BIND_RENDER_TARGET);
	handle_flag(&use_flags, BO_USE_SCANOUT, &bind, VIRGL_BIND_SCANOUT);
	handle_flag(&use_flags, BO_USE_CURSOR, &bind, VIRGL_BIND_CURSOR);
	handle_flag(&use_flags, BO_USE_LINEAR, &bind, VIRGL_BIND_LINEAR);

	handle_flag(&use_flags, BO_USE_SW_READ_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_READ_RARELY, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_OFTEN, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_SW_WRITE_RARELY, &bind, VIRGL_BIND_LINEAR);

	// All host drivers only support linear camera buffer formats. If
	// that changes, this will need to be modified.
	handle_flag(&use_flags, BO_USE_CAMERA_READ, &bind, VIRGL_BIND_LINEAR);
	handle_flag(&use_flags, BO_USE_CAMERA_WRITE, &bind, VIRGL_BIND_LINEAR);

	if (use_flags) {
		drv_log("Unhandled bo use flag: %llx\n", (unsigned long long)use_flags);
	}

	return bind;
}

// 3D (virgl) allocation path: fill the bo's plane layout (native or emulated)
// and create a matching host-side resource via RESOURCE_CREATE.
static int virtio_virgl_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				  uint64_t use_flags)
{
	int ret;
	size_t i;
	uint32_t stride;
	struct drm_virtgpu_resource_create res_create;
	struct bo_metadata emulated_metadata;

	if (virtio_gpu_supports_combination_natively(bo->drv, format, use_flags)) {
		stride = drv_stride_from_format(format, width, 0);
		drv_bo_from_format(bo, stride, height, format);
	} else {
		assert(
		    virtio_gpu_supports_combination_through_emulation(bo->drv, format, use_flags));

		virtio_gpu_get_emulated_metadata(bo, &emulated_metadata);

		// Allocate the emulated (R8, oversized) layout instead of the
		// requested multi-planar format.
		format = emulated_metadata.format;
		width = emulated_metadata.width;
		height = emulated_metadata.height;
		for (i = 0; i < emulated_metadata.num_planes; i++) {
			bo->meta.strides[i] = emulated_metadata.strides[i];
			bo->meta.offsets[i] = emulated_metadata.offsets[i];
			bo->meta.sizes[i] = emulated_metadata.sizes[i];
		}
		bo->meta.total_size = emulated_metadata.total_size;
	}

	/*
	 * Setting the target is intended to ensure this resource gets bound as a 2D
	 * texture in the host renderer's GL state. All of these resource properties are
	 * sent unchanged by the kernel to the host, which in turn sends them unchanged to
	 * virglrenderer. When virglrenderer makes a resource, it will convert the target
	 * enum to the equivalent one in GL and then bind the resource to that target.
	 */
	memset(&res_create, 0, sizeof(res_create));

	res_create.target = PIPE_TEXTURE_2D;
	res_create.format = translate_format(format);
	res_create.bind = use_flags_to_bind(use_flags);
	res_create.width = width;
	res_create.height = height;

	/* For virgl 3D */
	res_create.depth = 1;
	res_create.array_size = 1;
	res_create.last_level = 0;
	res_create.nr_samples = 0;

	res_create.size = ALIGN(bo->meta.total_size, PAGE_SIZE); // PAGE_SIZE = 0x1000
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &res_create);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_CREATE failed with %s\n", strerror(errno));
		return ret;
	}

	// All planes share the single GEM handle returned by the kernel.
	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++)
		bo->handles[plane].u32 = res_create.bo_handle;

	return 0;
}

// Map a virgl-backed bo: ask the kernel for the mmap offset, then mmap the
// whole buffer. Returns MAP_FAILED on error.
static void *virtio_virgl_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	int ret;
	struct drm_virtgpu_map gem_map;

	memset(&gem_map, 0, sizeof(gem_map));
	gem_map.handle = bo->handles[0].u32;

	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_MAP, &gem_map);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_MAP failed with %s\n", strerror(errno));
		return MAP_FAILED;
	}

	vma->length = bo->meta.total_size;
	return mmap(0, bo->meta.total_size, drv_get_prot(map_flags), MAP_SHARED, bo->drv->fd,
		    gem_map.offset);
}

// Query host capability sets. Tries capset v2 when the CAPSET_QUERY_FIX
// feature is present, falling back to v1 on failure; *caps_is_v2 reports
// which one was obtained.
static int virtio_gpu_get_caps(struct driver *drv, union virgl_caps *caps, int *caps_is_v2)
{
	int ret;
	struct drm_virtgpu_get_caps cap_args;

	*caps_is_v2 = 0;
	memset(&cap_args, 0, sizeof(cap_args));
	cap_args.addr = (unsigned long long)caps;
	if (features[feat_capset_fix].enabled) {
		*caps_is_v2 = 1;
		cap_args.cap_set_id = 2;
		cap_args.size = sizeof(union virgl_caps);
	} else {
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);
	}

	ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		*caps_is_v2 = 0;

		// Fallback to v1
		cap_args.cap_set_id = 1;
		cap_args.size = sizeof(struct virgl_caps_v1);

		ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &cap_args);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_GET_CAPS failed with %s\n", strerror(errno));
		}
	}

	return ret;
}

// Probe the VIRTGPU_PARAM_* features, fetch capability sets when 3D is
// available, and detect whether the host virglrenderer uses gbm.
static void virtio_gpu_init_features_and_caps(struct driver *drv)
{
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)drv->priv;

	for (uint32_t i = 0; i < ARRAY_SIZE(features); i++) {
		struct drm_virtgpu_getparam params = { 0 };

		params.param = features[i].feature;
		params.value = (uint64_t)(uintptr_t)&features[i].enabled;
		int ret = drmIoctl(drv->fd, DRM_IOCTL_VIRTGPU_GETPARAM, &params);
		if (ret)
			drv_log("DRM_IOCTL_VIRTGPU_GET_PARAM failed with %s\n", strerror(errno));
	}

	if (features[feat_3d].enabled) {
		virtio_gpu_get_caps(drv, &priv->caps, &priv->caps_is_v2);
	}

	// Multi-planar formats are currently only supported in virglrenderer through gbm.
	priv->host_gbm_enabled =
	    virtio_gpu_supports_combination_natively(drv, DRM_FORMAT_NV12, BO_USE_TEXTURE);
}

// Driver init: allocate private state, probe host features/caps, then
// register the format/usage combinations appropriate to the 3D or dumb path.
static int virtio_gpu_init(struct driver *drv)
{
	struct virtio_gpu_priv *priv;

	priv = calloc(1, sizeof(*priv));
	drv->priv = priv;

	virtio_gpu_init_features_and_caps(drv);

	if (features[feat_3d].enabled) {
		/* This doesn't mean host can scanout everything, it just means host
		 * hypervisor can show it. */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		virtio_gpu_add_combinations(drv, texture_source_formats,
					    ARRAY_SIZE(texture_source_formats), &LINEAR_METADATA,
					    BO_USE_TEXTURE_MASK);
	} else {
		/* Virtio primary plane only allows this format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_XRGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_SCANOUT);
		/* Virtio cursor plane only allows this format and Chrome cannot live without
		 * ARGB888 renderable format. */
		virtio_gpu_add_combination(drv, DRM_FORMAT_ARGB8888, &LINEAR_METADATA,
					   BO_USE_RENDER_MASK | BO_USE_CURSOR);
		/* Android needs more, but they cannot be bound as scanouts anymore after
		 * "drm/virtio: fix DRM_FORMAT_* handling" */
		virtio_gpu_add_combinations(drv, render_target_formats,
					    ARRAY_SIZE(render_target_formats), &LINEAR_METADATA,
					    BO_USE_RENDER_MASK);
		virtio_gpu_add_combinations(drv, dumb_texture_source_formats,
					    ARRAY_SIZE(dumb_texture_source_formats),
					    &LINEAR_METADATA, BO_USE_TEXTURE_MASK);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
		virtio_gpu_add_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
					   BO_USE_SW_MASK | BO_USE_LINEAR);
	}

	/* Android CTS tests require this. */
	virtio_gpu_add_combination(drv, DRM_FORMAT_RGB888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_BGR888, &LINEAR_METADATA, BO_USE_SW_MASK);
	virtio_gpu_add_combination(drv, DRM_FORMAT_ABGR16161616F, &LINEAR_METADATA,
				   BO_USE_SW_MASK | BO_USE_TEXTURE_MASK);

	drv_modify_combination(drv, DRM_FORMAT_NV12, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_NV21, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER);
	drv_modify_combination(drv, DRM_FORMAT_YVU420, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
	drv_modify_combination(drv, DRM_FORMAT_YVU420_ANDROID, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_RENDERSCRIPT);
	drv_modify_combination(drv, DRM_FORMAT_R8, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
	drv_modify_combination(drv, DRM_FORMAT_R16, &LINEAR_METADATA,
			       BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE | BO_USE_HW_VIDEO_DECODER);
#ifdef USE_GRALLOC1
	drv_modify_combination(drv, DRM_FORMAT_ABGR8888, &LINEAR_METADATA,
			       BO_USE_CAMERA_WRITE | BO_USE_CAMERA_READ);
#endif
	return drv_modify_linear_combinations(drv);
}

// Release the private state allocated in virtio_gpu_init().
static void virtio_gpu_close(struct driver *drv)
{
	free(drv->priv);
	drv->priv = NULL;
}

// Dispatch bo creation to the virgl or dumb path based on host 3D support.
static int virtio_gpu_bo_create(struct bo *bo, uint32_t width, uint32_t height, uint32_t format,
				uint64_t use_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_create(bo, width, height, format, use_flags);
	else
		return virtio_dumb_bo_create(bo, width, height, format, use_flags);
}

// Destroy a bo via the path matching its creation (virgl GEM vs dumb).
static int virtio_gpu_bo_destroy(struct bo *bo)
{
	if (features[feat_3d].enabled)
		return drv_gem_bo_destroy(bo);
	else
		return drv_dumb_bo_destroy(bo);
}

// Map a bo via the path matching its creation (virgl vs dumb).
static void *virtio_gpu_bo_map(struct bo *bo, struct vma *vma, size_t plane, uint32_t map_flags)
{
	if (features[feat_3d].enabled)
		return virtio_virgl_bo_map(bo, vma, plane, map_flags);
	else
		return drv_dumb_bo_map(bo, vma, plane, map_flags);
}

// Pull host-side contents into the guest mapping (TRANSFER_FROM_HOST per
// plane box, then WAIT). No-op on the dumb path or when the host never
// writes the buffer.
static int virtio_gpu_bo_invalidate(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_from_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	// Invalidate is only necessary if the host writes to the buffer.
	if ((bo->meta.use_flags & (BO_USE_RENDERING | BO_USE_CAMERA_WRITE |
				   BO_USE_HW_VIDEO_ENCODER | BO_USE_HW_VIDEO_DECODER)) == 0)
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	if ((bo->meta.use_flags & BO_USE_RENDERING) == 0) {
		// Unfortunately, the kernel doesn't actually pass the guest layer_stride
		// and guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h).
		// For gbm based resources, we can work around this by using the level field
		// to pass the stride to virglrenderer's gbm transfer code. However, we need
		// to avoid doing this for resources which don't rely on that transfer code,
		// which is resources with the BO_USE_RENDERING flag set.
		// TODO(b/145993887): Send also stride when the patches are landed
		if (priv->host_gbm_enabled) {
			xfer.level = bo->meta.strides[0];
		}
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									 bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// The transfer needs to complete before invalidate returns so that any host changes
	// are visible and to ensure the host doesn't overwrite subsequent guest changes.
	// TODO(b/136733358): Support returning fences from transfers
	memset(&waitcmd, 0, sizeof(waitcmd));
	waitcmd.handle = mapping->vma->handle;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
		return -errno;
	}

	return 0;
}

// Push guest writes to the host (TRANSFER_TO_HOST per plane box), waiting
// only when non-GPU host hardware may consume the buffer. No-op on the dumb
// path or for read-only mappings.
static int virtio_gpu_bo_flush(struct bo *bo, struct mapping *mapping)
{
	int ret;
	size_t i;
	struct drm_virtgpu_3d_transfer_to_host xfer;
	struct drm_virtgpu_3d_wait waitcmd;
	struct virtio_transfers_params xfer_params;
	struct virtio_gpu_priv *priv = (struct virtio_gpu_priv *)bo->drv->priv;

	if (!features[feat_3d].enabled)
		return 0;

	if (!(mapping->vma->map_flags & BO_MAP_WRITE))
		return 0;

	memset(&xfer, 0, sizeof(xfer));
	xfer.bo_handle = mapping->vma->handle;

	// Unfortunately, the kernel doesn't actually pass the guest layer_stride and
	// guest stride to the host (compare virtio_gpu.h and virtgpu_drm.h). We can use
	// the level to work around this.
	if (priv->host_gbm_enabled) {
		xfer.level = bo->meta.strides[0];
	}

	if (virtio_gpu_supports_combination_natively(bo->drv, bo->meta.format,
						     bo->meta.use_flags)) {
		xfer_params.xfers_needed = 1;
		xfer_params.xfer_boxes[0] = mapping->rect;
	} else {
		assert(virtio_gpu_supports_combination_through_emulation(bo->drv, bo->meta.format,
									 bo->meta.use_flags));

		virtio_gpu_get_emulated_transfers_params(bo, &mapping->rect, &xfer_params);
	}

	for (i = 0; i < xfer_params.xfers_needed; i++) {
		xfer.box.x = xfer_params.xfer_boxes[i].x;
		xfer.box.y = xfer_params.xfer_boxes[i].y;
		xfer.box.w = xfer_params.xfer_boxes[i].width;
		xfer.box.h = xfer_params.xfer_boxes[i].height;
		xfer.box.d = 1;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST failed with %s\n",
				strerror(errno));
			return -errno;
		}
	}

	// If the buffer is only accessed by the host GPU, then the flush is ordered
	// with subsequent commands. However, if other host hardware can access the
	// buffer, we need to wait for the transfer to complete for consistency.
	// TODO(b/136733358): Support returning fences from transfers
	if (bo->meta.use_flags & BO_USE_NON_GPU_HW) {
		memset(&waitcmd, 0, sizeof(waitcmd));
		waitcmd.handle = mapping->vma->handle;

		ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
		if (ret) {
			drv_log("DRM_IOCTL_VIRTGPU_WAIT failed with %s\n", strerror(errno));
			return -errno;
		}
	}

	return 0;
}

// Resolve Android flexible formats to a concrete DRM fourcc for this backend.
static uint32_t virtio_gpu_resolve_format(struct driver *drv, uint32_t format, uint64_t use_flags)
{
	switch (format) {
	case DRM_FORMAT_FLEX_IMPLEMENTATION_DEFINED:
		/* Camera subsystem requires NV12. */
		if (use_flags & (BO_USE_CAMERA_READ | BO_USE_CAMERA_WRITE))
			return DRM_FORMAT_NV12;
		/*HACK: See b/28671744 */
		return DRM_FORMAT_XBGR8888;
	case DRM_FORMAT_FLEX_YCbCr_420_888:
		/*
		 * All of our host drivers prefer NV12 as their flexible media format.
		 * If that changes, this will need to be modified.
		 */
		if (features[feat_3d].enabled)
			return DRM_FORMAT_NV12;
		else
			return DRM_FORMAT_YVU420_ANDROID;
	default:
		return format;
	}
}

// Query host-side strides/offsets for a resource and overwrite the guest's
// values when the kernel provides them. No-op on the dumb path.
static int virtio_gpu_resource_info(struct bo *bo, uint32_t strides[DRV_MAX_PLANES],
				    uint32_t offsets[DRV_MAX_PLANES])
{
	int ret;
	struct drm_virtgpu_resource_info res_info;

	if (!features[feat_3d].enabled)
		return 0;

	memset(&res_info, 0, sizeof(res_info));
	res_info.bo_handle = bo->handles[0].u32;
	ret = drmIoctl(bo->drv->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &res_info);
	if (ret) {
		drv_log("DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed with %s\n", strerror(errno));
		return ret;
	}

	for (uint32_t plane = 0; plane < bo->meta.num_planes; plane++) {
		/*
		 * Currently, kernel v4.14 (Betty) doesn't have the extended resource info
		 * ioctl.
		 */
		if (res_info.strides[plane]) {
			strides[plane] = res_info.strides[plane];
			offsets[plane] = res_info.offsets[plane];
		}
	}

	return 0;
}

/* minigbm backend vtable for the virtio-gpu driver. */
const struct backend backend_virtio_gpu = {
	.name = "virtio_gpu",
	.init = virtio_gpu_init,
	.close = virtio_gpu_close,
	.bo_create = virtio_gpu_bo_create,
	.bo_destroy = virtio_gpu_bo_destroy,
	.bo_import = drv_prime_bo_import,
	.bo_map = virtio_gpu_bo_map,
	.bo_unmap = drv_bo_munmap,
	.bo_invalidate = virtio_gpu_bo_invalidate,
	.bo_flush = virtio_gpu_bo_flush,
	.resolve_format = virtio_gpu_resolve_format,
	.resource_info = virtio_gpu_resource_info,
};