From c359b1dd27719aad182c36a35d6e411af6ddb026 Mon Sep 17 00:00:00 2001
From: Jordan Crouse
Date: Wed, 27 May 2020 10:38:28 -0600
Subject: [PATCH] msm: kgsl: Make all GMU code target specific

Move all the GMU code to a6xx target specific code. This is the first
in several steps to make the GMU code more modular and flexible for
various use cases.

Change-Id: Ic0dedbad0e70771c301705a0eb4ed422efb4c80a
Signed-off-by: Jordan Crouse
---
 drivers/gpu/msm/Makefile          |    1 -
 drivers/gpu/msm/adreno.h          |    1 -
 drivers/gpu/msm/adreno_a6xx.c     |    2 -
 drivers/gpu/msm/adreno_a6xx_gmu.c | 1769 ++++++++++++++++++++++++++++-
 drivers/gpu/msm/kgsl_bus.c        |    5 +-
 drivers/gpu/msm/kgsl_gmu.h        |   11 +-
 drivers/gpu/msm/kgsl_gmu_core.c   |    2 +-
 drivers/gpu/msm/kgsl_gmu_core.h   |    2 +-
 8 files changed, 1756 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 4c3f64d41c97..6c9de9394c2c 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -9,7 +9,6 @@ msm_kgsl-y = \
 	kgsl_drawobj.o \
 	kgsl_events.o \
 	kgsl_ioctl.o \
-	kgsl_gmu.o \
 	kgsl_gmu_core.o \
 	kgsl_hfi.o \
 	kgsl_mmu.o \
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 1ddec025e572..1d5bacb851c5 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -667,7 +667,6 @@ enum adreno_regs {
 	ADRENO_REG_GBIF_HALT_ACK,
 	ADRENO_REG_GMU_AO_INTERRUPT_EN,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
-	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 	ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
 	ADRENO_REG_GMU_AHB_FENCE_STATUS,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index ebcd7d4e9425..b3be11b8d414 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -2486,8 +2486,6 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 		A6XX_GMU_AO_INTERRUPT_EN),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
 		A6XX_GMU_AO_HOST_INTERRUPT_CLR),
-	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
-		A6XX_GMU_AO_HOST_INTERRUPT_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK,
 		A6XX_GMU_AO_HOST_INTERRUPT_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_PWR_COL_KEEPALIVE,
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 84398b5a4618..ae5640f30caa 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -3,18 +3,64 @@
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/ -/* soc/qcom/cmd-db.h needs types.h */ +#include +#include +#include +#include #include +#include #include -#include +#include +#include #include +#include #include +#include #include "adreno.h" #include "adreno_a6xx.h" #include "adreno_snapshot.h" +#include "kgsl_bus.h" +#include "kgsl_device.h" #include "kgsl_gmu.h" #include "kgsl_trace.h" +#include "kgsl_util.h" + +struct gmu_iommu_context { + const char *name; + struct platform_device *pdev; + struct iommu_domain *domain; +}; + +struct gmu_vma_entry { + unsigned int start; + unsigned int size; +}; + +static const struct gmu_vma_entry a6xx_gmu_vma_legacy[] = { + [GMU_ITCM] = { .start = 0x00000, .size = SZ_16K }, + [GMU_ICACHE] = { .start = 0x04000, .size = (SZ_256K - SZ_16K) }, + [GMU_DTCM] = { .start = 0x40000, .size = SZ_16K }, + [GMU_DCACHE] = { .start = 0x44000, .size = (SZ_256K - SZ_16K) }, + [GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M }, + [GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G }, + [GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 }, +}; + +static const struct gmu_vma_entry a6xx_gmu_vma[] = { + [GMU_ITCM] = { .start = 0x00000000, .size = SZ_16K }, + [GMU_CACHE] = { .start = SZ_16K, .size = (SZ_16M - SZ_16K) }, + [GMU_DTCM] = { .start = SZ_256M + SZ_16K, .size = SZ_16K }, + [GMU_DCACHE] = { .start = 0x0, .size = 0x0 }, + [GMU_NONCACHED_KERNEL] = { .start = 0x60000000, .size = SZ_512M }, + [GMU_NONCACHED_USER] = { .start = 0x80000000, .size = SZ_1G }, + [GMU_MEM_TYPE_MAX] = { .start = 0x0, .size = 0x0 }, +}; + +static struct gmu_iommu_context a6xx_gmu_ctx[] = { + [GMU_CONTEXT_USER] = { .name = "gmu_user" }, + [GMU_CONTEXT_KERNEL] = { .name = "gmu_kernel" } +}; static const unsigned int a6xx_gmu_gx_registers[] = { /* GMU GX */ @@ -365,11 +411,7 @@ static void a6xx_gmu_power_config(struct kgsl_device *device) RPMH_ENABLE_MASK); } -/* - * a6xx_gmu_start() - Start GMU and wait until FW boot up. 
- * @device: Pointer to KGSL device - */ -static int a6xx_gmu_start(struct kgsl_device *device) +static int a6xx_gmu_device_start(struct kgsl_device *device) { struct gmu_device *gmu = KGSL_GMU_DEVICE(device); u32 val = 0x00000100; @@ -540,6 +582,26 @@ static int _load_legacy_gmu_fw(struct kgsl_device *device, return 0; } +static struct gmu_memdesc *a6xx_gmu_get_memdesc(struct gmu_device *gmu, + unsigned int addr, unsigned int size) +{ + int i; + struct gmu_memdesc *mem; + + for (i = 0; i < GMU_KERNEL_ENTRIES; i++) { + if (!test_bit(i, &gmu->kmem_bitmap)) + continue; + + mem = &gmu->kmem_entries[i]; + + if (addr >= mem->gmuaddr && + (addr + size <= mem->gmuaddr + mem->size)) + return mem; + } + + return NULL; +} + static int load_gmu_fw(struct kgsl_device *device) { struct gmu_device *gmu = KGSL_GMU_DEVICE(device); @@ -560,7 +622,7 @@ static int load_gmu_fw(struct kgsl_device *device) if (blk->size == 0) continue; - md = gmu_get_memdesc(gmu, blk->addr, blk->size); + md = a6xx_gmu_get_memdesc(gmu, blk->addr, blk->size); if (md == NULL) { dev_err(&gmu->pdev->dev, "No backing memory for 0x%8.8X\n", @@ -1045,6 +1107,16 @@ static void load_gmu_version_info(struct kgsl_device *device) &gmu->ver.hfi); } +static void a6xx_gmu_mem_free(struct gmu_device *gmu, + struct gmu_memdesc *md) +{ + /* Free GMU image memory */ + if (md->hostptr) + dma_free_attrs(&gmu->pdev->dev, (size_t) md->size, + (void *)md->hostptr, md->physaddr, 0); + memset(md, 0, sizeof(*md)); +} + /* * a6xx_gmu_fw_start() - set up GMU and start FW * @device: Pointer to KGSL device @@ -1157,7 +1229,7 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, /* Clear any previously set cm3 fault */ atomic_set(&gmu->cm3_fault, 0); - ret = a6xx_gmu_start(device); + ret = a6xx_gmu_device_start(device); if (ret) return ret; @@ -1184,6 +1256,221 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, return ret; } +static int a6xx_gmu_alloc_and_map(struct gmu_device *gmu, + struct gmu_memdesc *md, unsigned int attrs) +{ + struct iommu_domain *domain = a6xx_gmu_ctx[md->ctx_idx].domain; + int ret; + + md->hostptr = dma_alloc_attrs(&gmu->pdev->dev, (size_t) md->size, + &md->physaddr, GFP_KERNEL, 0); + + if (md->hostptr == NULL) + return -ENOMEM; + + ret = iommu_map(domain, md->gmuaddr, md->physaddr, md->size, + attrs); + + if (ret) { + dev_err(&gmu->pdev->dev, + "gmu map err: gaddr=0x%016llX, paddr=0x%pa\n", + md->gmuaddr, &(md->physaddr)); + a6xx_gmu_mem_free(gmu, md); + } + + return ret; +} + +/* + * There are a few static memory buffers that are allocated and mapped at boot + * time for GMU to function. The buffers are permanent (not freed) after + * GPU boot. The size of the buffers are constant and not expected to change. + * + * We define an array and a simple allocator to keep track of the currently + * active SMMU entries of GMU kernel mode context. Each entry is assigned + * a unique address inside GMU kernel mode address range. 
+ */ +static struct gmu_memdesc *a6xx_gmu_kmem_allocate(struct gmu_device *gmu, + enum gmu_mem_type mem_type, unsigned int addr, + unsigned int size, unsigned int attrs) +{ + static unsigned int next_uncached_kernel_alloc; + static unsigned int next_uncached_user_alloc; + + struct gmu_memdesc *md; + int ret; + int entry_idx = find_first_zero_bit( + &gmu->kmem_bitmap, GMU_KERNEL_ENTRIES); + + if (entry_idx >= GMU_KERNEL_ENTRIES) { + dev_err(&gmu->pdev->dev, + "Ran out of GMU kernel mempool slots\n"); + return ERR_PTR(-EINVAL); + } + + /* Non-TCM requests have page alignment requirement */ + if ((mem_type != GMU_ITCM) && (mem_type != GMU_DTCM) && + addr & (PAGE_SIZE - 1)) { + dev_err(&gmu->pdev->dev, + "Invalid alignment request 0x%X\n", + addr); + return ERR_PTR(-EINVAL); + } + + md = &gmu->kmem_entries[entry_idx]; + set_bit(entry_idx, &gmu->kmem_bitmap); + + memset(md, 0, sizeof(*md)); + + switch (mem_type) { + case GMU_ITCM: + case GMU_DTCM: + /* Assign values and return without mapping */ + md->size = size; + md->mem_type = mem_type; + md->gmuaddr = addr; + return md; + + case GMU_DCACHE: + case GMU_ICACHE: + md->ctx_idx = GMU_CONTEXT_KERNEL; + size = PAGE_ALIGN(size); + break; + + case GMU_NONCACHED_KERNEL: + /* Set start address for first uncached kernel alloc */ + if (next_uncached_kernel_alloc == 0) + next_uncached_kernel_alloc = gmu->vma[mem_type].start; + + if (addr == 0) + addr = next_uncached_kernel_alloc; + + md->ctx_idx = GMU_CONTEXT_KERNEL; + size = PAGE_ALIGN(size); + break; + case GMU_NONCACHED_USER: + /* Set start address for first uncached kernel alloc */ + if (next_uncached_user_alloc == 0) + next_uncached_user_alloc = gmu->vma[mem_type].start; + + if (addr == 0) + addr = next_uncached_user_alloc; + + md->ctx_idx = GMU_CONTEXT_USER; + size = PAGE_ALIGN(size); + break; + + default: + dev_err(&gmu->pdev->dev, + "Invalid memory type (%d) requested\n", + mem_type); + clear_bit(entry_idx, &gmu->kmem_bitmap); + return ERR_PTR(-EINVAL); + } + + md->size = size; + md->mem_type = mem_type; + md->gmuaddr = addr; + + ret = a6xx_gmu_alloc_and_map(gmu, md, attrs); + if (ret) { + clear_bit(entry_idx, &gmu->kmem_bitmap); + return ERR_PTR(ret); + } + + if (mem_type == GMU_NONCACHED_KERNEL) + next_uncached_kernel_alloc = PAGE_ALIGN(md->gmuaddr + md->size); + if (mem_type == GMU_NONCACHED_USER) + next_uncached_user_alloc = PAGE_ALIGN(md->gmuaddr + md->size); + + return md; +} + + +static int a6xx_gmu_cache_finalize(struct adreno_device *adreno_dev, + struct gmu_device *gmu) +{ + struct gmu_memdesc *md; + + /* Preallocations were made so no need to request all this memory */ + if (gmu->preallocations) + return 0; + + md = a6xx_gmu_kmem_allocate(gmu, GMU_ICACHE, + gmu->vma[GMU_ICACHE].start, gmu->vma[GMU_ICACHE].size, + (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + if (IS_ERR(md)) + return PTR_ERR(md); + + if (!adreno_is_a650_family(adreno_dev)) { + md = a6xx_gmu_kmem_allocate(gmu, GMU_DCACHE, + gmu->vma[GMU_DCACHE].start, + gmu->vma[GMU_DCACHE].size, + (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + if (IS_ERR(md)) + return PTR_ERR(md); + } + + md = a6xx_gmu_kmem_allocate(gmu, GMU_NONCACHED_KERNEL, + 0, SZ_4K, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + if (IS_ERR(md)) + return PTR_ERR(md); + + if (ADRENO_FEATURE(adreno_dev, ADRENO_ECP)) { + /* Allocation to account for future MEM_ALLOC buffers */ + md = a6xx_gmu_kmem_allocate(gmu, GMU_NONCACHED_KERNEL, + 0, SZ_32K, + (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + if (IS_ERR(md)) + return PTR_ERR(md); + } + + gmu->preallocations = true; + + return 
0; +} + +static enum gmu_mem_type a6xx_gmu_get_blk_memtype(struct gmu_device *gmu, + struct gmu_block_header *blk) +{ + int i; + + for (i = 0; i < GMU_MEM_TYPE_MAX; i++) { + if (blk->addr >= gmu->vma[i].start && + blk->addr + blk->value <= + gmu->vma[i].start + gmu->vma[i].size) + return (enum gmu_mem_type)i; + } + + return GMU_MEM_TYPE_MAX; +} + +static int a6xx_gmu_prealloc_req(struct gmu_device *gmu, + struct gmu_block_header *blk) +{ + enum gmu_mem_type type; + struct gmu_memdesc *md; + + /* Check to see if this memdesc is already around */ + md = a6xx_gmu_get_memdesc(gmu, blk->addr, blk->value); + if (md) + return 0; + + type = a6xx_gmu_get_blk_memtype(gmu, blk); + if (type >= GMU_MEM_TYPE_MAX) + return -EINVAL; + + md = a6xx_gmu_kmem_allocate(gmu, type, blk->addr, blk->value, + (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + if (IS_ERR(md)) + return PTR_ERR(md); + + gmu->preallocations = true; + + return 0; +} + + /* * a6xx_gmu_load_firmware() - Load the ucode into the GPMU RAM & PDC/RSC * @device: Pointer to KGSL device @@ -1232,14 +1519,61 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device) if (blk->type == GMU_BLK_TYPE_PREALLOC_REQ || blk->type == GMU_BLK_TYPE_PREALLOC_PERSIST_REQ) - ret = gmu_prealloc_req(device, blk); + ret = a6xx_gmu_prealloc_req(gmu, blk); if (ret) return ret; } /* Request any other cache ranges that might be required */ - return gmu_cache_finalize(device); + return a6xx_gmu_cache_finalize(adreno_dev, gmu); +} + +static int a6xx_gmu_memory_probe(struct adreno_device *adreno_dev, + struct gmu_device *gmu) +{ + /* Allocates & maps memory for HFI */ + if (IS_ERR_OR_NULL(gmu->hfi_mem)) + gmu->hfi_mem = a6xx_gmu_kmem_allocate(gmu, + GMU_NONCACHED_KERNEL, 0, + HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE)); + if (IS_ERR(gmu->hfi_mem)) + return PTR_ERR(gmu->hfi_mem); + + /* Allocates & maps GMU crash dump memory */ + if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) { + if (IS_ERR_OR_NULL(gmu->dump_mem)) + gmu->dump_mem = a6xx_gmu_kmem_allocate(gmu, + GMU_NONCACHED_KERNEL, 0, SZ_16K, + (IOMMU_READ | IOMMU_WRITE)); + if (IS_ERR(gmu->dump_mem)) + return PTR_ERR(gmu->dump_mem); + } + + /* GMU master log */ + if (IS_ERR_OR_NULL(gmu->gmu_log)) + gmu->gmu_log = a6xx_gmu_kmem_allocate(gmu, + GMU_NONCACHED_KERNEL, 0, + SZ_4K, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV)); + return PTR_ERR_OR_ZERO(gmu->gmu_log); +} + +static int a6xx_gmu_init(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + int ret; + + ret = a6xx_gmu_load_firmware(device); + if (ret) + return ret; + + ret = a6xx_gmu_memory_probe(ADRENO_DEVICE(device), gmu); + if (ret) + return ret; + + hfi_init(gmu); + + return 0; } #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3)) @@ -1278,7 +1612,7 @@ static void do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg, dev_err(device->dev, "%s GBIF halt timed out\n", client); } -static int a6xx_gmu_suspend(struct kgsl_device *device) +static int a6xx_gmu_pwrctrl_suspend(struct kgsl_device *device) { int ret = 0; struct gmu_device *gmu = KGSL_GMU_DEVICE(device); @@ -1410,7 +1744,7 @@ out: } /* - * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface + * a6xx_gmu_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface * @adreno_dev: Pointer to adreno device * @mode: requested power mode * @arg1: first argument for mode control @@ -1428,7 +1762,7 @@ static int a6xx_gmu_rpmh_gpu_pwrctrl(struct kgsl_device *device, ret = a6xx_gmu_fw_start(device, arg1); break; case 
GMU_SUSPEND: - ret = a6xx_gmu_suspend(device); + ret = a6xx_gmu_pwrctrl_suspend(device); break; case GMU_FW_STOP: if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) @@ -1451,6 +1785,113 @@ static int a6xx_gmu_rpmh_gpu_pwrctrl(struct kgsl_device *device, return ret; } +static int a6xx_gmu_suspend(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + + if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags)) + return 0; + + /* Pending message in all queues are abandoned */ + a6xx_gmu_irq_disable(device); + hfi_stop(gmu); + + if (a6xx_gmu_rpmh_gpu_pwrctrl(device, GMU_SUSPEND, 0, 0)) + return -EINVAL; + + clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks); + clear_bit(GMU_CLK_ON, &device->gmu_core.flags); + + if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) + regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_IDLE); + + if (!kgsl_regulator_disable_wait(gmu->cx_gdsc, 5000)) + dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n"); + + if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC)) + regulator_set_mode(gmu->cx_gdsc, REGULATOR_MODE_NORMAL); + + dev_err(&gmu->pdev->dev, "Suspended GMU\n"); + + clear_bit(GMU_FAULT, &device->gmu_core.flags); + + return 0; +} + +static void a6xx_gmu_snapshot(struct kgsl_device *device); + +static int a6xx_gmu_dcvs_set(struct kgsl_device *device, + int gpu_pwrlevel, int bus_level) +{ + int ret = 0; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct hfi_gx_bw_perf_vote_cmd req = { + .ack_type = DCVS_ACK_BLOCK, + .freq = INVALID_DCVS_IDX, + .bw = INVALID_DCVS_IDX, + }; + + /* If GMU has not been started, save it */ + if (!test_bit(GMU_HFI_ON, &device->gmu_core.flags)) { + /* store clock change request */ + set_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags); + return 0; + } + + /* Do not set to XO and lower GPU clock vote from GMU */ + if ((gpu_pwrlevel != INVALID_DCVS_IDX) && + (gpu_pwrlevel >= gmu->num_gpupwrlevels - 1)) + return -EINVAL; + + if (gpu_pwrlevel < gmu->num_gpupwrlevels - 1) + req.freq = gmu->num_gpupwrlevels - gpu_pwrlevel - 1; + + if (bus_level < pwr->ddr_table_count && bus_level > 0) + req.bw = bus_level; + + /* GMU will vote for slumber levels through the sleep sequence */ + if ((req.freq == INVALID_DCVS_IDX) && + (req.bw == INVALID_DCVS_IDX)) { + clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags); + return 0; + } + + if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) + ret = a6xx_gmu_rpmh_gpu_pwrctrl(device, + GMU_DCVS_NOHFI, req.freq, req.bw); + else if (test_bit(GMU_HFI_ON, &device->gmu_core.flags)) + ret = hfi_send_req(gmu, H2F_MSG_GX_BW_PERF_VOTE, &req); + + if (ret) { + dev_err_ratelimited(&gmu->pdev->dev, + "Failed to set GPU perf idx %d, bw idx %d\n", + req.freq, req.bw); + + /* + * We can be here in two situations. First, we send a dcvs + * hfi so gmu knows at what level it must bring up the gpu. + * If that fails, it is already being handled as part of + * gmu boot failures. The other reason why we are here is + * because we are trying to scale an active gpu. For this, + * we need to do inline snapshot and dispatcher based + * recovery. 
+ */ + if (test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)) { + a6xx_gmu_snapshot(device); + adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT | + ADRENO_GMU_FAULT_SKIP_SNAPSHOT); + adreno_dispatcher_schedule(device); + } + } + + /* indicate actual clock change */ + clear_bit(GMU_DCVS_REPLAY, &device->gmu_core.flags); + return ret; +} + static void a6xx_gmu_enable_lm(struct kgsl_device *device) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); @@ -1572,6 +2013,7 @@ struct a6xx_tcm_data { static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device, u8 *buf, size_t remain, void *priv) { + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); struct kgsl_snapshot_gmu_mem *mem_hdr = (struct kgsl_snapshot_gmu_mem *)buf; unsigned int *data = (unsigned int *)(buf + sizeof(*mem_hdr)); @@ -1587,8 +2029,7 @@ static size_t a6xx_snapshot_gmu_tcm(struct kgsl_device *device, mem_hdr->type = SNAPSHOT_GMU_MEM_BIN_BLOCK; mem_hdr->hostaddr = 0; - mem_hdr->gmuaddr = gmu_get_memtype_base(KGSL_GMU_DEVICE(device), - tcm->type); + mem_hdr->gmuaddr = gmu->vma[tcm->type].start; mem_hdr->gpuaddr = 0; for (i = tcm->start; i <= tcm->last; i++) @@ -1701,14 +2142,14 @@ static void a6xx_gmu_snapshot_versions(struct kgsl_device *device, } /* - * a6xx_gmu_snapshot() - A6XX GMU snapshot function + * a6xx_gmu_device_snapshot() - A6XX GMU snapshot function * @device: Device being snapshotted * @snapshot: Pointer to the snapshot instance * * This is where all of the A6XX GMU specific bits and pieces are grabbed * into the snapshot memory */ -static void a6xx_gmu_snapshot(struct kgsl_device *device, +static void a6xx_gmu_device_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot) { unsigned int val; @@ -1817,7 +2258,370 @@ static u64 a6xx_gmu_read_alwayson(struct kgsl_device *device) return a6xx_read_alwayson(ADRENO_DEVICE(device)); } -struct gmu_dev_ops adreno_a6xx_gmudev = { +static irqreturn_t a6xx_gmu_irq_handler(int irq, void *data) +{ + struct kgsl_device *device = data; + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int mask, status = 0; + + gmu_core_regread(device, A6XX_GMU_AO_HOST_INTERRUPT_STATUS, &status); + gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, status); + + /* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */ + if (status & GMU_INT_WDOG_BITE) { + /* Temporarily mask the watchdog interrupt to prevent a storm */ + gmu_core_regread(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK, + &mask); + gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK, + (mask | GMU_INT_WDOG_BITE)); + + /* make sure we're reading the latest cm3_fault */ + smp_rmb(); + + /* + * We should not send NMI if there was a CM3 fault reported + * because we don't want to overwrite the critical CM3 state + * captured by gmu before it sent the CM3 fault interrupt. + */ + if (!atomic_read(&gmu->cm3_fault)) + adreno_gmu_send_nmi(adreno_dev); + + /* + * There is sufficient delay for the GMU to have finished + * handling the NMI before snapshot is taken, as the fault + * worker is scheduled below. 
+ */ + + dev_err_ratelimited(&gmu->pdev->dev, + "GMU watchdog expired interrupt received\n"); + } + if (status & GMU_INT_HOST_AHB_BUS_ERR) + dev_err_ratelimited(&gmu->pdev->dev, + "AHB bus error interrupt received\n"); + if (status & GMU_INT_FENCE_ERR) { + unsigned int fence_status; + + gmu_core_regread(device, A6XX_GMU_AHB_FENCE_STATUS, + &fence_status); + dev_err_ratelimited(&gmu->pdev->dev, + "FENCE error interrupt received %x\n", fence_status); + } + + if (status & ~GMU_AO_INT_MASK) + dev_err_ratelimited(&gmu->pdev->dev, + "Unhandled GMU interrupts 0x%lx\n", + status & ~GMU_AO_INT_MASK); + + return IRQ_HANDLED; +} + + +static void a6xx_gmu_snapshot(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + + /* Abstain from sending another nmi or over-writing snapshot */ + if (test_and_set_bit(GMU_FAULT, &device->gmu_core.flags)) + return; + + /* make sure we're reading the latest cm3_fault */ + smp_rmb(); + + /* + * We should not send NMI if there was a CM3 fault reported because we + * don't want to overwrite the critical CM3 state captured by gmu before + * it sent the CM3 fault interrupt. + */ + if (!atomic_read(&gmu->cm3_fault)) { + adreno_gmu_send_nmi(adreno_dev); + + /* Wait for the NMI to be handled */ + udelay(100); + } + + kgsl_device_snapshot(device, NULL, true); + + gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, + 0xffffffff); + gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, + HFI_IRQ_MASK); + + gmu->fault_count++; +} + +/* Caller shall ensure GPU is ready for SLUMBER */ +static void a6xx_gmu_stop(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int ret = 0; + + if (!test_bit(GMU_CLK_ON, &device->gmu_core.flags)) + return; + + /* Force suspend if gmu is already in fault */ + if (test_bit(GMU_FAULT, &device->gmu_core.flags)) { + a6xx_gmu_suspend(device); + return; + } + + /* Wait for the lowest idle level we requested */ + if (a6xx_gmu_wait_for_lowest_idle(device)) + goto error; + + ret = a6xx_gmu_rpmh_gpu_pwrctrl(device, + GMU_NOTIFY_SLUMBER, 0, 0); + if (ret) + goto error; + + if (a6xx_gmu_wait_for_idle(device)) + goto error; + + /* Pending message in all queues are abandoned */ + a6xx_gmu_irq_disable(device); + hfi_stop(gmu); + + a6xx_gmu_rpmh_gpu_pwrctrl(device, GMU_FW_STOP, 0, 0); + + clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks); + clear_bit(GMU_CLK_ON, &device->gmu_core.flags); + + /* Pool to make sure that the CX is off */ + if (!kgsl_regulator_disable_wait(gmu->cx_gdsc, 5000)) + dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n"); + + icc_set_bw(pwr->icc_path, 0, 0); + return; + +error: + dev_err(&gmu->pdev->dev, "Failed to stop GMU\n"); + a6xx_gmu_snapshot(device); + /* + * We failed to stop the gmu successfully. Force a suspend + * to set things up for a fresh start. 
+	 */
+	a6xx_gmu_suspend(device);
+}
+
+static int a6xx_gmu_aop_send_acd_state(struct mbox_chan *channel, bool flag)
+{
+	char msg_buf[33];
+	struct {
+		u32 len;
+		void *msg;
+	} msg;
+
+	if (IS_ERR_OR_NULL(channel))
+		return 0;
+
+	msg.len = scnprintf(msg_buf, sizeof(msg_buf),
+			"{class: gpu, res: acd, value: %d}", flag);
+	msg.msg = msg_buf;
+
+	return mbox_send_message(channel, &msg);
+}
+
+static int a6xx_gmu_enable_gdsc(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	int ret;
+
+	ret = regulator_enable(gmu->cx_gdsc);
+	if (ret)
+		dev_err(&gmu->pdev->dev,
+			"Failed to enable GMU CX gdsc, error %d\n", ret);
+
+	return ret;
+}
+
+static int a6xx_gmu_clk_set_rate(struct gmu_device *gmu, const char *id,
+		unsigned long rate)
+{
+	struct clk *clk;
+
+	clk = kgsl_of_clk_by_name(gmu->clks, gmu->num_clks, id);
+	if (!clk)
+		return -ENODEV;
+
+	return clk_set_rate(clk, rate);
+}
+
+static int a6xx_gmu_enable_clks(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	int ret;
+
+	ret = a6xx_gmu_clk_set_rate(gmu, "gmu_clk", GMU_FREQUENCY);
+	if (ret) {
+		dev_err(&gmu->pdev->dev, "Unable to set the GMU clock\n");
+		return ret;
+	}
+
+	ret = a6xx_gmu_clk_set_rate(gmu, "hub_clk", 150000000);
+	if (ret && ret != -ENODEV) {
+		dev_err(&gmu->pdev->dev, "Unable to set the HUB clock\n");
+		return ret;
+	}
+
+	ret = clk_bulk_prepare_enable(gmu->num_clks, gmu->clks);
+	if (ret) {
+		dev_err(&gmu->pdev->dev, "Cannot enable GMU clocks\n");
+		return ret;
+	}
+
+	set_bit(GMU_CLK_ON, &device->gmu_core.flags);
+	return 0;
+}
+
+
+
+static int a6xx_gmu_start_from_init(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	int level, ret;
+
+	if (device->state == KGSL_STATE_INIT) {
+		int ret = a6xx_gmu_aop_send_acd_state(gmu->mailbox.channel,
+				adreno_dev->acd_enabled);
+		if (ret)
+			dev_err(&gmu->pdev->dev,
+				"AOP mbox send message failed: %d\n", ret);
+	}
+
+	WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
+
+	a6xx_gmu_enable_gdsc(device);
+	a6xx_gmu_enable_clks(device);
+	a6xx_gmu_irq_enable(device);
+
+	/* Vote for minimal DDR BW for GMU to init */
+	level = pwr->pwrlevels[pwr->default_pwrlevel].bus_min;
+	icc_set_bw(pwr->icc_path, 0, kBps_to_icc(pwr->ddr_table[level]));
+
+	ret = a6xx_gmu_rpmh_gpu_pwrctrl(device, GMU_FW_START,
+			GMU_COLD_BOOT, 0);
+	if (ret)
+		return ret;
+
+	ret = hfi_start(device, gmu, GMU_COLD_BOOT);
+	if (ret)
+		return ret;
+
+	/* Request default DCVS level */
+	return kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+}
+
+static int a6xx_gmu_start_from_slumber(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	int ret;
+
+	WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
+
+	a6xx_gmu_enable_gdsc(device);
+	a6xx_gmu_enable_clks(device);
+	a6xx_gmu_irq_enable(device);
+
+	ret = a6xx_gmu_rpmh_gpu_pwrctrl(device, GMU_FW_START,
+			GMU_COLD_BOOT, 0);
+	if (ret)
+		return ret;
+
+	ret = hfi_start(device, gmu, GMU_COLD_BOOT);
+	if (ret)
+		return ret;
+
+	return kgsl_pwrctrl_set_default_gpu_pwrlevel(device);
+}
+
+static int a6xx_gmu_start_from_reset(struct kgsl_device *device)
+{
+	struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+	int ret;
+
+	a6xx_gmu_suspend(device);
+
+	a6xx_gmu_enable_gdsc(device);
+	a6xx_gmu_enable_clks(device);
+	a6xx_gmu_irq_enable(device);
+
+	ret = a6xx_gmu_rpmh_gpu_pwrctrl(device, GMU_FW_START, GMU_COLD_BOOT, 0);
+	if (ret)
+
return ret; + + ret = hfi_start(device, gmu, GMU_COLD_BOOT); + if (ret) + return ret; + + /* Send DCVS level prior to reset*/ + return kgsl_pwrctrl_set_default_gpu_pwrlevel(device); +} + +/* To be called to power on both GPU and GMU */ +static int a6xx_gmu_start(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int ret = 0; + + switch (device->state) { + case KGSL_STATE_INIT: + case KGSL_STATE_SUSPEND: + ret = a6xx_gmu_start_from_init(device); + break; + + case KGSL_STATE_SLUMBER: + ret = a6xx_gmu_start_from_slumber(device); + break; + case KGSL_STATE_RESET: + ret = a6xx_gmu_start_from_reset(device); + break; + } + + if (ret) { + if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) + a6xx_gmu_oob_clear(device, oob_boot_slumber); + + a6xx_gmu_snapshot(device); + } + + return ret; +} + +static int a6xx_gmu_acd_set(struct kgsl_device *device, bool val) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + int ret; + + if (IS_ERR_OR_NULL(gmu->mailbox.channel)) + return -EINVAL; + + /* Don't do any unneeded work if ACD is already in the correct state */ + if (adreno_dev->acd_enabled == val) + return 0; + + mutex_lock(&device->mutex); + + /* Power down the GPU before enabling or disabling ACD */ + kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND); + + adreno_dev->acd_enabled = val; + ret = a6xx_gmu_aop_send_acd_state(gmu->mailbox.channel, val); + if (ret) + dev_err(&gmu->pdev->dev, + "AOP mbox send message failed: %d\n", ret); + + kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER); + + mutex_unlock(&device->mutex); + return 0; +} + +static struct gmu_dev_ops a6xx_gmudev = { .load_firmware = a6xx_gmu_load_firmware, .oob_set = a6xx_gmu_oob_set, .oob_clear = a6xx_gmu_oob_clear, @@ -1831,7 +2635,7 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .wait_for_gmu_idle = a6xx_gmu_wait_for_idle, .ifpc_store = a6xx_gmu_ifpc_store, .ifpc_show = a6xx_gmu_ifpc_show, - .snapshot = a6xx_gmu_snapshot, + .snapshot = a6xx_gmu_device_snapshot, .cooperative_reset = a6xx_gmu_cooperative_reset, .wait_for_active_transition = a6xx_gmu_wait_for_active_transition, .read_alwayson = a6xx_gmu_read_alwayson, @@ -1839,3 +2643,928 @@ struct gmu_dev_ops adreno_a6xx_gmudev = { .gmu_ao_intr_mask = GMU_AO_INT_MASK, .scales_bandwidth = a6xx_gmu_scales_bandwidth, }; + +static struct gmu_core_ops a6xx_gmu_ops = { + .init = a6xx_gmu_init, + .start = a6xx_gmu_start, + .stop = a6xx_gmu_stop, + .dcvs_set = a6xx_gmu_dcvs_set, + .snapshot = a6xx_gmu_snapshot, + .suspend = a6xx_gmu_suspend, + .acd_set = a6xx_gmu_acd_set, +}; + +static int a6xx_gmu_bus_set(struct kgsl_device *device, int buslevel, + u32 ab) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + int ret = 0; + + if (buslevel != pwr->cur_buslevel) { + ret = a6xx_gmu_dcvs_set(device, INVALID_DCVS_IDX, buslevel); + if (ret) + return ret; + + pwr->cur_buslevel = buslevel; + + trace_kgsl_buslevel(device, pwr->active_pwrlevel, buslevel); + } + + if (ab != pwr->cur_ab) { + icc_set_bw(pwr->icc_path, MBps_to_icc(ab), 0); + pwr->cur_ab = ab; + } + + return ret; +} + +static void a6xx_gmu_iommu_cb_close(struct gmu_iommu_context *ctx); + +static void a6xx_gmu_memory_close(struct gmu_device *gmu) +{ + int i; + struct gmu_memdesc *md; + struct gmu_iommu_context *ctx; + + gmu->hfi_mem = NULL; + gmu->dump_mem = NULL; + gmu->gmu_log = NULL; + gmu->preallocations = false; + + /* Unmap and free all memories in GMU kernel memory pool */ + for (i = 0; i < GMU_KERNEL_ENTRIES; i++) { + if 
(!test_bit(i, &gmu->kmem_bitmap)) + continue; + + md = &gmu->kmem_entries[i]; + ctx = &a6xx_gmu_ctx[md->ctx_idx]; + + if (md->gmuaddr && md->mem_type != GMU_ITCM && + md->mem_type != GMU_DTCM) + iommu_unmap(ctx->domain, md->gmuaddr, md->size); + + a6xx_gmu_mem_free(gmu, md); + + clear_bit(i, &gmu->kmem_bitmap); + } + + a6xx_gmu_iommu_cb_close(&a6xx_gmu_ctx[GMU_CONTEXT_KERNEL]); + a6xx_gmu_iommu_cb_close(&a6xx_gmu_ctx[GMU_CONTEXT_USER]); +} + +static int a6xx_gmu_aop_mailbox_init(struct adreno_device *adreno_dev, + struct gmu_device *gmu) +{ + struct kgsl_mailbox *mailbox = &gmu->mailbox; + + mailbox->client.dev = &gmu->pdev->dev; + mailbox->client.tx_block = true; + mailbox->client.tx_tout = 1000; + mailbox->client.knows_txdone = false; + + mailbox->channel = mbox_request_channel(&mailbox->client, 0); + if (IS_ERR(mailbox->channel)) + return PTR_ERR(mailbox->channel); + + adreno_dev->acd_enabled = true; + return 0; +} + +static void a6xx_gmu_acd_probe(struct kgsl_device *device, + struct gmu_device *gmu, struct device_node *node) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd; + u32 acd_level, cmd_idx, numlvl = pwr->num_pwrlevels; + int ret, i; + + if (!ADRENO_FEATURE(adreno_dev, ADRENO_ACD)) + return; + + cmd->hdr = 0xFFFFFFFF; + cmd->version = HFI_ACD_INIT_VERSION; + cmd->stride = 1; + cmd->enable_by_level = 0; + + for (i = 0, cmd_idx = 0; i < numlvl; i++) { + acd_level = pwr->pwrlevels[numlvl - i].acd_level; + if (acd_level) { + cmd->enable_by_level |= (1 << i); + cmd->data[cmd_idx++] = acd_level; + } + } + + if (!cmd->enable_by_level) + return; + + cmd->num_levels = cmd_idx; + + ret = a6xx_gmu_aop_mailbox_init(adreno_dev, gmu); + if (ret) + dev_err(&gmu->pdev->dev, + "AOP mailbox init failed: %d\n", ret); +} + +struct rpmh_arc_vals { + unsigned int num; + const u16 *val; +}; + +enum rpmh_vote_type { + GPU_ARC_VOTE = 0, + GMU_ARC_VOTE, + INVALID_ARC_VOTE, +}; + +/* + * rpmh_arc_cmds() - query RPMh command database for GX/CX/MX rail + * VLVL tables. The index of table will be used by GMU to vote rail + * voltage. + * + * @gmu: Pointer to GMU device + * @arc: Pointer to RPMh rail controller (ARC) voltage table + * @res_id: Pointer to 8 char array that contains rail name + */ +static int rpmh_arc_cmds(struct gmu_device *gmu, + struct rpmh_arc_vals *arc, const char *res_id) +{ + size_t len = 0; + + arc->val = cmd_db_read_aux_data(res_id, &len); + + /* + * cmd_db_read_aux_data() gives us a zero-padded table of + * size len that contains the arc values. To determine the + * number of arc values, we loop through the table and count + * them until we get to the end of the buffer or hit the + * zero padding. + */ + for (arc->num = 1; arc->num < (len >> 1); arc->num++) { + if (arc->val[arc->num - 1] != 0 && arc->val[arc->num] == 0) + break; + } + + return 0; +} + +/* + * setup_volt_dependency_tbl() - set up GX->MX or CX->MX rail voltage + * dependencies. Second rail voltage shall be equal to or higher than + * primary rail voltage. VLVL table index was used by RPMh for PMIC + * voltage setting. 
+ * @votes: Pointer to a ARC vote descriptor + * @pri_rail: Pointer to primary power rail VLVL table + * @sec_rail: Pointer to second/dependent power rail VLVL table + * @vlvl: Pointer to VLVL table being used by GPU or GMU driver, a subset + * of pri_rail VLVL table + * @num_entries: Valid number of entries in table pointed by "vlvl" parameter + */ +static int setup_volt_dependency_tbl(uint32_t *votes, + struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail, + u16 *vlvl, unsigned int num_entries) +{ + int i, j, k; + uint16_t cur_vlvl; + bool found_match; + + /* i tracks current KGSL GPU frequency table entry + * j tracks secondary rail voltage table entry + * k tracks primary rail voltage table entry + */ + for (i = 0; i < num_entries; i++) { + found_match = false; + + /* Look for a primary rail voltage that matches a VLVL level */ + for (k = 0; k < pri_rail->num; k++) { + if (pri_rail->val[k] >= vlvl[i]) { + cur_vlvl = pri_rail->val[k]; + found_match = true; + break; + } + } + + /* If we did not find a matching VLVL level then abort */ + if (!found_match) + return -EINVAL; + + /* + * Look for a secondary rail index whose VLVL value + * is greater than or equal to the VLVL value of the + * corresponding index of the primary rail + */ + for (j = 0; j < sec_rail->num; j++) { + if (sec_rail->val[j] >= cur_vlvl || + j + 1 == sec_rail->num) + break; + } + + if (j == sec_rail->num) + j = 0; + + votes[i] = ARC_VOTE_SET(k, j, cur_vlvl); + } + + return 0; +} + +static int rpmh_gmu_arc_votes_init(struct gmu_device *gmu, + struct rpmh_arc_vals *pri_rail, struct rpmh_arc_vals *sec_rail) +{ + /* Hardcoded values of GMU CX voltage levels */ + u16 gmu_cx_vlvl[] = { 0, RPMH_REGULATOR_LEVEL_MIN_SVS }; + + return setup_volt_dependency_tbl(gmu->rpmh_votes.cx_votes, pri_rail, + sec_rail, gmu_cx_vlvl, 2); +} + +/* + * rpmh_arc_votes_init() - initialized GX RPMh votes needed for rails + * voltage scaling by GMU. + * @device: Pointer to KGSL device + * @gmu: Pointer to GMU device + * @pri_rail: Pointer to primary power rail VLVL table + * @sec_rail: Pointer to second/dependent power rail VLVL table + * of pri_rail VLVL table + * @type: the type of the primary rail, GPU or GMU + */ +static int rpmh_arc_votes_init(struct kgsl_device *device, + struct gmu_device *gmu, struct rpmh_arc_vals *pri_rail, + struct rpmh_arc_vals *sec_rail, unsigned int type) +{ + unsigned int num_freqs; + u16 vlvl_tbl[MAX_GX_LEVELS]; + int i; + + if (type == GMU_ARC_VOTE) + return rpmh_gmu_arc_votes_init(gmu, pri_rail, sec_rail); + + num_freqs = gmu->num_gpupwrlevels; + + if (num_freqs > pri_rail->num || num_freqs > ARRAY_SIZE(vlvl_tbl)) { + dev_err(&gmu->pdev->dev, + "Defined more GPU DCVS levels than RPMh can support\n"); + return -EINVAL; + } + + memset(vlvl_tbl, 0, sizeof(vlvl_tbl)); + for (i = 0; i < num_freqs; i++) + vlvl_tbl[i] = gmu->pwrlevels[i].level; + + return setup_volt_dependency_tbl(gmu->rpmh_votes.gx_votes, pri_rail, + sec_rail, vlvl_tbl, num_freqs); +} + +struct bcm { + const char *name; + u32 buswidth; + u32 channels; + u32 unit; + u16 width; + u8 vcd; + bool fixed; +}; + +/* + * List of Bus Control Modules (BCMs) that need to be configured for the GPU + * to access DDR. 
For each bus level we will generate a vote for each BCM.
+ */
+static struct bcm a660_ddr_bcms[] = {
+	{ .name = "SH0", .buswidth = 16 },
+	{ .name = "MC0", .buswidth = 4 },
+	{ .name = "ACV", .fixed = true },
+};
+
+/* Same as above, but for the CNOC BCMs */
+static struct bcm a660_cnoc_bcms[] = {
+	{ .name = "CN0", .buswidth = 4 },
+};
+
+/* Generate a set of bandwidth votes for the list of BCMs */
+static void tcs_cmd_data(struct bcm *bcms, int count, u32 ab, u32 ib,
+		u32 *data)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		bool valid = true;
+		bool commit = false;
+		u64 avg, peak, x, y;
+
+		if (i == count - 1 || bcms[i].vcd != bcms[i + 1].vcd)
+			commit = true;
+
+		/*
+		 * On a660, the "ACV" y vote should be 0x08 if there is a valid
+		 * vote and 0x00 if not. This is kind of hacky and a660 specific
+		 * but we can clean it up when we add a new target
+		 */
+		if (bcms[i].fixed) {
+			if (!ab && !ib)
+				data[i] = BCM_TCS_CMD(commit, false, 0x0, 0x0);
+			else
+				data[i] = BCM_TCS_CMD(commit, true, 0x0, 0x8);
+			continue;
+		}
+
+		/* Multiply the bandwidth by the width of the connection */
+		avg = ((u64) ab) * bcms[i].width;
+
+		/* And then divide by the total width across channels */
+		do_div(avg, bcms[i].buswidth * bcms[i].channels);
+
+		peak = ((u64) ib) * bcms[i].width;
+		do_div(peak, bcms[i].buswidth);
+
+		/* Input bandwidth value is in KBps */
+		x = avg * 1000ULL;
+		do_div(x, bcms[i].unit);
+
+		/* Input bandwidth value is in KBps */
+		y = peak * 1000ULL;
+		do_div(y, bcms[i].unit);
+
+		/*
+		 * If a bandwidth value was specified but the calculation ends
+		 * rounding down to zero, set a minimum level
+		 */
+		if (ab && x == 0)
+			x = 1;
+
+		if (ib && y == 0)
+			y = 1;
+
+		x = min_t(u64, x, BCM_TCS_CMD_VOTE_MASK);
+		y = min_t(u64, y, BCM_TCS_CMD_VOTE_MASK);
+
+		if (!x && !y)
+			valid = false;
+
+		data[i] = BCM_TCS_CMD(commit, valid, x, y);
+	}
+}
+
+struct bcm_data {
+	__le32 unit;
+	__le16 width;
+	u8 vcd;
+	u8 reserved;
+};
+
+struct rpmh_bw_votes {
+	u32 wait_bitmask;
+	u32 num_cmds;
+	u32 *addrs;
+	u32 num_levels;
+	u32 **cmds;
+};
+
+static void free_rpmh_bw_votes(struct rpmh_bw_votes *votes)
+{
+	int i;
+
+	if (!votes)
+		return;
+
+	for (i = 0; votes->cmds && i < votes->num_levels; i++)
+		kfree(votes->cmds[i]);
+
+	kfree(votes->cmds);
+	kfree(votes->addrs);
+	kfree(votes);
+}
+
+/* Build the votes table from the specified bandwidth levels */
+static struct rpmh_bw_votes *build_rpmh_bw_votes(struct bcm *bcms,
+		int bcm_count, u32 *levels, int levels_count)
+{
+	struct rpmh_bw_votes *votes;
+	int i;
+
+	votes = kzalloc(sizeof(*votes), GFP_KERNEL);
+	if (!votes)
+		return ERR_PTR(-ENOMEM);
+
+	votes->addrs = kcalloc(bcm_count, sizeof(*votes->cmds), GFP_KERNEL);
+	if (!votes->addrs) {
+		free_rpmh_bw_votes(votes);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votes->cmds = kcalloc(levels_count, sizeof(*votes->cmds), GFP_KERNEL);
+	if (!votes->cmds) {
+		free_rpmh_bw_votes(votes);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	votes->num_cmds = bcm_count;
+	votes->num_levels = levels_count;
+
+	/* Get the cmd-db information for each BCM */
+	for (i = 0; i < bcm_count; i++) {
+		size_t l;
+		const struct bcm_data *data;
+
+		data = cmd_db_read_aux_data(bcms[i].name, &l);
+
+		votes->addrs[i] = cmd_db_read_addr(bcms[i].name);
+
+		bcms[i].unit = le32_to_cpu(data->unit);
+		bcms[i].width = le16_to_cpu(data->width);
+		bcms[i].vcd = data->vcd;
+	}
+
+	for (i = 0; i < bcm_count; i++) {
+		if (i == (bcm_count - 1) || bcms[i].vcd != bcms[i + 1].vcd)
+			votes->wait_bitmask |= (1 << i);
+	}
+
+	for (i = 0; i < levels_count; i++) {
+		votes->cmds[i] =
kcalloc(bcm_count, sizeof(u32), GFP_KERNEL); + if (!votes->cmds[i]) { + free_rpmh_bw_votes(votes); + return ERR_PTR(-ENOMEM); + } + + tcs_cmd_data(bcms, bcm_count, 0, levels[i], votes->cmds[i]); + } + + return votes; +} + +static void build_bwtable_cmd_cache(struct hfi_bwtable_cmd *cmd, + struct rpmh_bw_votes *ddr, struct rpmh_bw_votes *cnoc) +{ + unsigned int i, j; + + cmd->hdr = 0xFFFFFFFF; + cmd->bw_level_num = ddr->num_levels; + cmd->ddr_cmds_num = ddr->num_cmds; + cmd->ddr_wait_bitmask = ddr->wait_bitmask; + + for (i = 0; i < ddr->num_cmds; i++) + cmd->ddr_cmd_addrs[i] = ddr->addrs[i]; + + for (i = 0; i < ddr->num_levels; i++) + for (j = 0; j < ddr->num_cmds; j++) + cmd->ddr_cmd_data[i][j] = (u32) ddr->cmds[i][j]; + + if (!cnoc) + return; + + cmd->cnoc_cmds_num = cnoc->num_cmds; + cmd->cnoc_wait_bitmask = cnoc->wait_bitmask; + + for (i = 0; i < cnoc->num_cmds; i++) + cmd->cnoc_cmd_addrs[i] = cnoc->addrs[i]; + + for (i = 0; i < cnoc->num_levels; i++) + for (j = 0; j < cnoc->num_cmds; j++) + cmd->cnoc_cmd_data[i][j] = (u32) cnoc->cmds[i][j]; +} + +static int a6xx_gmu_bus_vote_init(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct rpmh_bw_votes *ddr, *cnoc = NULL; + u32 *cnoc_table; + u32 count; + + /* Build the DDR votes */ + ddr = build_rpmh_bw_votes(a660_ddr_bcms, ARRAY_SIZE(a660_ddr_bcms), + pwr->ddr_table, pwr->ddr_table_count); + if (IS_ERR(ddr)) + return PTR_ERR(ddr); + + /* Get the CNOC table */ + cnoc_table = kgsl_bus_get_table(device->pdev, "qcom,bus-table-cnoc", + &count); + + /* And build the votes for that, if it exists */ + if (count > 0) + cnoc = build_rpmh_bw_votes(a660_cnoc_bcms, + ARRAY_SIZE(a660_cnoc_bcms), cnoc_table, count); + kfree(cnoc_table); + + if (IS_ERR(cnoc)) { + free_rpmh_bw_votes(ddr); + return PTR_ERR(cnoc); + } + + /* Build the HFI command once */ + build_bwtable_cmd_cache(&gmu->hfi.bwtbl_cmd, ddr, cnoc); + + free_rpmh_bw_votes(ddr); + free_rpmh_bw_votes(cnoc); + + return 0; +} + +static int a6xx_gmu_rpmh_init(struct kgsl_device *device, + struct gmu_device *gmu) +{ + struct rpmh_arc_vals gfx_arc, cx_arc, mx_arc; + int ret; + + /* Initialize BW tables */ + ret = a6xx_gmu_bus_vote_init(device); + if (ret) + return ret; + + /* Populate GPU and GMU frequency vote table */ + ret = rpmh_arc_cmds(gmu, &gfx_arc, "gfx.lvl"); + if (ret) + return ret; + + ret = rpmh_arc_cmds(gmu, &cx_arc, "cx.lvl"); + if (ret) + return ret; + + ret = rpmh_arc_cmds(gmu, &mx_arc, "mx.lvl"); + if (ret) + return ret; + + ret = rpmh_arc_votes_init(device, gmu, &gfx_arc, &mx_arc, GPU_ARC_VOTE); + if (ret) + return ret; + + return rpmh_arc_votes_init(device, gmu, &cx_arc, &mx_arc, GMU_ARC_VOTE); +} + + +static int a6xx_gmu_reg_probe(struct kgsl_device *device) +{ + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + struct resource *res; + + res = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM, + "kgsl_gmu_reg"); + if (!res) { + dev_err(&gmu->pdev->dev, "The GMU register region isn't defined\n"); + return -ENODEV; + } + + device->gmu_core.gmu2gpu_offset = (res->start - device->reg_phys) >> 2; + device->gmu_core.reg_len = resource_size(res); + + /* + * We can't use devm_ioremap_resource here because we purposely double + * map the gpu_cc registers for debugging purposes + */ + device->gmu_core.reg_virt = devm_ioremap(&gmu->pdev->dev, res->start, + resource_size(res)); + + if (!device->gmu_core.reg_virt) { + dev_err(&gmu->pdev->dev, "Unable to map the GMU registers\n"); + return -ENOMEM; + } + + 
return 0; +} + +static int a6xx_gmu_tcm_init(struct gmu_device *gmu) +{ + struct gmu_memdesc *md; + + /* Reserve a memdesc for ITCM. No actually memory allocated */ + md = a6xx_gmu_kmem_allocate(gmu, GMU_ITCM, gmu->vma[GMU_ITCM].start, + gmu->vma[GMU_ITCM].size, 0); + if (IS_ERR(md)) + return PTR_ERR(md); + + /* Reserve a memdesc for DTCM. No actually memory allocated */ + md = a6xx_gmu_kmem_allocate(gmu, GMU_DTCM, gmu->vma[GMU_DTCM].start, + gmu->vma[GMU_DTCM].size, 0); + + return PTR_ERR_OR_ZERO(md); +} + + +static int a6xx_gmu_iommu_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long addr, int flags, void *token, + const char *name) +{ + char *fault_type = "unknown"; + + if (flags & IOMMU_FAULT_TRANSLATION) + fault_type = "translation"; + else if (flags & IOMMU_FAULT_PERMISSION) + fault_type = "permission"; + else if (flags & IOMMU_FAULT_EXTERNAL) + fault_type = "external"; + else if (flags & IOMMU_FAULT_TRANSACTION_STALLED) + fault_type = "transaction stalled"; + + dev_err(dev, "GMU fault addr = %lX, context=%s (%s %s fault)\n", + addr, name, + (flags & IOMMU_FAULT_WRITE) ? "write" : "read", + fault_type); + + return 0; +} + +static int a6xx_gmu_kernel_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long addr, int flags, void *token) +{ + return a6xx_gmu_iommu_fault_handler(domain, dev, addr, flags, token, + "gmu_kernel"); +} + +static int a6xx_gmu_user_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long addr, int flags, void *token) +{ + return a6xx_gmu_iommu_fault_handler(domain, dev, addr, flags, token, + "gmu_user"); +} + +static int a6xx_gmu_iommu_cb_probe(struct gmu_device *gmu, + const char *name, struct gmu_iommu_context *ctx, + struct device_node *parent, iommu_fault_handler_t handler) +{ + struct device_node *node = of_get_child_by_name(parent, name); + struct platform_device *pdev; + int ret; + + if (!node) + return -ENODEV; + + pdev = of_find_device_by_node(node); + ret = of_dma_configure(&pdev->dev, node, true); + of_node_put(node); + + if (ret) { + platform_device_put(pdev); + return ret; + } + + ctx->pdev = pdev; + ctx->domain = iommu_domain_alloc(&platform_bus_type); + if (ctx->domain == NULL) { + dev_err(&gmu->pdev->dev, "gmu iommu fail to alloc %s domain\n", + ctx->name); + platform_device_put(pdev); + return -ENODEV; + } + + ret = iommu_attach_device(ctx->domain, &pdev->dev); + if (!ret) { + iommu_set_fault_handler(ctx->domain, handler, ctx); + return 0; + } + + dev_err(&gmu->pdev->dev, + "gmu iommu fail to attach %s device\n", ctx->name); + iommu_domain_free(ctx->domain); + ctx->domain = NULL; + platform_device_put(pdev); + + return ret; +} + +static void a6xx_gmu_iommu_cb_close(struct gmu_iommu_context *ctx) +{ + if (!ctx->domain) + return; + + iommu_detach_device(ctx->domain, &ctx->pdev->dev); + iommu_domain_free(ctx->domain); + + platform_device_put(ctx->pdev); + ctx->domain = NULL; +} + +static int a6xx_gmu_iommu_init(struct gmu_device *gmu, struct device_node *node) +{ + int ret; + + devm_of_platform_populate(&gmu->pdev->dev); + + ret = a6xx_gmu_iommu_cb_probe(gmu, "gmu_user", + &a6xx_gmu_ctx[GMU_CONTEXT_USER], node, + a6xx_gmu_user_fault_handler); + if (ret) + return ret; + + return a6xx_gmu_iommu_cb_probe(gmu, "gmu_kernel", + &a6xx_gmu_ctx[GMU_CONTEXT_KERNEL], node, + a6xx_gmu_kernel_fault_handler); +} + +static int a6xx_gmu_regulators_probe(struct gmu_device *gmu, + struct platform_device *pdev) +{ + gmu->cx_gdsc = devm_regulator_get(&pdev->dev, "vddcx"); + if 
(IS_ERR(gmu->cx_gdsc)) { + if (PTR_ERR(gmu->cx_gdsc) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Couldn't get the vddcx gdsc\n"); + return PTR_ERR(gmu->cx_gdsc); + } + + gmu->gx_gdsc = devm_regulator_get(&pdev->dev, "vdd"); + if (IS_ERR(gmu->gx_gdsc)) { + if (PTR_ERR(gmu->gx_gdsc) != -EPROBE_DEFER) + dev_err(&pdev->dev, "Couldn't get the vdd gdsc\n"); + return PTR_ERR(gmu->gx_gdsc); + } + + return 0; +} + +static void a6xx_gmu_remove(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct gmu_device *gmu = KGSL_GMU_DEVICE(device); + + tasklet_kill(&gmu->hfi.tasklet); + + a6xx_gmu_stop(device); + + if (!IS_ERR_OR_NULL(gmu->mailbox.channel)) + mbox_free_channel(gmu->mailbox.channel); + + adreno_dev->acd_enabled = false; + + if (gmu->fw_image) + release_firmware(gmu->fw_image); + + a6xx_gmu_memory_close(gmu); + + memset(&device->gmu_core, 0, sizeof(device->gmu_core)); +} + +static int a6xx_gmu_probe(struct kgsl_device *device, + struct platform_device *pdev) +{ + struct gmu_device *gmu; + struct kgsl_hfi *hfi; + struct kgsl_pwrctrl *pwr = &device->pwrctrl; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + int i = 0, ret = -ENXIO, index; + + gmu = devm_kzalloc(&pdev->dev, sizeof(*gmu), GFP_KERNEL); + if (!gmu) + return -ENOMEM; + + gmu->pdev = pdev; + + device->gmu_core.ptr = gmu; + hfi = &gmu->hfi; + gmu->load_mode = TCM_BOOT; + + dma_set_coherent_mask(&gmu->pdev->dev, DMA_BIT_MASK(64)); + gmu->pdev->dev.dma_mask = &gmu->pdev->dev.coherent_dma_mask; + set_dma_ops(&gmu->pdev->dev, NULL); + + /* Set up GMU regulators */ + ret = a6xx_gmu_regulators_probe(gmu, pdev); + if (ret) + return ret; + + ret = devm_clk_bulk_get_all(&pdev->dev, &gmu->clks); + if (ret < 0) + return ret; + + gmu->num_clks = ret; + + /* Set up GMU IOMMU and shared memory with GMU */ + ret = a6xx_gmu_iommu_init(gmu, pdev->dev.of_node); + if (ret) + goto error; + + if (adreno_is_a650_family(adreno_dev)) + gmu->vma = a6xx_gmu_vma; + else + gmu->vma = a6xx_gmu_vma_legacy; + + ret = a6xx_gmu_tcm_init(gmu); + if (ret) + goto error; + + /* Map and reserve GMU CSRs registers */ + ret = a6xx_gmu_reg_probe(device); + if (ret) + goto error; + + /* Initialize HFI and GMU interrupts */ + ret = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq", hfi_irq_handler, + device); + if (ret < 0) + goto error; + + hfi->hfi_interrupt_num = ret; + + ret = kgsl_request_irq(gmu->pdev, "kgsl_gmu_irq", a6xx_gmu_irq_handler, + device); + if (ret < 0) + goto error; + + gmu->gmu_interrupt_num = ret; + + /* Don't enable GMU interrupts until GMU started */ + /* We cannot use irq_disable because it writes registers */ + disable_irq(gmu->gmu_interrupt_num); + disable_irq(hfi->hfi_interrupt_num); + + tasklet_init(&hfi->tasklet, hfi_receiver, (unsigned long) gmu); + hfi->kgsldev = device; + + if (WARN(pwr->num_pwrlevels + 1 > ARRAY_SIZE(gmu->pwrlevels), + "Too many GPU powerlevels for the GMU HFI\n")) { + ret = -EINVAL; + goto error; + } + + /* Add a dummy level for "off" because the GMU expects it */ + gmu->pwrlevels[0].freq = 0; + gmu->pwrlevels[0].level = 0; + + /* GMU power levels are in ascending order */ + for (index = 1, i = pwr->num_pwrlevels - 1; i >= 0; i--, index++) { + gmu->pwrlevels[index].freq = pwr->pwrlevels[i].gpu_freq; + gmu->pwrlevels[index].level = pwr->pwrlevels[i].voltage_level; + } + + gmu->num_gpupwrlevels = pwr->num_pwrlevels + 1; + + /* Populates RPMh configurations */ + ret = a6xx_gmu_rpmh_init(device, gmu); + if (ret) + goto error; + + /* Set up GMU idle states */ + if 
(ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT)) + gmu->idle_level = GPU_HW_MIN_VOLT; + else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP)) + gmu->idle_level = GPU_HW_NAP; + else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) + gmu->idle_level = GPU_HW_IFPC; + else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC)) + gmu->idle_level = GPU_HW_SPTP_PC; + else + gmu->idle_level = GPU_HW_ACTIVE; + + a6xx_gmu_acd_probe(device, gmu, pdev->dev.of_node); + + + if (a6xx_gmu_scales_bandwidth(device)) + pwr->bus_set = a6xx_gmu_bus_set; + + set_bit(GMU_ENABLED, &device->gmu_core.flags); + + device->gmu_core.core_ops = &a6xx_gmu_ops; + device->gmu_core.dev_ops = &a6xx_gmudev; + + return 0; + +error: + a6xx_gmu_remove(device); + return ret; +} + + + +static int a6xx_gmu_bind(struct device *dev, struct device *master, void *data) +{ + struct kgsl_device *device = dev_get_drvdata(master); + + return a6xx_gmu_probe(device, to_platform_device(dev)); +} + +static void a6xx_gmu_unbind(struct device *dev, struct device *master, + void *data) +{ + struct kgsl_device *device = dev_get_drvdata(master); + + a6xx_gmu_remove(device); +} + +static const struct component_ops a6xx_gmu_component_ops = { + .bind = a6xx_gmu_bind, + .unbind = a6xx_gmu_unbind, +}; + +static int a6xx_gmu_probe_dev(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &a6xx_gmu_component_ops); +} + +static int a6xx_gmu_remove_dev(struct platform_device *pdev) +{ + component_del(&pdev->dev, &a6xx_gmu_component_ops); + return 0; +} + +static const struct of_device_id a6xx_gmu_match_table[] = { + { .compatible = "qcom,gpu-gmu" }, + { }, +}; + +struct platform_driver a6xx_gmu_driver = { + .probe = a6xx_gmu_probe_dev, + .remove = a6xx_gmu_remove_dev, + .driver = { + .name = "adreno-a6xx-gmu", + .of_match_table = a6xx_gmu_match_table, + }, +}; diff --git a/drivers/gpu/msm/kgsl_bus.c b/drivers/gpu/msm/kgsl_bus.c index 84d71dfa30ad..94dd5d68011f 100644 --- a/drivers/gpu/msm/kgsl_bus.c +++ b/drivers/gpu/msm/kgsl_bus.c @@ -127,7 +127,7 @@ u32 *kgsl_bus_get_table(struct platform_device *pdev, if (num <= 0) return ERR_PTR(-EINVAL); - levels = devm_kcalloc(&pdev->dev, num, sizeof(*levels), GFP_KERNEL); + levels = kcalloc(num, sizeof(*levels), GFP_KERNEL); if (!levels) return ERR_PTR(-ENOMEM); @@ -171,6 +171,8 @@ done: pwr->icc_path = of_icc_get(&pdev->dev, NULL); if (IS_ERR(pwr->icc_path) && !gmu_core_scales_bandwidth(device)) { WARN(1, "The CPU has no way to set the GPU bus levels\n"); + + kfree(pwr->ddr_table); return PTR_ERR(pwr->icc_path); } @@ -181,5 +183,6 @@ done: void kgsl_bus_close(struct kgsl_device *device) { + kfree(device->pwrctrl.ddr_table); icc_put(device->pwrctrl.icc_path); } diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h index 062cb362bfaa..c385c5e456ca 100644 --- a/drivers/gpu/msm/kgsl_gmu.h +++ b/drivers/gpu/msm/kgsl_gmu.h @@ -81,7 +81,6 @@ struct gmu_block_header { /* GMU memdesc entries */ #define GMU_KERNEL_ENTRIES 16 -extern struct gmu_dev_ops adreno_a6xx_gmudev; #define KGSL_GMU_DEVICE(_a) ((struct gmu_device *)((_a)->gmu_core.ptr)) enum gmu_mem_type { @@ -96,7 +95,7 @@ enum gmu_mem_type { }; enum gmu_context_index { - GMU_CONTEXT_USER = 0, + GMU_CONTEXT_USER, GMU_CONTEXT_KERNEL, }; @@ -208,12 +207,4 @@ struct gmu_device { atomic_t cm3_fault; }; -struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu, - unsigned int addr, unsigned int size); -unsigned int gmu_get_memtype_base(struct gmu_device *gmu, - enum gmu_mem_type type); - -int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header 
*blk); -int gmu_cache_finalize(struct kgsl_device *device); - #endif /* __KGSL_GMU_H */ diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c index 0ebb4f09ed13..aded346fa01e 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.c +++ b/drivers/gpu/msm/kgsl_gmu_core.c @@ -11,7 +11,7 @@ #include "kgsl_trace.h" static const struct of_device_id gmu_match_table[] = { - { .compatible = "qcom,gpu-gmu", .data = &kgsl_gmu_driver }, + { .compatible = "qcom,gpu-gmu", .data = &a6xx_gmu_driver }, { .compatible = "qcom,gpu-rgmu", .data = &kgsl_rgmu_driver }, {}, }; diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h index a2c86409eae8..ff82d5619469 100644 --- a/drivers/gpu/msm/kgsl_gmu_core.h +++ b/drivers/gpu/msm/kgsl_gmu_core.h @@ -161,7 +161,7 @@ struct gmu_core_device { unsigned long flags; }; -extern struct platform_driver kgsl_gmu_driver; +extern struct platform_driver a6xx_gmu_driver; extern struct platform_driver kgsl_rgmu_driver; /* GMU core functions */
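Illustrative sketch (not part of the patch above): a6xx_gmu_probe_dev() registers the GMU device as a component with component_add(), and gmu_match_table in kgsl_gmu_core.c now points at a6xx_gmu_driver, so the target-specific GMU is bound through the kernel's component framework. The code below only illustrates the master side of that binding pattern; the KGSL core's actual master code is not shown in this patch, and every example_* name here is hypothetical.

/*
 * Minimal sketch, assuming a master platform device that wants to bind
 * the "qcom,gpu-gmu" component registered by a6xx_gmu_probe_dev().
 */
#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_compare_of(struct device *dev, void *data)
{
	/* Match a registered component by its device tree node */
	return dev->of_node == data;
}

static void example_release_of(struct device *dev, void *data)
{
	/* Drop the node reference taken when the match entry was added */
	of_node_put(data);
}

static int example_master_bind(struct device *dev)
{
	/* Invokes the bind callback of each matched component */
	return component_bind_all(dev, dev_get_drvdata(dev));
}

static void example_master_unbind(struct device *dev)
{
	component_unbind_all(dev, dev_get_drvdata(dev));
}

static const struct component_master_ops example_master_ops = {
	.bind = example_master_bind,
	.unbind = example_master_unbind,
};

static int example_master_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;

	/* Find the GMU node by the compatible string used in gmu_match_table */
	np = of_find_compatible_node(NULL, NULL, "qcom,gpu-gmu");
	if (!np)
		return -ENODEV;

	component_match_add_release(&pdev->dev, &match, example_release_of,
			example_compare_of, np);

	return component_master_add_with_match(&pdev->dev,
			&example_master_ops, match);
}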