msm: kgsl: Create adreno power operations
This enables us to define target-specific device power up/down sequences. Also create adreno_power_cycle() for settings that only take effect across a power cycle: power down the device, change the setting, and then resume it.

Change-Id: Ie6a0feb224f0b0babcd0249c812674c31dc6c4ab
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent f707fc737d
commit e7c78f113f
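As a quick illustration of the helper described above, the sketch below mirrors the _hwcg_store() change in this patch: a store handler whose setting only takes effect across a power cycle hands the flag to adreno_power_cycle_bool(), which suspends through the target's pm_suspend op, updates the flag, and resumes through pm_resume. The handler name here is a hypothetical placeholder; the helper and the flag are the ones touched by this patch.

static int example_hwcg_store(struct adreno_device *adreno_dev, bool val)
{
	/* Nothing to do if the requested value is already in effect */
	if (adreno_dev->hwcg_enabled == val)
		return 0;

	/*
	 * The flag is only sampled during the power-up sequence, so change
	 * it between a target-specific suspend and resume.
	 */
	return adreno_power_cycle_bool(adreno_dev,
			&adreno_dev->hwcg_enabled, val);
}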
@@ -1546,11 +1546,10 @@ static void adreno_unbind(struct device *dev)
|
||||
clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
|
||||
}
|
||||
|
||||
static int adreno_pm_resume(struct device *dev)
|
||||
static void adreno_resume(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = dev_get_drvdata(dev);
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
if (device->state == KGSL_STATE_SUSPEND) {
|
||||
adreno_dispatcher_unhalt(device);
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
@@ -1565,19 +1564,41 @@ static int adreno_pm_resume(struct device *dev)
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
dev_err(device->dev, "resume invoked without a suspend\n");
|
||||
}
|
||||
}
|
||||
|
||||
static int adreno_pm_resume(struct device *dev)
|
||||
{
|
||||
struct kgsl_device *device = dev_get_drvdata(dev);
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
ops->pm_resume(adreno_dev);
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int adreno_suspend(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
int status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
|
||||
if (!status && device->state == KGSL_STATE_SUSPEND)
|
||||
adreno_dispatcher_halt(device);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static int adreno_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct kgsl_device *device = dev_get_drvdata(dev);
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
int status;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
status = kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
if (!status && device->state == KGSL_STATE_SUSPEND)
|
||||
adreno_dispatcher_halt(device);
|
||||
status = ops->pm_suspend(adreno_dev);
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return status;
|
||||
@@ -1825,14 +1846,15 @@ static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
|
||||
}
|
||||
}
|
||||
|
||||
static int adreno_first_open(struct kgsl_device *device)
|
||||
static int adreno_open(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* active_cnt special case: we are starting up for the first
|
||||
* time, so use this sequence instead of the kgsl_pwrctrl_wake()
|
||||
* which will be called by kgsl_active_count_get().
|
||||
* which will be called by adreno_active_count_get().
|
||||
*/
|
||||
atomic_inc(&device->active_cnt);
|
||||
|
||||
@@ -1850,7 +1872,7 @@ static int adreno_first_open(struct kgsl_device *device)
|
||||
|
||||
complete_all(&device->hwaccess_gate);
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
|
||||
return 0;
|
||||
err:
|
||||
@@ -1860,8 +1882,18 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int adreno_last_close(struct kgsl_device *device)
|
||||
static int adreno_first_open(struct kgsl_device *device)
|
||||
{
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
|
||||
return ops->first_open(adreno_dev);
|
||||
}
|
||||
|
||||
static int adreno_close(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
|
||||
/*
|
||||
* Wait up to 1 second for the active count to go low
|
||||
* and then start complaining about it
|
||||
@@ -1878,6 +1910,84 @@ static int adreno_last_close(struct kgsl_device *device)
|
||||
return kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
|
||||
}
|
||||
|
||||
static int adreno_last_close(struct kgsl_device *device)
|
||||
{
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
|
||||
return ops->last_close(adreno_dev);
|
||||
}
|
||||
|
||||
static int adreno_pwrctrl_active_count_get(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(!mutex_is_locked(&device->mutex)))
|
||||
return -EINVAL;
|
||||
|
||||
if ((atomic_read(&device->active_cnt) == 0) &&
|
||||
(device->state != KGSL_STATE_ACTIVE)) {
|
||||
mutex_unlock(&device->mutex);
|
||||
wait_for_completion(&device->hwaccess_gate);
|
||||
mutex_lock(&device->mutex);
|
||||
device->pwrctrl.superfast = true;
|
||||
ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
|
||||
}
|
||||
if (ret == 0)
|
||||
atomic_inc(&device->active_cnt);
|
||||
trace_kgsl_active_count(device,
|
||||
(unsigned long) __builtin_return_address(0));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void adreno_pwrctrl_active_count_put(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
|
||||
if (WARN_ON(!mutex_is_locked(&device->mutex)))
|
||||
return;
|
||||
|
||||
if (WARN(atomic_read(&device->active_cnt) == 0,
|
||||
"Unbalanced get/put calls to KGSL active count\n"))
|
||||
return;
|
||||
|
||||
if (atomic_dec_and_test(&device->active_cnt)) {
|
||||
bool nap_on = !(device->pwrctrl.ctrl_flags &
|
||||
BIT(KGSL_PWRFLAGS_NAP_OFF));
|
||||
if (nap_on && device->state == KGSL_STATE_ACTIVE &&
|
||||
device->requested_state == KGSL_STATE_NONE) {
|
||||
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
|
||||
kgsl_schedule_work(&device->idle_check_ws);
|
||||
} else if (!nap_on) {
|
||||
kgsl_pwrscale_update_stats(device);
|
||||
kgsl_pwrscale_update(device);
|
||||
}
|
||||
|
||||
mod_timer(&device->idle_timer,
|
||||
jiffies + device->pwrctrl.interval_timeout);
|
||||
}
|
||||
|
||||
trace_kgsl_active_count(device,
|
||||
(unsigned long) __builtin_return_address(0));
|
||||
|
||||
wake_up(&device->active_cnt_wq);
|
||||
}
|
||||
|
||||
int adreno_active_count_get(struct adreno_device *adreno_dev)
|
||||
{
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
|
||||
return ops->active_count_get(adreno_dev);
|
||||
}
|
||||
|
||||
void adreno_active_count_put(struct adreno_device *adreno_dev)
|
||||
{
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
|
||||
ops->active_count_put(adreno_dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* _adreno_start - Power up the GPU and prepare to accept commands
|
||||
* @adreno_dev: Pointer to an adreno_device structure
|
||||
@@ -2597,9 +2707,9 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
|
||||
if (enable) {
|
||||
device->pwrctrl.ctrl_flags = 0;
|
||||
|
||||
if (!kgsl_active_count_get(device)) {
|
||||
if (!adreno_active_count_get(adreno_dev)) {
|
||||
adreno_fault_detect_start(adreno_dev);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
|
||||
kgsl_pwrscale_enable(device);
|
||||
@@ -3662,6 +3772,67 @@ static void adreno_drawctxt_sched(struct kgsl_device *device,
|
||||
adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
|
||||
}
|
||||
|
||||
int adreno_power_cycle(struct adreno_device *adreno_dev,
|
||||
void (*callback)(struct adreno_device *adreno_dev, void *priv),
|
||||
void *priv)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
ret = ops->pm_suspend(adreno_dev);
|
||||
|
||||
if (!ret) {
|
||||
callback(adreno_dev, priv);
|
||||
ops->pm_resume(adreno_dev);
|
||||
}
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int adreno_power_cycle_bool(struct adreno_device *adreno_dev,
|
||||
bool *flag, bool val)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
ret = ops->pm_suspend(adreno_dev);
|
||||
|
||||
if (!ret) {
|
||||
*flag = val;
|
||||
ops->pm_resume(adreno_dev);
|
||||
}
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int adreno_power_cycle_u32(struct adreno_device *adreno_dev,
|
||||
u32 *flag, u32 val)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
const struct adreno_power_ops *ops = ADRENO_POWER_OPS(adreno_dev);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
ret = ops->pm_suspend(adreno_dev);
|
||||
|
||||
if (!ret) {
|
||||
*flag = val;
|
||||
ops->pm_resume(adreno_dev);
|
||||
}
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct kgsl_functable adreno_functable = {
|
||||
/* Mandatory functions */
|
||||
.regread = adreno_regread,
|
||||
@@ -3712,6 +3883,15 @@ static const struct component_master_ops adreno_ops = {
|
||||
.unbind = adreno_unbind,
|
||||
};
|
||||
|
||||
const struct adreno_power_ops adreno_power_operations = {
|
||||
.first_open = adreno_open,
|
||||
.last_close = adreno_close,
|
||||
.active_count_get = adreno_pwrctrl_active_count_get,
|
||||
.active_count_put = adreno_pwrctrl_active_count_put,
|
||||
.pm_suspend = adreno_suspend,
|
||||
.pm_resume = adreno_resume,
|
||||
};
|
||||
|
||||
static const struct of_device_id adreno_gmu_match[] = {
|
||||
{ .compatible = "qcom,gpu-gmu" },
|
||||
{ .compatible = "qcom,gpu-rgmu" },
|
||||
|
@@ -27,6 +27,12 @@
|
||||
/* ADRENO_GPU_DEVICE - Given an adreno device return the GPU specific struct */
|
||||
#define ADRENO_GPU_DEVICE(_a) ((_a)->gpucore->gpudev)
|
||||
|
||||
/*
|
||||
* ADRENO_POWER_OPS - Given an adreno device return the GPU specific power
|
||||
* ops
|
||||
*/
|
||||
#define ADRENO_POWER_OPS(_a) ((_a)->gpucore->gpudev->power_ops)
|
||||
|
||||
#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
|
||||
#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
|
||||
#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
|
||||
@@ -322,6 +328,37 @@ struct adreno_reglist {
|
||||
u32 value;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct adreno_power_ops - Container for target specific power up/down
|
||||
* sequences
|
||||
*/
|
||||
struct adreno_power_ops {
|
||||
/**
|
||||
* @first_open: Target specific function triggered when first kgsl
|
||||
* instance is opened
|
||||
*/
|
||||
int (*first_open)(struct adreno_device *adreno_dev);
|
||||
/**
|
||||
* @last_close: Target specific function triggered when last kgsl
|
||||
* instance is closed
|
||||
*/
|
||||
int (*last_close)(struct adreno_device *adreno_dev);
|
||||
/**
|
||||
* @active_count_get: Target specific function to keep gpu from power
|
||||
* collapsing
|
||||
*/
|
||||
int (*active_count_get)(struct adreno_device *adreno_dev);
|
||||
/**
|
||||
* @active_count_put: Target specific function to allow gpu to power
|
||||
* collapse
|
||||
*/
|
||||
void (*active_count_put)(struct adreno_device *adreno_dev);
|
||||
/** @pm_suspend: Target specific function to suspend the driver */
|
||||
int (*pm_suspend)(struct adreno_device *adreno_dev);
|
||||
/** @pm_resume: Target specific function to resume the driver */
|
||||
void (*pm_resume)(struct adreno_device *adreno_dev);
|
||||
};
|
||||
|
||||
/**
|
||||
* struct adreno_gpu_core - A specific GPU core definition
|
||||
* @gpurev: Unique GPU revision identifier
|
||||
@@ -794,6 +831,11 @@ struct adreno_gpudev {
|
||||
bool update_reg);
|
||||
/** @read_alwayson: Return the current value of the alwayson counter */
|
||||
u64 (*read_alwayson)(struct adreno_device *adreno_dev);
|
||||
/**
|
||||
* @power_ops: Target specific function pointers to power up/down the
|
||||
* gpu
|
||||
*/
|
||||
const struct adreno_power_ops *power_ops;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -855,6 +897,7 @@ struct adreno_ft_perf_counters {
|
||||
unsigned int countable;
|
||||
};
|
||||
|
||||
extern const struct adreno_power_ops adreno_power_operations;
|
||||
extern unsigned int *adreno_ft_regs;
|
||||
extern unsigned int adreno_ft_regs_num;
|
||||
extern unsigned int *adreno_ft_regs_val;
|
||||
@@ -950,6 +993,30 @@ void adreno_isense_regread(struct adreno_device *adreno_dev,
|
||||
*/
|
||||
bool adreno_irq_pending(struct adreno_device *adreno_dev);
|
||||
|
||||
/**
|
||||
* adreno_active_count_get - Wrapper for target specific active count get
|
||||
* @adreno_dev: pointer to the adreno device
|
||||
*
|
||||
* Increase the active count for the KGSL device and execute slumber exit
|
||||
* sequence if this is the first reference. Code paths that need to touch the
|
||||
* hardware or wait for the hardware to complete an operation must hold an
|
||||
* active count reference until they are finished. The device mutex must be held
|
||||
* while calling this function.
|
||||
*
|
||||
* Return: 0 on success or negative error on failure to wake up the device
|
||||
*/
|
||||
int adreno_active_count_get(struct adreno_device *adreno_dev);
|
||||
|
||||
/**
|
||||
* adreno_active_count_put - Wrapper for target specific active count put
|
||||
* @adreno_dev: pointer to the adreno device
|
||||
*
|
||||
* Decrease the active or the KGSL device and schedule the idle thread to
|
||||
* execute the slumber sequence if there are no remaining references. The
|
||||
* device mutex must be held while calling this function.
|
||||
*/
|
||||
void adreno_active_count_put(struct adreno_device *adreno_dev);
|
||||
|
||||
#define ADRENO_TARGET(_name, _id) \
|
||||
static inline int adreno_is_##_name(struct adreno_device *adreno_dev) \
|
||||
{ \
|
||||
@@ -1556,28 +1623,33 @@ static inline unsigned int counter_delta(struct kgsl_device *device,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int adreno_perfcntr_active_oob_get(struct kgsl_device *device)
|
||||
static inline int adreno_perfcntr_active_oob_get(
|
||||
struct adreno_device *adreno_dev)
|
||||
{
|
||||
int ret = kgsl_active_count_get(device);
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
int ret = adreno_active_count_get(adreno_dev);
|
||||
|
||||
if (!ret) {
|
||||
ret = gmu_core_dev_oob_set(device, oob_perfcntr);
|
||||
if (ret) {
|
||||
gmu_core_snapshot(device);
|
||||
adreno_set_gpu_fault(ADRENO_DEVICE(device),
|
||||
adreno_set_gpu_fault(adreno_dev,
|
||||
ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
|
||||
adreno_dispatcher_schedule(device);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void adreno_perfcntr_active_oob_put(struct kgsl_device *device)
|
||||
static inline void adreno_perfcntr_active_oob_put(
|
||||
struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
|
||||
gmu_core_dev_oob_clear(device, oob_perfcntr);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
|
||||
static inline bool adreno_has_sptprac_gdsc(struct adreno_device *adreno_dev)
|
||||
@@ -1740,4 +1812,49 @@ irqreturn_t adreno_irq_callbacks(struct adreno_device *adreno_dev,
|
||||
int adreno_device_probe(struct platform_device *pdev,
|
||||
struct adreno_device *adreno_dev);
|
||||
|
||||
/**
|
||||
* adreno_power_cycle - Suspend and resume the device
|
||||
* @adreno_dev: Pointer to the adreno device
|
||||
* @callback: Function that needs to be executed
|
||||
* @priv: Argument to be passed to the callback
|
||||
*
|
||||
* Certain properties that can be set via sysfs need to power
|
||||
* cycle the device to take effect. This function suspends
|
||||
* the device, executes the callback, and resumes the device.
|
||||
*
|
||||
* Return: 0 on success or negative on failure
|
||||
*/
|
||||
int adreno_power_cycle(struct adreno_device *adreno_dev,
|
||||
void (*callback)(struct adreno_device *adreno_dev, void *priv),
|
||||
void *priv);
|
||||
|
||||
/**
|
||||
* adreno_power_cycle_bool - Power cycle the device to change device setting
|
||||
* @adreno_dev: Pointer to the adreno device
|
||||
* @flag: Flag that needs to be set
|
||||
* @val: The value flag should be set to
|
||||
*
|
||||
* Certain properties that can be set via sysfs need to power cycle the device
|
||||
* to take effect. This function suspends the device, sets the flag, and
|
||||
* resumes the device.
|
||||
*
|
||||
* Return: 0 on success or negative on failure
|
||||
*/
|
||||
int adreno_power_cycle_bool(struct adreno_device *adreno_dev,
|
||||
bool *flag, bool val);
|
||||
|
||||
/**
|
||||
* adreno_power_cycle_u32 - Power cycle the device to change device setting
|
||||
* @adreno_dev: Pointer to the adreno device
|
||||
* @flag: Flag that needs to be set
|
||||
* @val: The value flag should be set to
|
||||
*
|
||||
* Certain properties that can be set via sysfs need to power cycle the device
|
||||
* to take effect. This function suspends the device, sets the flag, and
|
||||
* resumes the device.
|
||||
*
|
||||
* Return: 0 on success or negative on failure
|
||||
*/
|
||||
int adreno_power_cycle_u32(struct adreno_device *adreno_dev,
|
||||
u32 *flag, u32 val);
|
||||
#endif /*__ADRENO_H */
|
||||
|
@@ -1434,4 +1434,5 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
|
||||
.clk_set_options = a3xx_clk_set_options,
|
||||
.read_alwayson = a3xx_read_alwayson,
|
||||
.hw_isidle = a3xx_hw_isidle,
|
||||
.power_ops = &adreno_power_operations,
|
||||
};
|
||||
|
@@ -1449,7 +1449,7 @@ static void a5xx_start(struct adreno_device *adreno_dev)
|
||||
|
||||
/* Enable ISDB mode if requested */
|
||||
if (test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv)) {
|
||||
if (!kgsl_active_count_get(device)) {
|
||||
if (!adreno_active_count_get(adreno_dev)) {
|
||||
/*
|
||||
* Disable ME/PFP split timeouts when the debugger is
|
||||
* enabled because the CP doesn't know when a shader is
|
||||
@@ -3011,4 +3011,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
|
||||
.clk_set_options = a5xx_clk_set_options,
|
||||
.read_alwayson = a5xx_read_alwayson,
|
||||
.hw_isidle = a5xx_hw_isidle,
|
||||
.power_ops = &adreno_power_operations,
|
||||
};
|
||||
|
@@ -2677,4 +2677,5 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
|
||||
#endif
|
||||
.clk_set_options = a6xx_clk_set_options,
|
||||
.read_alwayson = a6xx_read_alwayson,
|
||||
.power_ops = &adreno_power_operations,
|
||||
};
|
||||
|
@@ -1927,16 +1927,9 @@ static int a6xx_gmu_ifpc_store(struct kgsl_device *device,
|
||||
requested_idle_level = GPU_HW_ACTIVE;
|
||||
}
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/* Power down the GPU before changing the idle level */
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
gmu->idle_level = requested_idle_level;
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
return adreno_power_cycle_u32(adreno_dev, &gmu->idle_level,
|
||||
requested_idle_level);
|
||||
}
|
||||
|
||||
static unsigned int a6xx_gmu_ifpc_show(struct kgsl_device *device)
|
||||
@@ -2569,11 +2562,24 @@ static int a6xx_gmu_start(struct kgsl_device *device)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void set_acd(struct adreno_device *adreno_dev, void *priv)
|
||||
{
|
||||
struct a6xx_gmu_device *gmu = A6XX_GMU_DEVICE(KGSL_DEVICE(adreno_dev));
|
||||
int ret;
|
||||
|
||||
adreno_dev->acd_enabled = *((bool *)priv);
|
||||
|
||||
ret = a6xx_gmu_aop_send_acd_state(gmu->mailbox.channel,
|
||||
adreno_dev->acd_enabled);
|
||||
if (ret)
|
||||
dev_err(&gmu->pdev->dev,
|
||||
"AOP mbox send message failed: %d\n", ret);
|
||||
}
|
||||
|
||||
static int a6xx_gmu_acd_set(struct kgsl_device *device, bool val)
|
||||
{
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
|
||||
struct a6xx_gmu_device *gmu = A6XX_GMU_DEVICE(device);
|
||||
int ret;
|
||||
|
||||
if (IS_ERR_OR_NULL(gmu->mailbox.channel))
|
||||
return -EINVAL;
|
||||
@@ -2582,21 +2588,8 @@ static int a6xx_gmu_acd_set(struct kgsl_device *device, bool val)
|
||||
if (adreno_dev->acd_enabled == val)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/* Power down the GPU before enabling or disabling ACD */
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
|
||||
adreno_dev->acd_enabled = val;
|
||||
ret = a6xx_gmu_aop_send_acd_state(gmu->mailbox.channel, val);
|
||||
if (ret)
|
||||
dev_err(&gmu->pdev->dev,
|
||||
"AOP mbox send message failed: %d\n", ret);
|
||||
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
return 0;
|
||||
/* Power cycle the GPU for changes to take effect */
|
||||
return adreno_power_cycle(adreno_dev, set_acd, &val);
|
||||
}
|
||||
|
||||
static struct gmu_dev_ops a6xx_gmudev = {
|
||||
|
@@ -220,16 +220,9 @@ static int a6xx_rgmu_ifpc_store(struct kgsl_device *device,
|
||||
if (requested_idle_level == rgmu->idle_level)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/* Power down the GPU before changing the idle level */
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
rgmu->idle_level = requested_idle_level;
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
/* Power cycle the GPU for changes to take effect */
|
||||
return adreno_power_cycle_u32(adreno_dev, &rgmu->idle_level,
|
||||
requested_idle_level);
|
||||
}
|
||||
|
||||
static unsigned int a6xx_rgmu_ifpc_show(struct kgsl_device *device)
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/coresight.h>
|
||||
@@ -61,7 +61,7 @@ ssize_t adreno_coresight_show_register(struct device *dev,
|
||||
|
||||
if (device->state == KGSL_STATE_ACTIVE ||
|
||||
device->state == KGSL_STATE_NAP) {
|
||||
if (!kgsl_active_count_get(device)) {
|
||||
if (!adreno_active_count_get(adreno_dev)) {
|
||||
if (!is_cx)
|
||||
kgsl_regread(device, cattr->reg->offset,
|
||||
&cattr->reg->value);
|
||||
@@ -69,7 +69,7 @@ ssize_t adreno_coresight_show_register(struct device *dev,
|
||||
adreno_cx_dbgc_regread(device,
|
||||
cattr->reg->offset,
|
||||
&cattr->reg->value);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -116,7 +116,7 @@ ssize_t adreno_coresight_store_register(struct device *dev,
|
||||
/* Program the hardware if it is not power collapsed */
|
||||
if (device->state == KGSL_STATE_ACTIVE ||
|
||||
device->state == KGSL_STATE_NAP) {
|
||||
if (!kgsl_active_count_get(device)) {
|
||||
if (!adreno_active_count_get(adreno_dev)) {
|
||||
if (!is_cx)
|
||||
kgsl_regwrite(device, cattr->reg->offset,
|
||||
cattr->reg->value);
|
||||
@@ -125,7 +125,7 @@ ssize_t adreno_coresight_store_register(struct device *dev,
|
||||
cattr->reg->offset,
|
||||
cattr->reg->value);
|
||||
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,7 +166,7 @@ static void adreno_coresight_disable(struct coresight_device *csdev,
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
if (!kgsl_active_count_get(device)) {
|
||||
if (!adreno_active_count_get(adreno_dev)) {
|
||||
if (cs_id == GPU_CORESIGHT_GX)
|
||||
for (i = 0; i < coresight->count; i++)
|
||||
kgsl_regwrite(device,
|
||||
@@ -176,7 +176,7 @@ static void adreno_coresight_disable(struct coresight_device *csdev,
|
||||
adreno_cx_dbgc_regwrite(device,
|
||||
coresight->registers[i].offset, 0);
|
||||
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
|
||||
if (cs_id == GPU_CORESIGHT_GX)
|
||||
@@ -289,10 +289,10 @@ static int adreno_coresight_enable(struct coresight_device *csdev,
|
||||
coresight->registers[i].initial;
|
||||
|
||||
if (kgsl_state_is_awake(device)) {
|
||||
ret = kgsl_active_count_get(device);
|
||||
ret = adreno_active_count_get(adreno_dev);
|
||||
if (!ret) {
|
||||
ret = _adreno_coresight_set(adreno_dev, cs_id);
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -8,6 +8,11 @@
|
||||
#include "adreno.h"
|
||||
extern struct dentry *kgsl_debugfs_dir;
|
||||
|
||||
static void set_isdb(struct adreno_device *adreno_dev, void *priv)
|
||||
{
|
||||
set_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
|
||||
}
|
||||
|
||||
static int _isdb_set(void *data, u64 val)
|
||||
{
|
||||
struct kgsl_device *device = data;
|
||||
@@ -17,19 +22,11 @@ static int _isdb_set(void *data, u64 val)
|
||||
if (test_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/*
|
||||
* Bring down the GPU so we can bring it back up with the correct power
|
||||
* and clock settings
|
||||
*/
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
set_bit(ADRENO_DEVICE_ISDB_ENABLED, &adreno_dev->priv);
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
return adreno_power_cycle(adreno_dev, set_isdb, NULL);
|
||||
}
|
||||
|
||||
static int _isdb_get(void *data, u64 *val)
|
||||
@@ -57,14 +54,9 @@ static int _lm_limit_set(void *data, u64 val)
|
||||
else if (val < 3000)
|
||||
val = 3000;
|
||||
|
||||
adreno_dev->lm_limit = val;
|
||||
|
||||
if (adreno_dev->lm_enabled) {
|
||||
mutex_lock(&device->mutex);
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
mutex_unlock(&device->mutex);
|
||||
}
|
||||
if (adreno_dev->lm_enabled)
|
||||
return adreno_power_cycle_u32(adreno_dev,
|
||||
&adreno_dev->lm_limit, val);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -557,7 +557,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
|
||||
if (dispatcher->inflight == 1 &&
|
||||
!test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
|
||||
/* Time to make the donuts. Turn on the GPU */
|
||||
ret = kgsl_active_count_get(device);
|
||||
ret = adreno_active_count_get(adreno_dev);
|
||||
if (ret) {
|
||||
dispatcher->inflight--;
|
||||
dispatch_q->inflight--;
|
||||
@@ -608,7 +608,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
|
||||
kgsl_pwrscale_midframe_timer_restart(device);
|
||||
|
||||
} else {
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
|
||||
}
|
||||
}
|
||||
@@ -2500,7 +2500,7 @@ static void _dispatcher_power_down(struct adreno_device *adreno_dev)
|
||||
del_timer_sync(&dispatcher->fault_timer);
|
||||
|
||||
if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
|
||||
kgsl_active_count_put(device);
|
||||
adreno_active_count_put(adreno_dev);
|
||||
clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
|
||||
}
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
@@ -68,7 +68,7 @@ long adreno_ioctl_perfcounter_get(struct kgsl_device_private *dev_priv,
|
||||
* active count inside that function.
|
||||
*/
|
||||
|
||||
result = adreno_perfcntr_active_oob_get(device);
|
||||
result = adreno_perfcntr_active_oob_get(adreno_dev);
|
||||
if (result) {
|
||||
mutex_unlock(&device->mutex);
|
||||
return (long)result;
|
||||
@@ -87,7 +87,7 @@ long adreno_ioctl_perfcounter_get(struct kgsl_device_private *dev_priv,
|
||||
get->countable, PERFCOUNTER_FLAG_NONE);
|
||||
}
|
||||
|
||||
adreno_perfcntr_active_oob_put(device);
|
||||
adreno_perfcntr_active_oob_put(adreno_dev);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2002,2007-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2002,2007-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
@@ -248,7 +248,7 @@ int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
ret = adreno_perfcntr_active_oob_get(device);
|
||||
ret = adreno_perfcntr_active_oob_get(adreno_dev);
|
||||
if (ret) {
|
||||
mutex_unlock(&device->mutex);
|
||||
goto done;
|
||||
@@ -277,7 +277,7 @@ int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
|
||||
}
|
||||
}
|
||||
|
||||
adreno_perfcntr_active_oob_put(device);
|
||||
adreno_perfcntr_active_oob_put(adreno_dev);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/ctype.h>
|
||||
@@ -691,7 +691,7 @@ static ssize_t profile_assignments_write(struct file *filep,
|
||||
goto error_unlock;
|
||||
}
|
||||
|
||||
ret = adreno_perfcntr_active_oob_get(device);
|
||||
ret = adreno_perfcntr_active_oob_get(adreno_dev);
|
||||
if (ret) {
|
||||
size = ret;
|
||||
goto error_unlock;
|
||||
@@ -738,7 +738,7 @@ static ssize_t profile_assignments_write(struct file *filep,
|
||||
size = len;
|
||||
|
||||
error_put:
|
||||
adreno_perfcntr_active_oob_put(device);
|
||||
adreno_perfcntr_active_oob_put(adreno_dev);
|
||||
error_unlock:
|
||||
mutex_unlock(&device->mutex);
|
||||
error_free:
|
||||
|
@@ -139,43 +139,13 @@ static bool _ft_hang_intr_status_show(struct adreno_device *adreno_dev)
|
||||
return true;
|
||||
}
|
||||
|
||||
static int pwrflag_store(struct adreno_device *adreno_dev,
|
||||
unsigned int val, bool *flag)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
|
||||
if (*flag == val)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/* Power down the GPU before changing the state */
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
*flag = val;
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _preemption_store(struct adreno_device *adreno_dev, bool val)
|
||||
static void change_preemption(struct adreno_device *adreno_dev, void *priv)
|
||||
{
|
||||
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
|
||||
struct kgsl_context *context;
|
||||
struct adreno_context *drawctxt;
|
||||
int id;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
if (!(ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) ||
|
||||
(test_bit(ADRENO_DEVICE_PREEMPTION,
|
||||
&adreno_dev->priv) == val)) {
|
||||
mutex_unlock(&device->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SUSPEND);
|
||||
change_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
|
||||
adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);
|
||||
|
||||
@@ -186,12 +156,16 @@ static int _preemption_store(struct adreno_device *adreno_dev, bool val)
|
||||
drawctxt->rb = adreno_ctx_get_rb(adreno_dev, drawctxt);
|
||||
}
|
||||
write_unlock(&device->context_lock);
|
||||
}
|
||||
|
||||
kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);
|
||||
static int _preemption_store(struct adreno_device *adreno_dev, bool val)
|
||||
{
|
||||
if (!(ADRENO_FEATURE(adreno_dev, ADRENO_PREEMPTION)) ||
|
||||
(test_bit(ADRENO_DEVICE_PREEMPTION,
|
||||
&adreno_dev->priv) == val))
|
||||
return 0;
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
|
||||
return 0;
|
||||
return adreno_power_cycle(adreno_dev, change_preemption, NULL);
|
||||
}
|
||||
|
||||
static bool _preemption_show(struct adreno_device *adreno_dev)
|
||||
@@ -201,7 +175,11 @@ static bool _preemption_show(struct adreno_device *adreno_dev)
|
||||
|
||||
static int _hwcg_store(struct adreno_device *adreno_dev, bool val)
|
||||
{
|
||||
return pwrflag_store(adreno_dev, val, &adreno_dev->hwcg_enabled);
|
||||
if (adreno_dev->hwcg_enabled == val)
|
||||
return 0;
|
||||
|
||||
return adreno_power_cycle_bool(adreno_dev, &adreno_dev->hwcg_enabled,
|
||||
val);
|
||||
}
|
||||
|
||||
static bool _hwcg_show(struct adreno_device *adreno_dev)
|
||||
@@ -211,10 +189,12 @@ static bool _hwcg_show(struct adreno_device *adreno_dev)
|
||||
|
||||
static int _throttling_store(struct adreno_device *adreno_dev, bool val)
|
||||
{
|
||||
if (!adreno_is_a540(adreno_dev))
|
||||
if (!adreno_is_a540(adreno_dev) ||
|
||||
adreno_dev->throttling_enabled == val)
|
||||
return 0;
|
||||
|
||||
return pwrflag_store(adreno_dev, val, &adreno_dev->throttling_enabled);
|
||||
return adreno_power_cycle_bool(adreno_dev,
|
||||
&adreno_dev->throttling_enabled, val);
|
||||
}
|
||||
|
||||
static bool _throttling_show(struct adreno_device *adreno_dev)
|
||||
@@ -224,10 +204,12 @@ static bool _throttling_show(struct adreno_device *adreno_dev)
|
||||
|
||||
static int _sptp_pc_store(struct adreno_device *adreno_dev, bool val)
|
||||
{
|
||||
if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
|
||||
if (!ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC) ||
|
||||
adreno_dev->sptp_pc_enabled == val)
|
||||
return 0;
|
||||
|
||||
return pwrflag_store(adreno_dev, val, &adreno_dev->sptp_pc_enabled);
|
||||
return adreno_power_cycle_bool(adreno_dev, &adreno_dev->sptp_pc_enabled,
|
||||
val);
|
||||
}
|
||||
|
||||
static bool _sptp_pc_show(struct adreno_device *adreno_dev)
|
||||
@@ -237,10 +219,12 @@ static bool _sptp_pc_show(struct adreno_device *adreno_dev)
|
||||
|
||||
static int _lm_store(struct adreno_device *adreno_dev, bool val)
|
||||
{
|
||||
if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
|
||||
if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
|
||||
adreno_dev->lm_enabled == val)
|
||||
return 0;
|
||||
|
||||
return pwrflag_store(adreno_dev, val, &adreno_dev->lm_enabled);
|
||||
return adreno_power_cycle_bool(adreno_dev, &adreno_dev->lm_enabled,
|
||||
val);
|
||||
}
|
||||
|
||||
static bool _lm_show(struct adreno_device *adreno_dev)
|
||||
|
@@ -46,8 +46,6 @@ static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
|
||||
static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
|
||||
static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
|
||||
unsigned int state);
|
||||
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
|
||||
unsigned int state);
|
||||
static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level);
|
||||
static int kgsl_pwrctrl_clk_set_rate(struct clk *grp_clk, unsigned int freq,
|
||||
const char *name);
|
||||
@@ -2116,7 +2114,7 @@ static void kgsl_pwrctrl_set_state(struct kgsl_device *device,
|
||||
spin_unlock(&device->submit_lock);
|
||||
}
|
||||
|
||||
static void kgsl_pwrctrl_request_state(struct kgsl_device *device,
|
||||
void kgsl_pwrctrl_request_state(struct kgsl_device *device,
|
||||
unsigned int state)
|
||||
{
|
||||
if (state != KGSL_STATE_NONE && state != device->requested_state)
|
||||
@@ -2149,82 +2147,6 @@ const char *kgsl_pwrstate_to_str(unsigned int state)
|
||||
return "UNKNOWN";
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* kgsl_active_count_get() - Increase the device active count
|
||||
* @device: Pointer to a KGSL device
|
||||
*
|
||||
* Increase the active count for the KGSL device and turn on
|
||||
* clocks if this is the first reference. Code paths that need
|
||||
* to touch the hardware or wait for the hardware to complete
|
||||
* an operation must hold an active count reference until they
|
||||
* are finished. An error code will be returned if waking the
|
||||
* device fails. The device mutex must be held while *calling
|
||||
* this function.
|
||||
*/
|
||||
int kgsl_active_count_get(struct kgsl_device *device)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(!mutex_is_locked(&device->mutex)))
|
||||
return -EINVAL;
|
||||
|
||||
if ((atomic_read(&device->active_cnt) == 0) &&
|
||||
(device->state != KGSL_STATE_ACTIVE)) {
|
||||
mutex_unlock(&device->mutex);
|
||||
wait_for_completion(&device->hwaccess_gate);
|
||||
mutex_lock(&device->mutex);
|
||||
device->pwrctrl.superfast = true;
|
||||
ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
|
||||
}
|
||||
if (ret == 0)
|
||||
atomic_inc(&device->active_cnt);
|
||||
trace_kgsl_active_count(device,
|
||||
(unsigned long) __builtin_return_address(0));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* kgsl_active_count_put() - Decrease the device active count
|
||||
* @device: Pointer to a KGSL device
|
||||
*
|
||||
* Decrease the active count for the KGSL device and turn off
|
||||
* clocks if there are no remaining references. This function will
|
||||
* transition the device to NAP if there are no other pending state
|
||||
* changes. It also completes the suspend gate. The device mutex must
|
||||
* be held while calling this function.
|
||||
*/
|
||||
void kgsl_active_count_put(struct kgsl_device *device)
|
||||
{
|
||||
if (WARN_ON(!mutex_is_locked(&device->mutex)))
|
||||
return;
|
||||
|
||||
if (WARN(atomic_read(&device->active_cnt) == 0,
|
||||
"Unbalanced get/put calls to KGSL active count\n"))
|
||||
return;
|
||||
|
||||
if (atomic_dec_and_test(&device->active_cnt)) {
|
||||
bool nap_on = !(device->pwrctrl.ctrl_flags &
|
||||
BIT(KGSL_PWRFLAGS_NAP_OFF));
|
||||
if (nap_on && device->state == KGSL_STATE_ACTIVE &&
|
||||
device->requested_state == KGSL_STATE_NONE) {
|
||||
kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
|
||||
kgsl_schedule_work(&device->idle_check_ws);
|
||||
} else if (!nap_on) {
|
||||
kgsl_pwrscale_update_stats(device);
|
||||
kgsl_pwrscale_update(device);
|
||||
}
|
||||
|
||||
mod_timer(&device->idle_timer,
|
||||
jiffies + device->pwrctrl.interval_timeout);
|
||||
}
|
||||
|
||||
trace_kgsl_active_count(device,
|
||||
(unsigned long) __builtin_return_address(0));
|
||||
|
||||
wake_up(&device->active_cnt_wq);
|
||||
}
|
||||
|
||||
static int _check_active_count(struct kgsl_device *device, int count)
|
||||
{
|
||||
/* Return 0 if the active count is greater than the desired value */
|
||||
|
@@ -197,12 +197,17 @@
|
||||
return pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
|
||||
}
|
||||
|
||||
int __must_check kgsl_active_count_get(struct kgsl_device *device);
|
||||
void kgsl_active_count_put(struct kgsl_device *device);
|
||||
int kgsl_active_count_wait(struct kgsl_device *device, int count);
|
||||
void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy);
|
||||
void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
|
||||
struct kgsl_pwr_constraint *pwrc, uint32_t id);
|
||||
int kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
|
||||
|
||||
/**
|
||||
* kgsl_pwrctrl_request_state - Request a specific power state
|
||||
* @device: Pointer to the kgsl device
|
||||
* @state: Power state requested
|
||||
*/
|
||||
void kgsl_pwrctrl_request_state(struct kgsl_device *device, u32 state);
|
||||
|
||||
#endif /* __KGSL_PWRCTRL_H */
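Finally, to show the extension point this patch creates, here is a sketch of how a target could supply its own sequences, following the adreno_power_operations table and the new .power_ops member of struct adreno_gpudev added above. All example_* names are hypothetical placeholders; only the callback signatures are taken from struct adreno_power_ops in this patch.

static int example_first_open(struct adreno_device *adreno_dev)
{
	/* Target-specific bring-up when the first kgsl instance is opened */
	return 0;
}

static int example_last_close(struct adreno_device *adreno_dev)
{
	/* Target-specific power-down when the last kgsl instance is closed */
	return 0;
}

static int example_active_count_get(struct adreno_device *adreno_dev)
{
	/* Keep the GPU from power collapsing while work is pending */
	return 0;
}

static void example_active_count_put(struct adreno_device *adreno_dev)
{
	/* Allow the GPU to power collapse again */
}

static int example_pm_suspend(struct adreno_device *adreno_dev)
{
	/* Target-specific PM suspend path */
	return 0;
}

static void example_pm_resume(struct adreno_device *adreno_dev)
{
	/* Target-specific PM resume path */
}

static const struct adreno_power_ops example_power_operations = {
	.first_open = example_first_open,
	.last_close = example_last_close,
	.active_count_get = example_active_count_get,
	.active_count_put = example_active_count_put,
	.pm_suspend = example_pm_suspend,
	.pm_resume = example_pm_resume,
};

/*
 * Hooked up from the target's struct adreno_gpudev, e.g.:
 *	.power_ops = &example_power_operations,
 */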