android_kernel_cmhtcleo/drivers/gpu/msm/kgsl_pwrctrl.c

/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include "kgsl.h"
#define SWITCH_OFF 200
#define TZ_UPDATE_ID 0x01404000
#define TZ_RESET_ID 0x01403000
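/*
* SWITCH_OFF bounds how many consecutive idle samples are fed to the
* TrustZone algorithm while the GPU sits in turbo mode (see
* kgsl_pwrctrl_idle_calc() below).  TZ_UPDATE_ID and TZ_RESET_ID are the
* secure-world service IDs passed to the SMC call below.
*/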
#ifdef CONFIG_MSM_SECURE_IO
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry(u32 cmd, u32 val)
{
register u32 r0 asm("r0") = cmd; /* secure service ID */
register u32 r1 asm("r1") = 0x0; /* unused by these services */
register u32 r2 asm("r2") = val; /* service argument */
/* Ensure outstanding writes have completed before entering TZ. */
__iowmb();
asm(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
);
return r0;
}
#else
static int __secure_tz_entry(u32 cmd, u32 val)
{
return 0;
}
#endif /* CONFIG_MSM_SECURE_IO */
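/*
* Without CONFIG_MSM_SECURE_IO the stub above always reports 0, i.e.
* "stay at the current level", so TZ-driven scaling becomes a no-op.
*/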
/* Returns the requested update to our power level: *
* up (-1) or down (+1) a level, or stay the same (0). */
static inline int kgsl_pwrctrl_tz_update(u32 idle)
{
return __secure_tz_entry(TZ_UPDATE_ID, idle);
}
static inline void kgsl_pwrctrl_tz_reset(void)
{
__secure_tz_entry(TZ_RESET_ID, 0);
}
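/*
* kgsl_pwrctrl_idle_calc() passes the device's idle time to the update
* call; the reset is issued on the way into sleep so the secure side
* starts its bookkeeping fresh after the next wakeup.
*/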
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (new_level < (pwr->num_pwrlevels - 1) &&
new_level >= pwr->thermal_pwrlevel &&
new_level != pwr->active_pwrlevel) {
pwr->active_pwrlevel = new_level;
if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
KGSL_PWR_WARN(device, "pwr level changed to %d\n",
pwr->active_pwrlevel);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
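/*
* Power levels are indexed fastest-first: level 0 is turbo and
* num_pwrlevels - 1 is the slowest.  thermal_pwrlevel therefore acts as
* a speed ceiling: requests for a faster (numerically smaller) level are
* rejected by the bounds check above.
*/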
static int __gpuclk_store(int max, struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret, i, delta = 5000000;
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return count;
mutex_lock(&device->mutex);
for (i = 0; i < pwr->num_pwrlevels; i++) {
if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
if (max)
pwr->thermal_pwrlevel = i;
break;
}
}
if (i == pwr->num_pwrlevels)
goto done;
/*
* If the current or requested clock speed is greater than the
* thermal limit, bump down immediately.
*/
if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
else if (!max)
kgsl_pwrctrl_pwrlevel_change(device, i);
done:
mutex_unlock(&device->mutex);
return count;
}
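/*
* The two sysfs handlers below both funnel into __gpuclk_store(); the
* requested frequency is matched to the nearest defined power level
* (within 5 MHz).  Illustrative usage, with a made-up path and rate:
*
*   echo 245760000 > /sys/.../kgsl-3d0/max_gpuclk   (thermal ceiling)
*   echo 245760000 > /sys/.../kgsl-3d0/gpuclk       (active level)
*/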
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(1, dev, attr, buf, count);
}
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(0, dev, attr, buf, count);
}
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
mutex_lock(&device->mutex);
if (val == 1)
pwr->nap_allowed = true;
else if (val == 0)
pwr->nap_allowed = false;
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
const long div = 1000/HZ; /* ms per jiffy; assumes HZ <= 1000 */
static unsigned int org_interval_timeout = 1;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
if (org_interval_timeout == 1)
org_interval_timeout = pwr->interval_timeout;
mutex_lock(&device->mutex);
/* Let the timeout be requested in ms, but convert to jiffies. */
val /= div;
if (val >= org_interval_timeout)
pwr->interval_timeout = val;
mutex_unlock(&device->mutex);
return count;
}
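/*
* Worked example for the conversion above, assuming HZ = 100 (typical
* for these targets): div = 1000/HZ = 10, so "echo 200" requests 200 ms,
* stored as 20 jiffies.  Values below the board's default timeout are
* ignored by the org_interval_timeout check.
*/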
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n",
device->pwrctrl.interval_timeout);
}
static int kgsl_pwrctrl_scaling_governor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
unsigned int reset;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
reset = pwr->idle_pass;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
if (strncmp(temp, "ondemand", 8) == 0)
reset = 1;
else if (strncmp(temp, "performance", 11) == 0)
reset = 0;
mutex_lock(&device->mutex);
pwr->idle_pass = reset;
if (pwr->idle_pass == 0)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_scaling_governor_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
if (pwr->idle_pass)
return snprintf(buf, 10, "ondemand\n");
else
return snprintf(buf, 13, "performance\n");
}
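/*
* Only two "governors" exist here: "ondemand" sets idle_pass so the idle
* timer consults the TrustZone algorithm, while "performance" clears it
* and pins the GPU at the thermal power level.
*/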
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(scaling_governor, 0644, kgsl_pwrctrl_scaling_governor_show,
kgsl_pwrctrl_scaling_governor_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
&dev_attr_max_gpuclk,
&dev_attr_pwrnap,
&dev_attr_idle_timer,
&dev_attr_scaling_governor,
NULL
};
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
static void kgsl_pwrctrl_idle_calc(struct kgsl_device *device)
{
int val;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_power_stats stats;
device->ftbl.device_power_stats(device, &stats);
if (stats.total_time == 0)
return;
/* If the GPU has stayed in turbo mode for a while, *
* stop writing out values. */
if (pwr->active_pwrlevel)
pwr->no_switch_cnt = 0;
else if (pwr->no_switch_cnt > SWITCH_OFF)
return;
pwr->no_switch_cnt++;
val = kgsl_pwrctrl_tz_update(stats.total_time - stats.busy_time);
if (val)
kgsl_pwrctrl_pwrlevel_change(device,
pwr->active_pwrlevel + val);
}
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i = 0;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks off, device %d\n", device->id);
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_disable(pwr->grp_clks[i]);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->requested_state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks on, device %d\n", device->id);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
/* As the last step, enable the grp clocks so that
* GPU interrupts can come through. */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_enable(pwr->grp_clks[i]);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
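/*
* Note the asymmetry above: when gating, the core clock is parked at the
* slowest level (skipped for NAP so the rate survives), and when
* ungating, the rate is restored before the clocks are enabled so the
* GPU comes back at the intended frequency.
*/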
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi off, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_set_rate(pwr->ebi1_clk, 0);
clk_disable(pwr->ebi1_clk);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
0);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi on, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_enable(pwr->ebi1_clk);
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power off, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_disable(pwr->gpu_reg);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power on, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_enable(pwr->gpu_reg);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq on, device %d\n", device->id);
enable_irq(pwr->interrupt_num);
}
} else if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq off, device %d\n", device->id);
disable_irq(pwr->interrupt_num);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
int i, result = 0;
struct clk *clk;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
pdata_dev->clk.name.clk,
pdata_dev->clk.name.pclk,
pdata_dev->imem_clk_name.clk,
pdata_dev->imem_clk_name.pclk};
/* acquire clocks */
for (i = 1; i < KGSL_MAX_CLKS; i++) {
if (clk_names[i]) {
clk = clk_get(&pdev->dev, clk_names[i]);
if (IS_ERR(clk))
goto clk_err;
pwr->grp_clks[i] = clk;
}
}
/* Make sure we have a source clk for freq setting */
clk = clk_get(&pdev->dev, clk_names[0]);
/* Fall back to the core clock if no separate source clock exists. */
pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
/* put the AXI bus into asynchronous mode with the graphics cores */
if (pdata_pwr->set_grp_async != NULL)
pdata_pwr->set_grp_async();
if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
KGSL_PWR_ERR(device, "invalid power level count: %d\n",
pdata_pwr->num_levels);
result = -EINVAL;
goto done;
}
pwr->num_pwrlevels = pdata_pwr->num_levels;
pwr->active_pwrlevel = pdata_pwr->init_level;
for (i = 0; i < pdata_pwr->num_levels; i++) {
/* Unlike upstream, skip clk_round_rate() here and use the
* board-file frequency directly. */
pwr->pwrlevels[i].gpu_freq =
(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
pdata_pwr->pwrlevel[i].gpu_freq : 0;
pwr->pwrlevels[i].bus_freq =
pdata_pwr->pwrlevel[i].bus_freq;
}
/* Do not set_rate for targets in sync with AXI */
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0], pwr->
pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
if (IS_ERR(pwr->gpu_reg))
pwr->gpu_reg = NULL;
pwr->power_flags = 0;
pwr->nap_allowed = pdata_pwr->nap_allowed;
/* drewis: this was removed at some point before I cherry-picked the commit below */
pwr->idle_pass = pdata_pwr->idle_pass;
/* dc14311... msm: kgsl: Replace internal_power_rail API calls with regulator APIs */
pwr->interval_timeout = pdata_pwr->idle_timeout;
pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
if (IS_ERR(pwr->ebi1_clk))
pwr->ebi1_clk = NULL;
else
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
if (pdata_dev->clk.bus_scale_table != NULL) {
pwr->pcl =
msm_bus_scale_register_client(pdata_dev->clk.
bus_scale_table);
if (!pwr->pcl) {
KGSL_PWR_ERR(device,
"msm_bus_scale_register_client failed: "
"id %d table %p", device->id,
pdata_dev->clk.bus_scale_table);
result = -EINVAL;
goto done;
}
}
/* acquire interrupt */
pwr->interrupt_num =
platform_get_irq_byname(pdev, pwr->irq_name);
if (pwr->interrupt_num <= 0) {
KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
pwr->interrupt_num);
result = -EINVAL;
goto done;
}
return result;
clk_err:
result = PTR_ERR(clk);
KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
clk_names[i], result);
done:
return result;
}
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i;
KGSL_PWR_INFO(device, "close device %d\n", device->id);
if (pwr->interrupt_num > 0) {
if (pwr->have_irq) {
free_irq(pwr->interrupt_num, NULL);
pwr->have_irq = 0;
}
pwr->interrupt_num = 0;
}
clk_put(pwr->ebi1_clk);
if (pwr->pcl)
msm_bus_scale_unregister_client(pwr->pcl);
pwr->pcl = 0;
if (pwr->gpu_reg) {
regulator_put(pwr->gpu_reg);
pwr->gpu_reg = NULL;
}
for (i = 1; i < KGSL_MAX_CLKS; i++)
if (pwr->grp_clks[i]) {
clk_put(pwr->grp_clks[i]);
pwr->grp_clks[i] = NULL;
}
pwr->grp_clks[0] = NULL;
pwr->power_flags = 0;
}
void kgsl_idle_check(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
mutex_lock(&device->mutex);
if ((device->pwrctrl.idle_pass) &&
(device->requested_state != KGSL_STATE_SLEEP))
kgsl_pwrctrl_idle_calc(device);
if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
if (kgsl_pwrctrl_sleep(device) != 0)
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_RECOVER)) {
device->requested_state = KGSL_STATE_NONE;
}
mutex_unlock(&device->mutex);
}
void kgsl_timer(unsigned long data)
{
struct kgsl_device *device = (struct kgsl_device *) data;
KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
if (device->requested_state != KGSL_STATE_SUSPEND) {
device->requested_state = KGSL_STATE_SLEEP;
/* Have work run in a non-interrupt context. */
queue_work(device->work_queue, &device->idle_check_ws);
}
}
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
BUG_ON(!mutex_is_locked(&device->mutex));
if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
void kgsl_check_suspended(struct kgsl_device *device)
{
if (device->requested_state == KGSL_STATE_SUSPEND ||
device->state == KGSL_STATE_SUSPEND) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->recovery_gate);
mutex_lock(&device->mutex);
}
}
/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
/* Work through the legal state transitions */
if (device->requested_state == KGSL_STATE_NAP) {
if (device->ftbl.device_isidle(device))
goto nap;
} else if (device->requested_state == KGSL_STATE_SLEEP) {
if (device->state == KGSL_STATE_NAP ||
device->ftbl.device_isidle(device))
goto sleep;
}
device->requested_state = KGSL_STATE_NONE;
return -EBUSY;
sleep:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
device->pwrctrl.no_switch_cnt = 0;
device->pwrctrl.time = 0;
kgsl_pwrctrl_tz_reset();
goto clk_off;
nap:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
device->state = device->requested_state;
device->requested_state = KGSL_STATE_NONE;
wake_unlock(&device->idle_wakelock);
KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
device->state, device->id);
return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
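/*
* NAP vs SLEEP in the transitions above: NAP merely masks the interrupt
* and gates the clocks, keeping the current rate for a cheap wakeup;
* SLEEP additionally drops the AXI vote, parks the core clock at the
* slowest level and resets the TrustZone idle statistics.
*/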
/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
if (device->state == KGSL_STATE_SUSPEND)
return;
if (device->state != KGSL_STATE_NAP) {
if (device->pwrctrl.idle_pass)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
/* Turn on the core clocks */
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
/* Enable state before turning on irq */
device->state = KGSL_STATE_ACTIVE;
KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* Re-enable HW access */
mod_timer(&device->idle_timer,
jiffies + device->pwrctrl.interval_timeout);
wake_lock(&device->idle_wakelock);
KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);