Revert "PM: domains: Fix up terminology with parent/child"

This reverts commit 35a2681e99 which is
commit 8d87ae48ced2dffd5e7247d19eb4c88be6f1c6f1 upstream.

It is not needed in Android systems, and it breaks the ABI for no real
reason, so revert it.

Bug: 161946584
Change-Id: I50c760760dad7e4fbbec88814d66b36445928771
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2023-06-16 09:24:38 +00:00
parent cba632cb06
commit 4c85910ff8
4 changed files with 115 additions and 115 deletions

View File

@ -263,18 +263,18 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
/* /*
* Traverse all sub-domains within the domain. This can be * Traverse all sub-domains within the domain. This can be
* done without any additional locking as the link->performance_state * done without any additional locking as the link->performance_state
* field is protected by the parent genpd->lock, which is already taken. * field is protected by the master genpd->lock, which is already taken.
* *
* Also note that link->performance_state (subdomain's performance state * Also note that link->performance_state (subdomain's performance state
* requirement to parent domain) is different from * requirement to master domain) is different from
* link->child->performance_state (current performance state requirement * link->slave->performance_state (current performance state requirement
* of the devices/sub-domains of the subdomain) and so can have a * of the devices/sub-domains of the subdomain) and so can have a
* different value. * different value.
* *
* Note that we also take vote from powered-off sub-domains into account * Note that we also take vote from powered-off sub-domains into account
* as the same is done for devices right now. * as the same is done for devices right now.
*/ */
list_for_each_entry(link, &genpd->parent_links, parent_node) { list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->performance_state > state) if (link->performance_state > state)
state = link->performance_state; state = link->performance_state;
} }
@ -285,40 +285,40 @@ static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
static int _genpd_set_performance_state(struct generic_pm_domain *genpd, static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
unsigned int state, int depth) unsigned int state, int depth)
{ {
struct generic_pm_domain *parent; struct generic_pm_domain *master;
struct gpd_link *link; struct gpd_link *link;
int parent_state, ret; int master_state, ret;
if (state == genpd->performance_state) if (state == genpd->performance_state)
return 0; return 0;
/* Propagate to parents of genpd */ /* Propagate to masters of genpd */
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
parent = link->parent; master = link->master;
if (!parent->set_performance_state) if (!master->set_performance_state)
continue; continue;
/* Find parent's performance state */ /* Find master's performance state */
ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
parent->opp_table, master->opp_table,
state); state);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
goto err; goto err;
parent_state = ret; master_state = ret;
genpd_lock_nested(parent, depth + 1); genpd_lock_nested(master, depth + 1);
link->prev_performance_state = link->performance_state; link->prev_performance_state = link->performance_state;
link->performance_state = parent_state; link->performance_state = master_state;
parent_state = _genpd_reeval_performance_state(parent, master_state = _genpd_reeval_performance_state(master,
parent_state); master_state);
ret = _genpd_set_performance_state(parent, parent_state, depth + 1); ret = _genpd_set_performance_state(master, master_state, depth + 1);
if (ret) if (ret)
link->performance_state = link->prev_performance_state; link->performance_state = link->prev_performance_state;
genpd_unlock(parent); genpd_unlock(master);
if (ret) if (ret)
goto err; goto err;
@ -333,26 +333,26 @@ static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
err: err:
/* Encountered an error, lets rollback */ /* Encountered an error, lets rollback */
list_for_each_entry_continue_reverse(link, &genpd->child_links, list_for_each_entry_continue_reverse(link, &genpd->slave_links,
child_node) { slave_node) {
parent = link->parent; master = link->master;
if (!parent->set_performance_state) if (!master->set_performance_state)
continue; continue;
genpd_lock_nested(parent, depth + 1); genpd_lock_nested(master, depth + 1);
parent_state = link->prev_performance_state; master_state = link->prev_performance_state;
link->performance_state = parent_state; link->performance_state = master_state;
parent_state = _genpd_reeval_performance_state(parent, master_state = _genpd_reeval_performance_state(master,
parent_state); master_state);
if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { if (_genpd_set_performance_state(master, master_state, depth + 1)) {
pr_err("%s: Failed to roll back to %d performance state\n", pr_err("%s: Failed to roll back to %d performance state\n",
parent->name, parent_state); master->name, master_state);
} }
genpd_unlock(parent); genpd_unlock(master);
} }
return ret; return ret;
@ -552,7 +552,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
/* /*
* If sd_count > 0 at this point, one of the subdomains hasn't * If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call genpd_power_on() for the parent yet after * managed to call genpd_power_on() for the master yet after
* incrementing it. In that case genpd_power_on() will wait * incrementing it. In that case genpd_power_on() will wait
* for us to drop the lock, so we can call .power_off() and let * for us to drop the lock, so we can call .power_off() and let
* the genpd_power_on() restore power for us (this shouldn't * the genpd_power_on() restore power for us (this shouldn't
@ -566,22 +566,22 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
genpd->status = GPD_STATE_POWER_OFF; genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd); genpd_update_accounting(genpd);
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->parent); genpd_sd_counter_dec(link->master);
genpd_lock_nested(link->parent, depth + 1); genpd_lock_nested(link->master, depth + 1);
genpd_power_off(link->parent, false, depth + 1); genpd_power_off(link->master, false, depth + 1);
genpd_unlock(link->parent); genpd_unlock(link->master);
} }
return 0; return 0;
} }
/** /**
* genpd_power_on - Restore power to a given PM domain and its parents. * genpd_power_on - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up. * @genpd: PM domain to power up.
* @depth: nesting count for lockdep. * @depth: nesting count for lockdep.
* *
* Restore power to @genpd and all of its parents so that it is possible to * Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it. * resume a device belonging to it.
*/ */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
@ -594,20 +594,20 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
/* /*
* The list is guaranteed not to change while the loop below is being * The list is guaranteed not to change while the loop below is being
* executed, unless one of the parents' .power_on() callbacks fiddles * executed, unless one of the masters' .power_on() callbacks fiddles
* with it. * with it.
*/ */
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
struct generic_pm_domain *parent = link->parent; struct generic_pm_domain *master = link->master;
genpd_sd_counter_inc(parent); genpd_sd_counter_inc(master);
genpd_lock_nested(parent, depth + 1); genpd_lock_nested(master, depth + 1);
ret = genpd_power_on(parent, depth + 1); ret = genpd_power_on(master, depth + 1);
genpd_unlock(parent); genpd_unlock(master);
if (ret) { if (ret) {
genpd_sd_counter_dec(parent); genpd_sd_counter_dec(master);
goto err; goto err;
} }
} }
@ -623,12 +623,12 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
err: err:
list_for_each_entry_continue_reverse(link, list_for_each_entry_continue_reverse(link,
&genpd->child_links, &genpd->slave_links,
child_node) { slave_node) {
genpd_sd_counter_dec(link->parent); genpd_sd_counter_dec(link->master);
genpd_lock_nested(link->parent, depth + 1); genpd_lock_nested(link->master, depth + 1);
genpd_power_off(link->parent, false, depth + 1); genpd_power_off(link->master, false, depth + 1);
genpd_unlock(link->parent); genpd_unlock(link->master);
} }
return ret; return ret;
@ -943,13 +943,13 @@ static bool genpd_present(const struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
/** /**
* genpd_sync_power_off - Synchronously power off a PM domain and its parents. * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible. * @genpd: PM domain to power off, if possible.
* @use_lock: use the lock. * @use_lock: use the lock.
* @depth: nesting count for lockdep. * @depth: nesting count for lockdep.
* *
* Check if the given PM domain can be powered off (during system suspend or * Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its parents. * hibernation) and do that if so. Also, in that case propagate to its masters.
* *
* This function is only called in "noirq" and "syscore" stages of system power * This function is only called in "noirq" and "syscore" stages of system power
* transitions. The "noirq" callbacks may be executed asynchronously, thus in * transitions. The "noirq" callbacks may be executed asynchronously, thus in
@ -974,21 +974,21 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
genpd->status = GPD_STATE_POWER_OFF; genpd->status = GPD_STATE_POWER_OFF;
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->parent); genpd_sd_counter_dec(link->master);
if (use_lock) if (use_lock)
genpd_lock_nested(link->parent, depth + 1); genpd_lock_nested(link->master, depth + 1);
genpd_sync_power_off(link->parent, use_lock, depth + 1); genpd_sync_power_off(link->master, use_lock, depth + 1);
if (use_lock) if (use_lock)
genpd_unlock(link->parent); genpd_unlock(link->master);
} }
} }
/** /**
* genpd_sync_power_on - Synchronously power on a PM domain and its parents. * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
* @genpd: PM domain to power on. * @genpd: PM domain to power on.
* @use_lock: use the lock. * @use_lock: use the lock.
* @depth: nesting count for lockdep. * @depth: nesting count for lockdep.
@ -1005,16 +1005,16 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
if (genpd_status_on(genpd)) if (genpd_status_on(genpd))
return; return;
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->parent); genpd_sd_counter_inc(link->master);
if (use_lock) if (use_lock)
genpd_lock_nested(link->parent, depth + 1); genpd_lock_nested(link->master, depth + 1);
genpd_sync_power_on(link->parent, use_lock, depth + 1); genpd_sync_power_on(link->master, use_lock, depth + 1);
if (use_lock) if (use_lock)
genpd_unlock(link->parent); genpd_unlock(link->master);
} }
_genpd_power_on(genpd, false); _genpd_power_on(genpd, false);
@ -1454,12 +1454,12 @@ static void genpd_update_cpumask(struct generic_pm_domain *genpd,
if (!genpd_is_cpu_domain(genpd)) if (!genpd_is_cpu_domain(genpd))
return; return;
list_for_each_entry(link, &genpd->child_links, child_node) { list_for_each_entry(link, &genpd->slave_links, slave_node) {
struct generic_pm_domain *parent = link->parent; struct generic_pm_domain *master = link->master;
genpd_lock_nested(parent, depth + 1); genpd_lock_nested(master, depth + 1);
genpd_update_cpumask(parent, cpu, set, depth + 1); genpd_update_cpumask(master, cpu, set, depth + 1);
genpd_unlock(parent); genpd_unlock(master);
} }
if (set) if (set)
@ -1647,17 +1647,17 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
goto out; goto out;
} }
list_for_each_entry(itr, &genpd->parent_links, parent_node) { list_for_each_entry(itr, &genpd->master_links, master_node) {
if (itr->child == subdomain && itr->parent == genpd) { if (itr->slave == subdomain && itr->master == genpd) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
} }
link->parent = genpd; link->master = genpd;
list_add_tail(&link->parent_node, &genpd->parent_links); list_add_tail(&link->master_node, &genpd->master_links);
link->child = subdomain; link->slave = subdomain;
list_add_tail(&link->child_node, &subdomain->child_links); list_add_tail(&link->slave_node, &subdomain->slave_links);
if (genpd_status_on(subdomain)) if (genpd_status_on(subdomain))
genpd_sd_counter_inc(genpd); genpd_sd_counter_inc(genpd);
@ -1671,7 +1671,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
/** /**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Leader PM domain to add the subdomain to. * @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added. * @subdomain: Subdomain to be added.
*/ */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
@ -1689,7 +1689,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
/** /**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Leader PM domain to remove the subdomain from. * @genpd: Master PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed. * @subdomain: Subdomain to be removed.
*/ */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
@ -1704,19 +1704,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
genpd_lock(subdomain); genpd_lock(subdomain);
genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", pr_warn("%s: unable to remove subdomain %s\n",
genpd->name, subdomain->name); genpd->name, subdomain->name);
ret = -EBUSY; ret = -EBUSY;
goto out; goto out;
} }
list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
if (link->child != subdomain) if (link->slave != subdomain)
continue; continue;
list_del(&link->parent_node); list_del(&link->master_node);
list_del(&link->child_node); list_del(&link->slave_node);
kfree(link); kfree(link);
if (genpd_status_on(subdomain)) if (genpd_status_on(subdomain))
genpd_sd_counter_dec(genpd); genpd_sd_counter_dec(genpd);
@ -1781,8 +1781,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd)) if (IS_ERR_OR_NULL(genpd))
return -EINVAL; return -EINVAL;
INIT_LIST_HEAD(&genpd->parent_links); INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->child_links); INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list); INIT_LIST_HEAD(&genpd->dev_list);
genpd_lock_init(genpd); genpd_lock_init(genpd);
genpd->gov = gov; genpd->gov = gov;
@ -1858,15 +1858,15 @@ static int genpd_remove(struct generic_pm_domain *genpd)
return -EBUSY; return -EBUSY;
} }
if (!list_empty(&genpd->parent_links) || genpd->device_count) { if (!list_empty(&genpd->master_links) || genpd->device_count) {
genpd_unlock(genpd); genpd_unlock(genpd);
pr_err("%s: unable to remove %s\n", __func__, genpd->name); pr_err("%s: unable to remove %s\n", __func__, genpd->name);
return -EBUSY; return -EBUSY;
} }
list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
list_del(&link->parent_node); list_del(&link->master_node);
list_del(&link->child_node); list_del(&link->slave_node);
kfree(link); kfree(link);
} }
@ -2793,12 +2793,12 @@ static int genpd_summary_one(struct seq_file *s,
/* /*
* Modifications on the list require holding locks on both * Modifications on the list require holding locks on both
* parent and child, so we are safe. * master and slave, so we are safe.
* Also genpd->name is immutable. * Also genpd->name is immutable.
*/ */
list_for_each_entry(link, &genpd->parent_links, parent_node) { list_for_each_entry(link, &genpd->master_links, master_node) {
seq_printf(s, "%s", link->child->name); seq_printf(s, "%s", link->slave->name);
if (!list_is_last(&link->parent_node, &genpd->parent_links)) if (!list_is_last(&link->master_node, &genpd->master_links))
seq_puts(s, ", "); seq_puts(s, ", ");
} }
@ -2826,7 +2826,7 @@ static int summary_show(struct seq_file *s, void *data)
struct generic_pm_domain *genpd; struct generic_pm_domain *genpd;
int ret = 0; int ret = 0;
seq_puts(s, "domain status children\n"); seq_puts(s, "domain status slaves\n");
seq_puts(s, " /device runtime status\n"); seq_puts(s, " /device runtime status\n");
seq_puts(s, "----------------------------------------------------------------------\n"); seq_puts(s, "----------------------------------------------------------------------\n");
@ -2881,8 +2881,8 @@ static int sub_domains_show(struct seq_file *s, void *data)
if (ret) if (ret)
return -ERESTARTSYS; return -ERESTARTSYS;
list_for_each_entry(link, &genpd->parent_links, parent_node) list_for_each_entry(link, &genpd->master_links, master_node)
seq_printf(s, "%s\n", link->child->name); seq_printf(s, "%s\n", link->slave->name);
genpd_unlock(genpd); genpd_unlock(genpd);
return ret; return ret;

View File

@ -135,8 +135,8 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
* *
* All subdomains have been powered off already at this point. * All subdomains have been powered off already at this point.
*/ */
list_for_each_entry(link, &genpd->parent_links, parent_node) { list_for_each_entry(link, &genpd->master_links, master_node) {
struct generic_pm_domain *sd = link->child; struct generic_pm_domain *sd = link->slave;
s64 sd_max_off_ns = sd->max_off_time_ns; s64 sd_max_off_ns = sd->max_off_time_ns;
if (sd_max_off_ns < 0) if (sd_max_off_ns < 0)
@ -217,13 +217,13 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
} }
/* /*
* We have to invalidate the cached results for the parents, so * We have to invalidate the cached results for the masters, so
* use the observation that default_power_down_ok() is not * use the observation that default_power_down_ok() is not
* going to be called for any parent until this instance * going to be called for any master until this instance
* returns. * returns.
*/ */
list_for_each_entry(link, &genpd->child_links, child_node) list_for_each_entry(link, &genpd->slave_links, slave_node)
link->parent->max_off_time_changed = true; link->master->max_off_time_changed = true;
genpd->max_off_time_ns = -1; genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = false; genpd->max_off_time_changed = false;

View File

@ -95,8 +95,8 @@ struct generic_pm_domain {
struct device dev; struct device dev;
struct dev_pm_domain domain; /* PM domain operations */ struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */ struct list_head gpd_list_node; /* Node in the global PM domains list */
struct list_head parent_links; /* Links with PM domain as a parent */ struct list_head master_links; /* Links with PM domain as a master */
struct list_head child_links;/* Links with PM domain as a child */ struct list_head slave_links; /* Links with PM domain as a slave */
struct list_head dev_list; /* List of devices */ struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov; struct dev_power_governor *gov;
struct work_struct power_off_work; struct work_struct power_off_work;
@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
} }
struct gpd_link { struct gpd_link {
struct generic_pm_domain *parent; struct generic_pm_domain *master;
struct list_head parent_node; struct list_head master_node;
struct generic_pm_domain *child; struct generic_pm_domain *slave;
struct list_head child_node; struct list_head slave_node;
/* Sub-domain's per-master domain performance state */ /* Sub-domain's per-master domain performance state */
unsigned int performance_state; unsigned int performance_state;

View File

@ -49,17 +49,17 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
else: else:
status_string = 'off-{}'.format(genpd['state_idx']) status_string = 'off-{}'.format(genpd['state_idx'])
child_names = [] slave_names = []
for link in list_for_each_entry( for link in list_for_each_entry(
genpd['parent_links'], genpd['master_links'],
device_link_type.get_type().pointer(), device_link_type.get_type().pointer(),
'parent_node'): 'master_node'):
child_names.append(link['child']['name']) slave_names.append(link['slave']['name'])
gdb.write('%-30s %-15s %s\n' % ( gdb.write('%-30s %-15s %s\n' % (
genpd['name'].string(), genpd['name'].string(),
status_string, status_string,
', '.join(child_names))) ', '.join(slave_names)))
# Print devices in domain # Print devices in domain
for pm_data in list_for_each_entry(genpd['dev_list'], for pm_data in list_for_each_entry(genpd['dev_list'],
@ -70,7 +70,7 @@ Output is similar to /sys/kernel/debug/pm_genpd/pm_genpd_summary'''
gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev))) gdb.write(' %-50s %s\n' % (kobj_path, rtpm_status_str(dev)))
def invoke(self, arg, from_tty): def invoke(self, arg, from_tty):
gdb.write('domain status children\n'); gdb.write('domain status slaves\n');
gdb.write(' /device runtime status\n'); gdb.write(' /device runtime status\n');
gdb.write('----------------------------------------------------------------------\n'); gdb.write('----------------------------------------------------------------------\n');
for genpd in list_for_each_entry( for genpd in list_for_each_entry(