msm: ipa: remove ipa rm deprecated mechanism support

RM isn't in use for newer devices.

Acked-by: Tal Gelbard <tgelbard@qti.qualcomm.com>
Change-Id: I8005f02acbd424e0721c2efa49f840f2cf7e8f7b
Signed-off-by: Arnav Sharma <arnav_s@codeaurora.org>
Amir Levy 2019-06-02 14:07:36 +03:00 committed by Arnav Sharma
parent 39d9f83d20
commit 17f1f70c1e
18 changed files with 269 additions and 2566 deletions
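
For context: IPA client drivers used to choose between the legacy resource manager (RM) and the newer power manager (PM) at runtime via ipa_pm_is_used(); this commit removes the RM branch everywhere, so clients call the PM API unconditionally. A minimal sketch of the pattern being collapsed, modeled on the ecm_ipa resource_request() helper removed below (client_ctx and the function names are placeholders, not code from this commit):

/* before: runtime selection between the RM and PM mechanisms */
static int client_request_resource_old(struct client_ctx *ctx)
{
	if (ipa_pm_is_used())
		return ipa_pm_activate(ctx->pm_hdl);
	return ipa_rm_inactivity_timer_request_resource(ctx->rm_resource);
}

/* after: PM is the only mechanism, call it directly */
static int client_request_resource_new(struct client_ctx *ctx)
{
	return ipa_pm_activate(ctx->pm_hdl);
}

The same collapse is applied to the connect/disconnect, suspend/resume and perf-profile paths in the files below.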


@ -14,6 +14,7 @@
#include <linux/ipa_uc_offload.h>
#include <linux/pci.h>
#include "ipa_api.h"
#include "ipa_v3/ipa_i.h"
/*
* The following for adding code (ie. for EMULATION) not found on x86.
@ -2794,24 +2795,19 @@ enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
EXPORT_SYMBOL(ipa_get_client_mapping);
/**
* ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
* the supplied pipe index.
*
* @pipe_idx:
*
* Return value: IPA_RM resource related to the pipe, -1 if a resource was not
* found.
* ipa_get_rm_resource_from_ep() - this function is part of the deprecated
* RM mechanism but is still used by some drivers so we kept the definition.
*/
enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx)
{
int ret;
IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx);
return ret;
IPAERR("IPA RM is not supported idx=%d\n", pipe_idx);
return -EFAULT;
}
EXPORT_SYMBOL(ipa_get_rm_resource_from_ep);
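
The function is kept only as a stub for legacy callers: instead of dispatching into the removed RM layer it logs an error and returns -EFAULT. Any remaining caller should treat a negative return as "no RM resource" and take its PM path instead; a hypothetical caller-side check (use_pm_path() and ctx are placeholders):

	enum ipa_rm_resource_name resource;

	resource = ipa_get_rm_resource_from_ep(pipe_idx);
	if ((int)resource < 0) {
		/* RM is not supported on this target; use the PM handle instead */
		return use_pm_path(ctx);
	}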
/**
* ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
*
@ -3645,18 +3641,6 @@ void ipa_deregister_client_callback(enum ipa_client_type client)
}
/**
* ipa_pm_is_used() - Returns if IPA PM framework is used
*/
bool ipa_pm_is_used(void)
{
bool ret;
IPA_API_DISPATCH_RETURN(ipa_pm_is_used);
return ret;
}
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,


@ -340,8 +340,6 @@ struct ipa_api_controller {
enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx);
bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
enum ipa_transport_type (*ipa_get_transport_type)(void);
@ -434,8 +432,6 @@ struct ipa_api_controller {
struct ipa_smmu_out_params *out);
int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
bool (*ipa_pm_is_used)(void);
int (*ipa_wigig_uc_init)(
struct ipa_wdi_uc_ready_params *inout,
ipa_wigig_misc_int_cb int_notify,


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
@ -112,7 +112,7 @@ enum ecm_ipa_state {
};
/**
* enum ecm_ipa_operation - enumerations used to descibe the API operation
* enum ecm_ipa_operation - enumerations used to describe the API operation
*
* Those enums are used as input for the driver state machine.
*/
@ -147,8 +147,6 @@ enum ecm_ipa_operation {
* state is changed to RNDIS_IPA_CONNECTED_AND_UP
* @ipa_to_usb_client: consumer client
* @usb_to_ipa_client: producer client
* @ipa_rm_resource_name_prod: IPA resource manager producer resource
* @ipa_rm_resource_name_cons: IPA resource manager consumer resource
* @pm_hdl: handle for IPA PM
* @is_vlan_mode: does the driver need to work in VLAN mode?
*/
@ -166,8 +164,6 @@ struct ecm_ipa_dev {
void (*device_ready_notify)(void);
enum ipa_client_type ipa_to_usb_client;
enum ipa_client_type usb_to_ipa_client;
enum ipa_rm_resource_name ipa_rm_resource_name_prod;
enum ipa_rm_resource_name ipa_rm_resource_name_cons;
u32 pm_hdl;
bool is_vlan_mode;
};
@ -186,15 +182,9 @@ static int ecm_ipa_rules_cfg
static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx);
static void ecm_ipa_deregister_properties(void);
static void ecm_ipa_rm_notify
(void *user_data, enum ipa_rm_event event, unsigned long data);
static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
static netdev_tx_t ecm_ipa_start_xmit
(struct sk_buff *skb, struct net_device *net);
static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
@ -242,7 +232,6 @@ static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
* - allocate the network device
* - set default values for driver internals
* - create debugfs folder and files
* - create IPA resource manager client
* - add header insertion rules for IPA driver (based on host/device
* Ethernet addresses given in input params)
* - register tx/rx properties to IPA driver (will be later used
@ -446,44 +435,18 @@ int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
ecm_ipa_ctx->usb_to_ipa_client);
if (ipa_pm_is_used()) {
retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
} else {
ecm_ipa_ctx->ipa_rm_resource_name_cons =
ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
ECM_IPA_ERROR(
"Error getting CONS RM resource from handle %d\n",
ecm_ipa_ctx->ipa_rm_resource_name_cons);
return -EINVAL;
}
ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
ecm_ipa_ctx->ipa_rm_resource_name_cons);
ecm_ipa_ctx->ipa_rm_resource_name_prod =
ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
ECM_IPA_ERROR(
"Error getting PROD RM resource from handle %d\n",
ecm_ipa_ctx->ipa_rm_resource_name_prod);
return -EINVAL;
}
ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
ecm_ipa_ctx->ipa_rm_resource_name_prod);
retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
}
retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
if (retval) {
ECM_IPA_ERROR("fail on RM create\n");
goto fail_create_rm;
ECM_IPA_ERROR("fail register PM client\n");
return retval;
}
ECM_IPA_DEBUG("RM resource was created\n");
ECM_IPA_DEBUG("PM client registered\n");
retval = ecm_ipa_register_properties(ecm_ipa_ctx);
if (retval) {
ECM_IPA_ERROR("fail on properties set\n");
goto fail_create_rm;
goto fail_register_pm;
}
ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
@ -537,11 +500,8 @@ int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
fail:
ecm_ipa_deregister_properties();
fail_create_rm:
if (ipa_pm_is_used())
ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
else
ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
fail_register_pm:
ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
return retval;
}
EXPORT_SYMBOL(ecm_ipa_connect);
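
ecm_ipa_register_pm_client() (its body is outside this hunk) is the PM registration that replaces the RM resource creation removed above. A rough sketch of such a registration, assuming the ipa_pm_register()/struct ipa_pm_register_params interface referenced elsewhere in this commit (exact field names and the real function body may differ):

static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
{
	/* illustrative sketch, not the actual driver code */
	int result;
	struct ipa_pm_register_params pm_reg;

	memset(&pm_reg, 0, sizeof(pm_reg));
	pm_reg.name = ecm_ipa_ctx->net->name;
	pm_reg.user_data = ecm_ipa_ctx;
	pm_reg.callback = ecm_ipa_pm_cb;	/* restarts the TX queue when the client is activated */

	result = ipa_pm_register(&pm_reg, &ecm_ipa_ctx->pm_hdl);
	if (result) {
		ECM_IPA_ERROR("fail to register with IPA PM\n");
		return result;
	}
	ECM_IPA_DEBUG("PM client registered, hdl %u\n", ecm_ipa_ctx->pm_hdl);
	return 0;
}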
@ -592,10 +552,7 @@ static int ecm_ipa_open(struct net_device *net)
* in "send" state
* - The driver internal state is in "UP" state.
* - Filter Tx switch is turned off
* - The IPA resource manager state for the driver producer client
* is "Granted" which implies that all the resources in the dependency
* graph are valid for data flow.
* - outstanding high boundary did not reach.
* - Outstanding high boundary did not reach.
*
* In case all of the above conditions are met, the network driver will
* send the packet by using the IPA API for Tx.
@ -626,11 +583,11 @@ static netdev_tx_t ecm_ipa_start_xmit
return NETDEV_TX_BUSY;
}
ret = resource_request(ecm_ipa_ctx);
ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
if (ret) {
ECM_IPA_DEBUG("Waiting to resource\n");
ECM_IPA_DEBUG("Failed to activate PM client\n");
netif_stop_queue(net);
goto resource_busy;
goto fail_pm_activate;
}
if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >=
@ -662,8 +619,8 @@ static netdev_tx_t ecm_ipa_start_xmit
fail_tx_packet:
out:
resource_release(ecm_ipa_ctx);
resource_busy:
ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
fail_pm_activate:
return status;
}
@ -803,10 +760,7 @@ int ecm_ipa_disconnect(void *priv)
netif_stop_queue(ecm_ipa_ctx->net);
ECM_IPA_DEBUG("queue stopped\n");
if (ipa_pm_is_used())
ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
else
ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
outstanding_dropped_pkts =
atomic_read(&ecm_ipa_ctx->outstanding_pkts);
@ -831,8 +785,6 @@ EXPORT_SYMBOL(ecm_ipa_disconnect);
* needed anymore, e.g: when the USB composition does not support ECM.
* This function shall be called after the pipes were disconnected.
* Detailed description:
* - delete the driver dependency defined for IPA resource manager and
* destroy the producer resource.
* - remove the debugfs entries
* - deregister the network interface from Linux network stack
* - free all internal data structs
@ -1110,99 +1062,11 @@ static void ecm_ipa_deregister_properties(void)
* Returns negative errno, or zero on success
*/
static void ecm_ipa_rm_notify
(void *user_data, enum ipa_rm_event event, unsigned long data)
{
struct ecm_ipa_dev *ecm_ipa_ctx = user_data;
ECM_IPA_LOG_ENTRY();
if
(event == IPA_RM_RESOURCE_GRANTED &&
netif_queue_stopped(ecm_ipa_ctx->net)) {
ECM_IPA_DEBUG("Resource Granted - starting queue\n");
netif_start_queue(ecm_ipa_ctx->net);
} else {
ECM_IPA_DEBUG("Resource released\n");
}
ECM_IPA_LOG_EXIT();
}
static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net)
{
return &net->stats;
}
static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
{
struct ipa_rm_create_params create_params = {0};
struct ipa_rm_perf_profile profile;
int result;
ECM_IPA_LOG_ENTRY();
create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
create_params.reg_params.user_data = ecm_ipa_ctx;
create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
result = ipa_rm_create_resource(&create_params);
if (result) {
ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
goto fail_rm_create;
}
ECM_IPA_DEBUG("rm client was created");
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile);
result = ipa_rm_inactivity_timer_init
(IPA_RM_RESOURCE_STD_ECM_PROD,
INACTIVITY_MSEC_DELAY);
if (result) {
ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
goto fail_it;
}
ECM_IPA_DEBUG("rm_it client was created");
result = ipa_rm_add_dependency_sync
(IPA_RM_RESOURCE_STD_ECM_PROD,
ecm_ipa_ctx->ipa_rm_resource_name_cons);
if (result && result != -EINPROGRESS)
ECM_IPA_ERROR
("unable to add ECM/USB dependency (%d)\n", result);
result = ipa_rm_add_dependency_sync
(ecm_ipa_ctx->ipa_rm_resource_name_prod,
IPA_RM_RESOURCE_APPS_CONS);
if (result && result != -EINPROGRESS)
ECM_IPA_ERROR
("unable to add USB/APPS dependency (%d)\n", result);
ECM_IPA_DEBUG("rm dependency was set\n");
ECM_IPA_LOG_EXIT();
return 0;
fail_it:
fail_rm_create:
return result;
}
static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
{
int result;
ECM_IPA_LOG_ENTRY();
ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD,
ecm_ipa_ctx->ipa_rm_resource_name_cons);
ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod,
IPA_RM_RESOURCE_APPS_CONS);
ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
if (result)
ECM_IPA_ERROR("resource deletion failed\n");
ECM_IPA_LOG_EXIT();
}
static void ecm_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
{
struct ecm_ipa_dev *ecm_ipa_ctx = p;
@ -1246,23 +1110,6 @@ static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
ecm_ipa_ctx->pm_hdl = ~0;
}
static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
{
if (ipa_pm_is_used())
return ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
return ipa_rm_inactivity_timer_request_resource(
IPA_RM_RESOURCE_STD_ECM_PROD);
}
static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
{
if (ipa_pm_is_used())
ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
else
ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_STD_ECM_PROD);
}
/**
* ecm_ipa_tx_complete_notify() - Rx notify


@ -51,7 +51,6 @@
#define IPA_MHI_FUNC_EXIT() \
IPA_MHI_DBG("EXIT\n")
#define IPA_MHI_RM_TIMEOUT_MSEC 10000
#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
#define IPA_MHI_SUSPEND_SLEEP_MIN 900
@ -64,13 +63,6 @@
#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
enum ipa_mhi_rm_state {
IPA_MHI_RM_STATE_RELEASED,
IPA_MHI_RM_STATE_REQUESTED,
IPA_MHI_RM_STATE_GRANTED,
IPA_MHI_RM_STATE_MAX
};
enum ipa_mhi_state {
IPA_MHI_STATE_INITIALIZED,
IPA_MHI_STATE_READY,
@ -130,9 +122,6 @@ struct ipa_mhi_client_ctx {
spinlock_t state_lock;
mhi_client_cb cb_notify;
void *cb_priv;
struct completion rm_prod_granted_comp;
enum ipa_mhi_rm_state rm_cons_state;
struct completion rm_cons_comp;
bool trigger_wakeup;
bool wakeup_notified;
struct workqueue_struct *wq;
@ -534,81 +523,6 @@ static void ipa_mhi_notify_wakeup(void)
IPA_MHI_FUNC_EXIT();
}
/**
* ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
*
* In case IPA MHI is not suspended, MHI CONS will be granted immediately.
* In case IPA MHI is suspended, MHI CONS will be granted after resume.
*/
static int ipa_mhi_rm_cons_request(void)
{
unsigned long flags;
int res;
IPA_MHI_FUNC_ENTRY();
IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
res = 0;
} else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
ipa_mhi_notify_wakeup();
res = -EINPROGRESS;
} else if (ipa_mhi_client_ctx->state ==
IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
/* wakeup event will be trigger after suspend finishes */
ipa_mhi_client_ctx->trigger_wakeup = true;
res = -EINPROGRESS;
} else {
res = -EINPROGRESS;
}
spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
IPA_MHI_DBG("EXIT with %d\n", res);
return res;
}
static int ipa_mhi_rm_cons_release(void)
{
unsigned long flags;
IPA_MHI_FUNC_ENTRY();
spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
IPA_MHI_FUNC_EXIT();
return 0;
}
static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
IPA_MHI_FUNC_ENTRY();
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
break;
case IPA_RM_RESOURCE_RELEASED:
IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
break;
default:
IPA_MHI_ERR("unexpected event %d\n", event);
WARN_ON(1);
break;
}
IPA_MHI_FUNC_EXIT();
}
/**
* ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
*
@ -699,14 +613,6 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
} else if (new_state == IPA_MHI_STATE_STARTED) {
ipa_mhi_client_ctx->wakeup_notified = false;
ipa_mhi_client_ctx->trigger_wakeup = false;
if (ipa_mhi_client_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_client_ctx->rm_cons_state =
IPA_MHI_RM_STATE_GRANTED;
}
res = 0;
}
break;
@ -726,14 +632,6 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
} else if (new_state == IPA_MHI_STATE_STARTED) {
ipa_mhi_client_ctx->trigger_wakeup = false;
ipa_mhi_client_ctx->wakeup_notified = false;
if (ipa_mhi_client_ctx->rm_cons_state ==
IPA_MHI_RM_STATE_REQUESTED) {
ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_client_ctx->rm_cons_state =
IPA_MHI_RM_STATE_GRANTED;
}
res = 0;
}
break;
@ -781,48 +679,6 @@ static void ipa_mhi_uc_wakeup_request_cb(void)
IPA_MHI_FUNC_EXIT();
}
static int ipa_mhi_request_prod(void)
{
int res;
IPA_MHI_FUNC_ENTRY();
reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
IPA_MHI_DBG("requesting mhi prod\n");
res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
if (res) {
if (res != -EINPROGRESS) {
IPA_MHI_ERR("failed to request mhi prod %d\n", res);
return res;
}
res = wait_for_completion_timeout(
&ipa_mhi_client_ctx->rm_prod_granted_comp,
msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
if (res == 0) {
IPA_MHI_ERR("timeout request mhi prod\n");
return -ETIME;
}
}
IPA_MHI_DBG("mhi prod granted\n");
IPA_MHI_FUNC_EXIT();
return 0;
}
static int ipa_mhi_release_prod(void)
{
int res;
IPA_MHI_FUNC_ENTRY();
res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
IPA_MHI_FUNC_EXIT();
return res;
}
/**
* ipa_mhi_start() - Start IPA MHI engine
* @params: pcie addresses for MHI
@ -873,38 +729,15 @@ int ipa_mhi_start(struct ipa_mhi_start_params *params)
IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
ipa_mhi_client_ctx->event_context_array_addr);
if (ipa_pm_is_used()) {
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("failed activate client %d\n", res);
goto fail_pm_activate;
}
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("failed activate modem client %d\n", res);
goto fail_pm_activate_modem;
}
} else {
/* Add MHI <-> Q6 dependencies to IPA RM */
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
goto fail_add_mhi_q6_dep;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
if (res && res != -EINPROGRESS) {
IPA_MHI_ERR("failed to add dependency %d\n", res);
goto fail_add_q6_mhi_dep;
}
res = ipa_mhi_request_prod();
if (res) {
IPA_MHI_ERR("failed request prod %d\n", res);
goto fail_request_prod;
}
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("failed activate client %d\n", res);
goto fail_pm_activate;
}
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("failed activate modem client %d\n", res);
goto fail_pm_activate_modem;
}
/* gsi params */
@ -932,22 +765,9 @@ int ipa_mhi_start(struct ipa_mhi_start_params *params)
return 0;
fail_init_engine:
if (!ipa_pm_is_used())
ipa_mhi_release_prod();
fail_request_prod:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
fail_add_q6_mhi_dep:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
fail_add_mhi_q6_dep:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
return res;
@ -1724,31 +1544,6 @@ fail_reset_channel:
return res;
}
static int ipa_mhi_wait_for_cons_release(void)
{
unsigned long flags;
int res;
IPA_MHI_FUNC_ENTRY();
reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
return 0;
}
spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
res = wait_for_completion_timeout(
&ipa_mhi_client_ctx->rm_cons_comp,
msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
if (res == 0) {
IPA_MHI_ERR("timeout release mhi cons\n");
return -ETIME;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
int max_channels)
{
@ -2179,35 +1974,20 @@ int ipa_mhi_suspend(bool force)
/*
* hold IPA clocks and release them after all
* IPA RM resource are released to make sure tag process will not start
* IPA PM clients are deactivated to make sure tag process
* will not start
*/
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (ipa_pm_is_used()) {
res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("fail to deactivate client %d\n", res);
goto fail_deactivate_pm;
}
res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("fail to deactivate client %d\n", res);
goto fail_deactivate_modem_pm;
}
} else {
IPA_MHI_DBG("release prod\n");
res = ipa_mhi_release_prod();
if (res) {
IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
goto fail_release_prod;
}
IPA_MHI_DBG("wait for cons release\n");
res = ipa_mhi_wait_for_cons_release();
if (res) {
IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n");
goto fail_release_cons;
}
res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("fail to deactivate client %d\n", res);
goto fail_deactivate_pm;
}
res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("fail to deactivate client %d\n", res);
goto fail_deactivate_modem_pm;
}
usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
@ -2225,14 +2005,9 @@ int ipa_mhi_suspend(bool force)
return 0;
fail_release_cons:
if (!ipa_pm_is_used())
ipa_mhi_request_prod();
fail_release_prod:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_deactivate_modem_pm:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_deactivate_pm:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
@ -2262,7 +2037,7 @@ fail_suspend_dl_channel:
* When this function returns device can move to M0 state.
* This function is doing the following:
* - Send command to uC/GSI to resume corresponding MHI channel
* - Request MHI_PROD in IPA RM
* - Activate PM clients
* - Resume data to IPA
*
* Return codes: 0 : success
@ -2271,7 +2046,6 @@ fail_suspend_dl_channel:
int ipa_mhi_resume(void)
{
int res;
bool dl_channel_resumed = false;
IPA_MHI_FUNC_ENTRY();
@ -2281,40 +2055,16 @@ int ipa_mhi_resume(void)
return res;
}
if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
/* resume all DL channels */
res = ipa_mhi_resume_channels(false,
ipa_mhi_client_ctx->dl_channels,
IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
res);
goto fail_resume_dl_channels;
}
dl_channel_resumed = true;
ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
IPA_RM_RESOURCE_MHI_CONS);
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("fail to activate client %d\n", res);
goto fail_pm_activate;
}
if (ipa_pm_is_used()) {
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
if (res) {
IPA_MHI_ERR("fail to activate client %d\n", res);
goto fail_pm_activate;
}
ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("fail to activate client %d\n", res);
goto fail_pm_activate_modem;
}
} else {
res = ipa_mhi_request_prod();
if (res) {
IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
goto fail_request_prod;
}
res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
if (res) {
IPA_MHI_ERR("fail to activate client %d\n", res);
goto fail_pm_activate_modem;
}
/* resume all UL channels */
@ -2326,15 +2076,13 @@ int ipa_mhi_resume(void)
goto fail_resume_ul_channels;
}
if (!dl_channel_resumed) {
res = ipa_mhi_resume_channels(false,
ipa_mhi_client_ctx->dl_channels,
IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
res);
goto fail_resume_dl_channels2;
}
res = ipa_mhi_resume_channels(false,
ipa_mhi_client_ctx->dl_channels,
IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
res);
goto fail_resume_dl_channels;
}
if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
@ -2352,22 +2100,14 @@ int ipa_mhi_resume(void)
fail_set_state:
ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
IPA_MHI_MAX_DL_CHANNELS);
fail_resume_dl_channels2:
fail_resume_dl_channels:
ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
IPA_MHI_MAX_UL_CHANNELS);
fail_resume_ul_channels:
if (!ipa_pm_is_used())
ipa_mhi_release_prod();
fail_request_prod:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
fail_pm_activate_modem:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
IPA_MHI_MAX_DL_CHANNELS);
fail_resume_dl_channels:
ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
return res;
}
@ -2449,74 +2189,6 @@ static void ipa_mhi_debugfs_destroy(void)
debugfs_remove_recursive(dent);
}
static void ipa_mhi_delete_rm_resources(void)
{
int res;
if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED &&
ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
IPA_MHI_DBG("release prod\n");
res = ipa_mhi_release_prod();
if (res) {
IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n",
res);
goto fail;
}
IPA_MHI_DBG("wait for cons release\n");
res = ipa_mhi_wait_for_cons_release();
if (res) {
IPA_MHI_ERR("ipa_mhi_wait_for_cons_release%d\n",
res);
goto fail;
}
usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
IPA_MHI_SUSPEND_SLEEP_MAX);
IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS);
if (res) {
IPA_MHI_ERR(
"Error deleting dependency %d->%d, res=%d\n",
IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_MHI_CONS,
res);
goto fail;
}
IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res) {
IPA_MHI_ERR(
"Error deleting dependency %d->%d, res=%d\n",
IPA_RM_RESOURCE_MHI_PROD,
IPA_RM_RESOURCE_Q6_CONS,
res);
goto fail;
}
}
res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
if (res) {
IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
IPA_RM_RESOURCE_MHI_PROD, res);
goto fail;
}
res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
if (res) {
IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
IPA_RM_RESOURCE_MHI_CONS, res);
goto fail;
}
return;
fail:
ipa_assert();
}
static void ipa_mhi_deregister_pm(void)
{
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
@ -2563,11 +2235,7 @@ void ipa_mhi_destroy(void)
ipa_uc_mhi_cleanup();
}
if (ipa_pm_is_used())
ipa_mhi_deregister_pm();
else
ipa_mhi_delete_rm_resources();
ipa_mhi_deregister_pm();
ipa_dma_destroy();
ipa_mhi_debugfs_destroy();
destroy_workqueue(ipa_mhi_client_ctx->wq);
@ -2653,60 +2321,6 @@ fail_pm_cons:
return res;
}
static int ipa_mhi_create_rm_resources(void)
{
int res;
struct ipa_rm_create_params mhi_prod_params;
struct ipa_rm_create_params mhi_cons_params;
struct ipa_rm_perf_profile profile;
/* Create PROD in IPA RM */
memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
res = ipa_rm_create_resource(&mhi_prod_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
goto fail_create_rm_prod;
}
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = 1000;
res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
if (res) {
IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
goto fail_perf_rm_prod;
}
/* Create CONS in IPA RM */
memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
res = ipa_rm_create_resource(&mhi_cons_params);
if (res) {
IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
goto fail_create_rm_cons;
}
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = 1000;
res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
if (res) {
IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
goto fail_perf_rm_cons;
}
fail_perf_rm_cons:
ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
fail_create_rm_cons:
fail_perf_rm_prod:
ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
fail_create_rm_prod:
return res;
}
/**
* ipa_mhi_init() - Initialize IPA MHI driver
* @params: initialization params
@ -2715,7 +2329,7 @@ fail_create_rm_prod:
* Driver. When this function returns device can move to READY state.
* This function is doing the following:
* - Initialize MHI IPA internal data structures
* - Create IPA RM resources
* - Register with PM
* - Initialize debugfs
*
* Return codes: 0 : success
@ -2763,10 +2377,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
ipa_mhi_client_ctx->cb_notify = params->notify;
ipa_mhi_client_ctx->cb_priv = params->priv;
ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
spin_lock_init(&ipa_mhi_client_ctx->state_lock);
init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
ipa_mhi_client_ctx->msi = params->msi;
ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
@ -2789,14 +2400,11 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
goto fail_dma_init;
}
if (ipa_pm_is_used())
res = ipa_mhi_register_pm();
else
res = ipa_mhi_create_rm_resources();
res = ipa_mhi_register_pm();
if (res) {
IPA_MHI_ERR("failed to create RM resources\n");
IPA_MHI_ERR("failed to create PM resources\n");
res = -EFAULT;
goto fail_rm;
goto fail_pm;
}
if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
@ -2818,7 +2426,7 @@ int ipa_mhi_init(struct ipa_mhi_init_params *params)
IPA_MHI_FUNC_EXIT();
return 0;
fail_rm:
fail_pm:
ipa_dma_destroy();
fail_dma_init:
destroy_workqueue(ipa_mhi_client_ctx->wq);


@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_uc_offload.h>
@ -69,9 +69,6 @@ struct ipa_uc_offload_ctx {
static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
static int ipa_uc_ntn_cons_release(void);
static int ipa_uc_ntn_cons_request(void);
static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
static int ipa_commit_partial_hdr(
struct ipa_ioc_add_hdr *hdr,
@ -150,36 +147,6 @@ static void ipa_uc_offload_ntn_deregister_pm_client(
ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
ipa_pm_deregister(ntn_ctx->pm_hdl);
}
static int ipa_uc_offload_ntn_create_rm_resources(
struct ipa_uc_offload_ctx *ntn_ctx)
{
int ret;
struct ipa_rm_create_params param;
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
param.reg_params.user_data = ntn_ctx;
param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
param.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
return -EFAULT;
}
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
param.request_resource = ipa_uc_ntn_cons_request;
param.release_resource = ipa_uc_ntn_cons_release;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
return -EFAULT;
}
return 0;
}
static int ipa_uc_offload_ntn_reg_intf(
struct ipa_uc_offload_intf_params *inp,
@ -197,12 +164,9 @@ static int ipa_uc_offload_ntn_reg_intf(
IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
inp->netdev_name);
if (ipa_pm_is_used())
ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
else
ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
IPA_UC_OFFLOAD_ERR("fail to register PM client\n");
return -EFAULT;
}
memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
@ -282,12 +246,7 @@ static int ipa_uc_offload_ntn_reg_intf(
fail:
kfree(hdr);
fail_alloc:
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
}
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
return ret;
}
@ -336,56 +295,6 @@ int ipa_uc_offload_reg_intf(
}
EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
static int ipa_uc_ntn_cons_release(void)
{
return 0;
}
static int ipa_uc_ntn_cons_request(void)
{
int ret = 0;
struct ipa_uc_offload_ctx *ntn_ctx;
ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
if (!ntn_ctx) {
IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
ret = -EFAULT;
} else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
ret = -EFAULT;
}
return ret;
}
static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
struct ipa_uc_offload_ctx *offload_ctx;
offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
IPA_UC_OFFLOAD_ERR("Invalid user data\n");
return;
}
if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
complete_all(&offload_ctx->ntn_completion);
break;
case IPA_RM_RESOURCE_RELEASED:
break;
default:
IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
break;
}
}
static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
struct ipa_ntn_setup_info *source)
@ -456,34 +365,10 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
return -EINVAL;
}
if (ipa_pm_is_used()) {
result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
return result;
}
} else {
result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n",
result);
return result;
}
result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (result == -EINPROGRESS) {
if (wait_for_completion_timeout(&ntn_ctx->ntn_completion
, 10*HZ) == 0) {
IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n");
result = -EFAULT;
goto fail;
}
} else if (result != 0) {
IPA_UC_OFFLOAD_ERR("fail to request resource\n");
result = -EFAULT;
goto fail;
}
result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
return result;
}
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
@ -514,9 +399,6 @@ int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
}
fail:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
return result;
}
@ -565,42 +447,6 @@ int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
}
EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
int ipa_set_perf_profile(struct ipa_perf_profile *profile)
{
struct ipa_rm_perf_profile rm_profile;
enum ipa_rm_resource_name resource_name;
if (profile == NULL) {
IPA_UC_OFFLOAD_ERR("Invalid input\n");
return -EINVAL;
}
rm_profile.max_supported_bandwidth_mbps =
profile->max_supported_bw_mbps;
if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
} else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
} else {
IPA_UC_OFFLOAD_ERR("not supported\n");
return -EINVAL;
}
if (ipa_pm_is_used())
return ipa_pm_set_throughput(
ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
profile->max_supported_bw_mbps);
if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
return -EFAULT;
}
return 0;
}
EXPORT_SYMBOL(ipa_set_perf_profile);
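
The removed ipa_set_perf_profile() combined an RM perf-profile path with a PM throughput path; with RM gone, the bandwidth vote reduces to a single ipa_pm_set_throughput() call on the NTN PM handle. One way the same vote can be expressed with the PM API only (a sketch; whether this commit keeps the wrapper elsewhere or drops it entirely is not visible in this hunk):

int ipa_set_perf_profile(struct ipa_perf_profile *profile)
{
	if (!profile) {
		IPA_UC_OFFLOAD_ERR("Invalid input\n");
		return -EINVAL;
	}

	/* only the Ethernet clients are supported, as in the removed code */
	if (profile->client != IPA_CLIENT_ETHERNET_PROD &&
	    profile->client != IPA_CLIENT_ETHERNET_CONS) {
		IPA_UC_OFFLOAD_ERR("not supported\n");
		return -EINVAL;
	}

	/* single throughput vote on the NTN PM client handle */
	return ipa_pm_set_throughput(ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
				     profile->max_supported_bw_mbps);
}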
static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
{
int ipa_ep_idx_ul, ipa_ep_idx_dl;
@ -612,28 +458,11 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
}
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
if (ipa_pm_is_used()) {
ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
ret);
return -EFAULT;
}
} else {
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
ret);
return -EFAULT;
}
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
return -EFAULT;
}
ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
ret);
return -EFAULT;
}
ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
@ -695,19 +524,7 @@ static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
int len, result = 0;
struct ipa_ioc_del_hdr *hdr;
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
return -EFAULT;
}
}
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
hdr = kzalloc(len, GFP_KERNEL);


@ -14,7 +14,6 @@
#include "../ipa_v3/ipa_i.h"
#include "../ipa_rm_i.h"
#define IPA_USB_RM_TIMEOUT_MSEC 10000
#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
#define IPA_HOLB_TMR_EN 0x1
@ -99,24 +98,6 @@ struct ipa3_usb_teth_prot_context {
void *user_data;
};
enum ipa3_usb_cons_state {
IPA_USB_CONS_GRANTED,
IPA_USB_CONS_RELEASED
};
struct ipa3_usb_rm_context {
struct ipa_rm_create_params prod_params;
struct ipa_rm_create_params cons_params;
bool prod_valid;
bool cons_valid;
struct completion prod_comp;
enum ipa3_usb_cons_state cons_state;
/* consumer was requested*/
bool cons_requested;
/* consumer was requested and released before it was granted*/
bool cons_requested_released;
};
struct ipa3_usb_pm_context {
struct ipa_pm_register_params reg_params;
struct work_struct *remote_wakeup_work;
@ -157,10 +138,9 @@ struct ipa3_usb_teth_prot_conn_params {
/**
* Transport type - could be either data tethering or DPL
* Each transport has it's own RM resources and statuses
* Each transport has it's own PM resources and statuses
*/
struct ipa3_usb_transport_type_ctx {
struct ipa3_usb_rm_context rm_ctx;
struct ipa3_usb_pm_context pm_ctx;
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
void *user_data;
@ -231,8 +211,6 @@ struct ipa3_usb_status_dbg_info {
const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
const char *teth_connected_prot;
const char *dpl_connected_prot;
const char *teth_cons_state;
const char *dpl_cons_state;
};
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
@ -326,7 +304,6 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
int state_legal = false;
enum ipa3_usb_state state;
bool rwakeup_pending;
struct ipa3_usb_rm_context *rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
state = ipa3_usb_ctx->ttype_ctx[ttype].state;
@ -404,25 +381,6 @@ static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
ipa3_usb_state_to_string(new_state));
}
if (!ipa_pm_is_used() &&
state_legal && (new_state == IPA_USB_CONNECTED)) {
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
rm_ctx->cons_requested_released) {
rm_ctx->cons_requested = false;
rm_ctx->cons_requested_released =
false;
}
/* Notify RM that consumer is granted */
if (rm_ctx->cons_requested) {
ipa_rm_notify_completion(
IPA_RM_RESOURCE_GRANTED,
rm_ctx->cons_params.name);
rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
rm_ctx->cons_requested = false;
}
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
return state_legal;
}
@ -542,42 +500,6 @@ void ipa3_usb_device_ready_notify_cb(void)
IPA_USB_DBG_LOW("exit\n");
}
static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
enum ipa3_usb_transport_type ttype)
{
struct ipa3_usb_rm_context *rm_ctx;
IPA_USB_DBG_LOW("entry\n");
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
IPA_USB_DBG(":%s granted\n",
ipa_rm_resource_str(rm_ctx->prod_params.name));
complete_all(&rm_ctx->prod_comp);
break;
case IPA_RM_RESOURCE_RELEASED:
IPA_USB_DBG(":%s released\n",
ipa_rm_resource_str(rm_ctx->prod_params.name));
complete_all(&rm_ctx->prod_comp);
break;
}
IPA_USB_DBG_LOW("exit\n");
}
static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
}
static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data,
enum ipa_rm_event event, unsigned long data)
{
ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
}
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
{
bool rwakeup_pending;
@ -585,6 +507,7 @@ static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
enum ipa3_usb_transport_type ttype =
IPA_USB_TRANSPORT_TETH;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
rwakeup_pending =
ipa3_usb_ctx->ttype_ctx[ttype].rwakeup_pending;
@ -618,116 +541,6 @@ static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
}
static int ipa3_usb_cons_request_resource_cb_do(
enum ipa3_usb_transport_type ttype,
struct work_struct *remote_wakeup_work)
{
struct ipa3_usb_rm_context *rm_ctx;
unsigned long flags;
int result;
IPA_USB_DBG_LOW("entry\n");
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
IPA_USB_DBG("state is %s\n",
ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[ttype].state));
switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
case IPA_USB_CONNECTED:
case IPA_USB_SUSPENDED_NO_RWAKEUP:
rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
result = 0;
break;
case IPA_USB_SUSPEND_REQUESTED:
rm_ctx->cons_requested = true;
if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED)
result = 0;
else
result = -EINPROGRESS;
break;
case IPA_USB_SUSPENDED:
if (!rm_ctx->cons_requested) {
rm_ctx->cons_requested = true;
queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
}
result = -EINPROGRESS;
break;
default:
rm_ctx->cons_requested = true;
result = -EINPROGRESS;
break;
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
IPA_USB_DBG_LOW("exit with %d\n", result);
return result;
}
static int ipa3_usb_cons_request_resource_cb(void)
{
return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH,
&ipa3_usb_notify_remote_wakeup_work);
}
static int ipa3_usb_dpl_cons_request_resource_cb(void)
{
return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL,
&ipa3_usb_dpl_notify_remote_wakeup_work);
}
static int ipa3_usb_cons_release_resource_cb_do(
enum ipa3_usb_transport_type ttype)
{
unsigned long flags;
struct ipa3_usb_rm_context *rm_ctx;
IPA_USB_DBG_LOW("entry\n");
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
IPA_USB_DBG("state is %s\n",
ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[ttype].state));
switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
case IPA_USB_SUSPENDED:
/* Proceed with the suspend if no DL/DPL data */
if (rm_ctx->cons_requested)
rm_ctx->cons_requested_released = true;
break;
case IPA_USB_SUSPEND_REQUESTED:
if (rm_ctx->cons_requested)
rm_ctx->cons_requested_released = true;
break;
case IPA_USB_STOPPED:
case IPA_USB_RESUME_IN_PROGRESS:
case IPA_USB_SUSPENDED_NO_RWAKEUP:
if (rm_ctx->cons_requested)
rm_ctx->cons_requested = false;
break;
case IPA_USB_CONNECTED:
case IPA_USB_INITIALIZED:
break;
default:
IPA_USB_ERR("received cons_release_cb in bad state: %s!\n",
ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[ttype].state));
WARN_ON(1);
break;
}
rm_ctx->cons_state = IPA_USB_CONS_RELEASED;
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
IPA_USB_DBG_LOW("exit\n");
return 0;
}
static int ipa3_usb_cons_release_resource_cb(void)
{
return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH);
}
static int ipa3_usb_dpl_cons_release_resource_cb(void)
{
return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
}
static void ipa3_usb_pm_cb(void *p, enum ipa_pm_cb_event event)
{
@ -857,76 +670,6 @@ static int ipa3_usb_deregister_pm(enum ipa3_usb_transport_type ttype)
return 0;
}
static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
{
struct ipa3_usb_rm_context *rm_ctx;
int result = -EFAULT;
bool created = false;
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
/* create PROD */
if (!rm_ctx->prod_valid) {
rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD :
IPA_RM_RESOURCE_USB_PROD;
rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS2;
rm_ctx->prod_params.reg_params.user_data = NULL;
rm_ctx->prod_params.reg_params.notify_cb =
IPA3_USB_IS_TTYPE_DPL(ttype) ?
ipa3_usb_dpl_dummy_prod_notify_cb :
ipa3_usb_prod_notify_cb;
rm_ctx->prod_params.request_resource = NULL;
rm_ctx->prod_params.release_resource = NULL;
result = ipa_rm_create_resource(&rm_ctx->prod_params);
if (result) {
IPA_USB_ERR("Failed to create %s RM resource\n",
ipa_rm_resource_str(rm_ctx->prod_params.name));
return result;
}
rm_ctx->prod_valid = true;
created = true;
IPA_USB_DBG("Created %s RM resource\n",
ipa_rm_resource_str(rm_ctx->prod_params.name));
}
/* Create CONS */
if (!rm_ctx->cons_valid) {
rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
IPA_RM_RESOURCE_USB_DPL_CONS :
IPA_RM_RESOURCE_USB_CONS;
rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS2;
rm_ctx->cons_params.reg_params.user_data = NULL;
rm_ctx->cons_params.reg_params.notify_cb = NULL;
rm_ctx->cons_params.request_resource =
IPA3_USB_IS_TTYPE_DPL(ttype) ?
ipa3_usb_dpl_cons_request_resource_cb :
ipa3_usb_cons_request_resource_cb;
rm_ctx->cons_params.release_resource =
IPA3_USB_IS_TTYPE_DPL(ttype) ?
ipa3_usb_dpl_cons_release_resource_cb :
ipa3_usb_cons_release_resource_cb;
result = ipa_rm_create_resource(&rm_ctx->cons_params);
if (result) {
IPA_USB_ERR("Failed to create %s RM resource\n",
ipa_rm_resource_str(rm_ctx->cons_params.name));
goto create_cons_rsc_fail;
}
rm_ctx->cons_valid = true;
IPA_USB_DBG("Created %s RM resource\n",
ipa_rm_resource_str(rm_ctx->cons_params.name));
}
return 0;
create_cons_rsc_fail:
if (created) {
rm_ctx->prod_valid = false;
ipa_rm_delete_resource(rm_ctx->prod_params.name);
}
return result;
}
static bool ipa3_usb_is_teth_switch_valid(enum ipa_usb_teth_prot new_teth)
{
enum ipa_usb_teth_prot old_teth;
@ -1018,14 +761,11 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
goto bad_params;
}
/* Create IPA RM USB resources */
/* Register with IPA PM */
teth_prot_ptr = &ipa3_usb_ctx->teth_prot_ctx[teth_prot];
if (ipa_pm_is_used())
result = ipa3_usb_register_pm(ttype);
else
result = ipa3_usb_create_rm_resources(ttype);
result = ipa3_usb_register_pm(ttype);
if (result) {
IPA_USB_ERR("Failed creating IPA RM USB resources\n");
IPA_USB_ERR("Failed registering IPA PM\n");
goto bad_params;
}
@ -1160,18 +900,7 @@ int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
teth_prot_init_fail:
if ((IPA3_USB_IS_TTYPE_DPL(ttype))
|| (ipa3_usb_ctx->num_init_prot == 0)) {
if (ipa_pm_is_used()) {
ipa3_usb_deregister_pm(ttype);
} else {
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
false;
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
false;
ipa_rm_delete_resource(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
ipa_rm_delete_resource(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
}
ipa3_usb_deregister_pm(ttype);
}
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
@ -1553,66 +1282,6 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
return 0;
}
static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
{
int result;
struct ipa3_usb_rm_context *rm_ctx;
const char *rsrc_str;
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
init_completion(&rm_ctx->prod_comp);
result = ipa_rm_request_resource(rm_ctx->prod_params.name);
if (result) {
if (result != -EINPROGRESS) {
IPA_USB_ERR("failed to request %s: %d\n",
rsrc_str, result);
return result;
}
result = wait_for_completion_timeout(&rm_ctx->prod_comp,
msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
if (result == 0) {
IPA_USB_ERR("timeout request %s\n", rsrc_str);
return -ETIME;
}
}
IPA_USB_DBG_LOW("%s granted\n", rsrc_str);
return 0;
}
static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
{
int result;
struct ipa3_usb_rm_context *rm_ctx;
const char *rsrc_str;
rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
init_completion(&rm_ctx->prod_comp);
result = ipa_rm_release_resource(rm_ctx->prod_params.name);
if (result) {
if (result != -EINPROGRESS) {
IPA_USB_ERR("failed to release %s: %d\n",
rsrc_str, result);
return result;
}
result = wait_for_completion_timeout(&rm_ctx->prod_comp,
msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
if (result == 0) {
IPA_USB_ERR("timeout release %s\n", rsrc_str);
return -ETIME;
}
}
IPA_USB_DBG_LOW("%s released\n", rsrc_str);
return 0;
}
static bool ipa3_usb_check_connect_params(
struct ipa_usb_xdci_connect_params_internal *params)
@ -1660,41 +1329,6 @@ static int ipa3_usb_connect_teth_bridge(
return 0;
}
static int ipa3_usb_connect_dpl(void)
{
int res = 0;
if (ipa_pm_is_used())
return 0;
/*
* Add DPL dependency to RM dependency graph, first add_dependency call
* is sync in order to make sure the IPA clocks are up before we
* continue and notify the USB driver it may continue.
*/
res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0) {
IPA_USB_ERR("ipa_rm_add_dependency_sync() failed\n");
return res;
}
/*
* this add_dependency call can't be sync since it will block until DPL
* status is connected (which can happen only later in the flow),
* the clocks are already up so the call doesn't need to block.
*/
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_DPL_CONS);
if (res < 0 && res != -EINPROGRESS) {
IPA_USB_ERR("ipa_rm_add_dependency() failed\n");
ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
return res;
}
return 0;
}
static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
@ -1807,13 +1441,6 @@ static int ipa3_usb_connect_teth_prot(enum ipa_usb_teth_prot teth_prot)
ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data;
result = ipa3_usb_connect_dpl();
if (result) {
IPA_USB_ERR("Failed connecting DPL result=%d\n",
result);
ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
IPA_USB_TETH_PROT_CONNECTED;
ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
@ -1841,27 +1468,6 @@ static int ipa3_usb_disconnect_teth_bridge(void)
return 0;
}
static int ipa3_usb_disconnect_dpl(void)
{
int res;
if (ipa_pm_is_used())
return 0;
/* Remove DPL RM dependency */
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res)
IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n");
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_DPL_CONS);
if (res)
IPA_USB_ERR("deleting DPL_CONS rsrc dependencty fail\n");
return 0;
}
static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
int result = 0;
@ -1919,9 +1525,6 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
ipa3_usb_teth_prot_to_string(teth_prot));
return -EPERM;
}
result = ipa3_usb_disconnect_dpl();
if (result)
break;
teth_prot_ptr->state = IPA_USB_TETH_PROT_INITIALIZED;
IPA_USB_DBG("disconnected %s\n",
ipa3_usb_teth_prot_to_string(teth_prot));
@ -1938,11 +1541,8 @@ static int ipa3_usb_xdci_connect_internal(
struct ipa_usb_xdci_connect_params_internal *params)
{
int result = -EFAULT;
struct ipa_rm_perf_profile profile;
enum ipa3_usb_transport_type ttype;
struct ipa3_usb_teth_prot_conn_params *teth_prot_ptr;
struct ipa3_usb_rm_context *rm_ctx_ptr;
struct ipa3_usb_transport_type_ctx *t_ctx_ptr;
IPA_USB_DBG_LOW("entry\n");
if (params == NULL || !ipa3_usb_check_connect_params(params)) {
@ -1960,7 +1560,6 @@ static int ipa3_usb_xdci_connect_internal(
teth_prot_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].teth_conn_params;
teth_prot_ptr->ipa_to_usb_clnt_hdl = params->ipa_to_usb_clnt_hdl;
rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
if (!IPA3_USB_IS_TTYPE_DPL(ttype))
teth_prot_ptr->usb_to_ipa_clnt_hdl =
@ -1973,57 +1572,22 @@ static int ipa3_usb_xdci_connect_internal(
IPA_USB_ERR("failed setting xDCI EE scratch field\n");
return result;
}
if (ipa_pm_is_used()) {
/* perf profile is not set on USB DPL pipe */
if (ttype != IPA_USB_TRANSPORT_DPL) {
result = ipa_pm_set_throughput(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
params->max_supported_bandwidth_mbps);
if (result) {
IPA_USB_ERR("failed to set perf profile\n");
return result;
}
}
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
/* perf profile is not set on USB DPL pipe */
if (ttype != IPA_USB_TRANSPORT_DPL) {
result = ipa_pm_set_throughput(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
params->max_supported_bandwidth_mbps);
if (result) {
IPA_USB_ERR("failed to activate pm\n");
IPA_USB_ERR("failed to set pm throughput\n");
return result;
}
} else {
/* Set RM PROD & CONS perf profile */
profile.max_supported_bandwidth_mbps =
params->max_supported_bandwidth_mbps;
result = ipa_rm_set_perf_profile(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
&profile);
}
t_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[ttype];
if (result) {
IPA_USB_ERR("failed to set %s perf profile\n",
ipa_rm_resource_str(
t_ctx_ptr->rm_ctx.prod_params.name));
return result;
}
result = ipa_rm_set_perf_profile(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
&profile);
if (result) {
IPA_USB_ERR("failed to set %s perf profile\n",
ipa_rm_resource_str(
t_ctx_ptr->rm_ctx.cons_params.name));
return result;
}
/* Request PROD */
result = ipa3_usb_request_prod(ttype);
if (result)
return result;
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result) {
IPA_USB_ERR("failed to activate pm\n");
return result;
}
if (params->teth_prot != IPA_USB_DIAG) {
@ -2087,35 +1651,19 @@ connect_dl_fail:
ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
}
connect_ul_fail:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
ipa3_usb_release_prod(ttype);
return result;
}
#ifdef CONFIG_DEBUG_FS
static char dbg_buff[IPA_USB_MAX_MSG_LEN];
static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state)
{
switch (state) {
case IPA_USB_CONS_GRANTED:
return "CONS_GRANTED";
case IPA_USB_CONS_RELEASED:
return "CONS_RELEASED";
}
return "UNSUPPORTED";
}
static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
{
int res;
int i;
unsigned long flags;
struct ipa3_usb_rm_context *rm_ctx_ptr;
IPA_USB_DBG_LOW("entry\n");
@ -2135,18 +1683,10 @@ static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx;
status->teth_state = ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
status->dpl_state = ipa3_usb_state_to_string(
ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
if (rm_ctx_ptr->cons_valid)
status->teth_cons_state = ipa3_usb_cons_state_to_string(
rm_ctx_ptr->cons_state);
rm_ctx_ptr = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx;
if (rm_ctx_ptr->cons_valid)
status->dpl_cons_state = ipa3_usb_cons_state_to_string(
rm_ctx_ptr->cons_state);
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) {
@ -2236,18 +1776,6 @@ static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
(status.teth_connected_prot ||
status.dpl_connected_prot) ? "\n" : "None\n");
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
"USB Tethering Consumer State: %s\n",
status.teth_cons_state ?
status.teth_cons_state : "Invalid");
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
"DPL Consumer State: %s\n",
status.dpl_cons_state ? status.dpl_cons_state :
"Invalid");
cnt += nbytes;
}
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
@ -2558,13 +2086,10 @@ int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto bad_params;
if (orig_state != IPA_USB_SUSPENDED) {
if (ipa_pm_is_used())
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
result = ipa3_usb_release_prod(ttype);
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result) {
IPA_USB_ERR("failed to release PROD\n");
IPA_USB_ERR("failed to deactivate PM\n");
goto bad_params;
}
}
@ -2669,20 +2194,8 @@ int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
IPA_USB_ERR(
"failed to change state to invalid\n");
if (ipa_pm_is_used()) {
ipa3_usb_deregister_pm(ttype);
ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
} else {
ipa_rm_delete_resource(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
false;
ipa_rm_delete_resource(
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
false;
ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
}
ipa3_usb_deregister_pm(ttype);
ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
}
IPA_USB_DBG_LOW("exit\n");
@ -2751,13 +2264,10 @@ static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result)
goto enable_mhip;
if (ipa_pm_is_used())
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
result = ipa3_usb_release_prod(ttype);
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result) {
IPA_USB_ERR("failed to release PROD\n");
IPA_USB_ERR("failed to deactivate PM\n");
goto connect_teth;
}
@ -2786,7 +2296,6 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup)
{
int result = 0;
unsigned long flags;
enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
@ -2835,50 +2344,23 @@ int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
}
ipa3_usb_ctx->qmi_req_id++;
if (ipa_pm_is_used())
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
result = ipa3_usb_release_prod(ttype);
result = ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result) {
IPA_USB_ERR("failed to release PROD\n");
goto release_prod_fail;
IPA_USB_ERR("failed to deactivate PM IPA client\n");
goto pm_deactivate_fail;
}
/* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
IPA_USB_CONS_GRANTED &&
ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
queue_work(ipa3_usb_ctx->wq,
IPA3_USB_IS_TTYPE_DPL(ttype) ?
&ipa3_usb_dpl_notify_remote_wakeup_work :
&ipa3_usb_notify_remote_wakeup_work);
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
/* Change state to SUSPENDED */
if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
IPA_USB_ERR("failed to change state to suspended\n");
/* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
IPA_USB_DBG_LOW(
"DL/DPL data is pending, invoking remote wakeup\n");
queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ?
&ipa3_usb_dpl_notify_remote_wakeup_work :
&ipa3_usb_notify_remote_wakeup_work);
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
release_prod_fail:
pm_deactivate_fail:
ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
IPA3_USB_IS_TTYPE_DPL(ttype));
suspend_fail:
@ -2904,12 +2386,9 @@ static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
IPA3_USB_IS_TTYPE_DPL(ttype) ?
"DPL channel":"Data Tethering channels");
/* Request USB_PROD */
if (ipa_pm_is_used())
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
result = ipa3_usb_request_prod(ttype);
/* Activate PM */
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result)
goto fail_exit;
@ -2966,11 +2445,8 @@ stop_ul:
disconn_teth:
(void)ipa3_usb_disconnect_teth_prot(teth_prot);
release_prod:
if (ipa_pm_is_used())
(void)ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
(void)ipa3_usb_release_prod(ttype);
(void)ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
fail_exit:
return result;
}
@ -3006,8 +2482,7 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (prev_state == IPA_USB_SUSPENDED_NO_RWAKEUP) {
result = ipa3_usb_resume_no_remote_wakeup(ul_clnt_hdl,
dl_clnt_hdl, teth_prot);
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
goto bad_params;
}
IPA_USB_DBG("Start resume sequence: %s\n",
@ -3021,14 +2496,11 @@ int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto bad_params;
}
/* Request USB_PROD */
if (ipa_pm_is_used())
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
result = ipa3_usb_request_prod(ttype);
/* Activate PM */
result = ipa_pm_activate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
if (result)
goto prod_req_fail;
goto activate_pm_fail;
if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
/* Start UL channel */
@ -3069,12 +2541,9 @@ start_dl_fail:
IPA_USB_ERR("Error stopping UL channel: %d\n", result);
}
start_ul_fail:
if (ipa_pm_is_used())
ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
else
ipa3_usb_release_prod(ttype);
prod_req_fail:
ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
activate_pm_fail:
/* Change state back to prev_state */
if (!ipa3_usb_set_state(prev_state, true, ttype))
IPA_USB_ERR("failed to change state back to %s\n",
@ -3119,17 +2588,12 @@ static int __init ipa3_usb_init(void)
pm_ctx->remote_wakeup_work = &ipa3_usb_dpl_notify_remote_wakeup_work;
for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp);
ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
}
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state =
IPA_USB_CONS_RELEASED;
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
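/*
 * Editor's illustrative sketch (not part of this commit): the single IPA PM
 * client lifecycle that replaces the RM PROD/CONS pair removed above. The
 * function and client name are hypothetical; the ipa_pm_* calls and the
 * struct ipa_pm_register_params fields are the ones used throughout this
 * patch.
 */
static int example_pm_client_bringup(u32 *hdl, int mbps)
{
	struct ipa_pm_register_params params = {
		.name = "example_client",	/* hypothetical client name */
		.group = IPA_PM_GROUP_DEFAULT,
	};
	int ret;

	/* one registration replaces ipa_rm_create_resource() for PROD & CONS */
	ret = ipa_pm_register(&params, hdl);
	if (ret)
		return ret;

	/* a single throughput vote replaces the per-resource perf profiles */
	ret = ipa_pm_set_throughput(*hdl, mbps);
	if (ret)
		goto fail;

	/* synchronous activation replaces requesting the PROD resource */
	ret = ipa_pm_activate_sync(*hdl);
	if (ret)
		goto fail;

	return 0;

fail:
	ipa_pm_deregister(*hdl);
	return ret;
}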

View File

@ -379,44 +379,6 @@ fail:
}
EXPORT_SYMBOL(ipa_wdi_dereg_intf);
static void ipa_wdi_rm_notify(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("Invalid context\n");
return;
}
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
complete_all(&ipa_wdi_ctx->wdi_completion);
break;
case IPA_RM_RESOURCE_RELEASED:
break;
default:
IPA_WDI_ERR("Invalid RM Evt: %d", event);
break;
}
}
static int ipa_wdi_cons_release(void)
{
return 0;
}
static int ipa_wdi_cons_request(void)
{
int ret = 0;
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
ret = -EFAULT;
}
return ret;
}
static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
{
@ -427,7 +389,6 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out)
{
int i, j, ret = 0;
struct ipa_rm_create_params param;
struct ipa_pm_register_params pm_params;
struct ipa_wdi_in_params in_tx;
struct ipa_wdi_in_params in_rx;
@ -463,46 +424,15 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
}
}
if (!ipa_pm_is_used()) {
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_WLAN_PROD;
param.reg_params.user_data = ipa_wdi_ctx;
param.reg_params.notify_cb = ipa_wdi_rm_notify;
param.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_WDI_ERR("fail to create WLAN_PROD resource\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_WLAN_CONS;
param.request_resource = ipa_wdi_cons_request;
param.release_resource = ipa_wdi_cons_release;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_WDI_ERR("fail to create WLAN_CONS resource\n");
goto fail_create_rm_cons;
}
if (ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS)) {
IPA_WDI_ERR("fail to add rm dependency\n");
ret = -EFAULT;
goto fail_add_dependency;
}
} else {
memset(&pm_params, 0, sizeof(pm_params));
pm_params.name = "wdi";
pm_params.callback = ipa_wdi_pm_cb;
pm_params.user_data = NULL;
pm_params.group = IPA_PM_GROUP_DEFAULT;
if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to register ipa pm\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
memset(&pm_params, 0, sizeof(pm_params));
pm_params.name = "wdi";
pm_params.callback = ipa_wdi_pm_cb;
pm_params.user_data = NULL;
pm_params.group = IPA_PM_GROUP_DEFAULT;
if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to register ipa pm\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
@ -631,17 +561,8 @@ int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
fail:
ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl);
fail_connect_pipe:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS);
else
ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
fail_add_dependency:
if (!ipa_pm_is_used())
ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
fail_create_rm_cons:
if (!ipa_pm_is_used())
ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
fail_setup_sys_pipe:
for (j = 0; j < i; j++)
ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]);
@ -690,27 +611,9 @@ int ipa_wdi_disconn_pipes(void)
}
}
if (!ipa_pm_is_used()) {
if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS)) {
IPA_WDI_ERR("fail to delete rm dependency\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
IPA_WDI_ERR("fail to delete WLAN_PROD resource\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS)) {
IPA_WDI_ERR("fail to delete WLAN_CONS resource\n");
return -EFAULT;
}
} else {
if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to deregister ipa pm\n");
return -EFAULT;
}
if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to deregister ipa pm\n");
return -EFAULT;
}
return 0;
@ -759,24 +662,10 @@ int ipa_wdi_enable_pipes(void)
}
}
if (!ipa_pm_is_used()) {
ret = ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD);
if (ret == -EINPROGRESS) {
if (wait_for_completion_timeout(
&ipa_wdi_ctx->wdi_completion, 10*HZ) == 0) {
IPA_WDI_ERR("WLAN_PROD res req time out\n");
return -EFAULT;
}
} else if (ret != 0) {
IPA_WDI_ERR("fail to request resource\n");
return -EFAULT;
}
} else {
ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to activate ipa pm\n");
return -EFAULT;
}
ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to activate ipa pm\n");
return -EFAULT;
}
return 0;
@ -825,18 +714,10 @@ int ipa_wdi_disable_pipes(void)
}
}
if (!ipa_pm_is_used()) {
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD);
if (ret != 0) {
IPA_WDI_ERR("fail to release resource\n");
return -EFAULT;
}
} else {
ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to deactivate ipa pm\n");
return -EFAULT;
}
ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to deactivate ipa pm\n");
return -EFAULT;
}
return 0;
@ -845,39 +726,15 @@ EXPORT_SYMBOL(ipa_wdi_disable_pipes);
int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile)
{
struct ipa_rm_perf_profile rm_profile;
enum ipa_rm_resource_name resource_name;
if (profile == NULL) {
IPA_WDI_ERR("Invalid input\n");
return -EINVAL;
}
if (!ipa_pm_is_used()) {
rm_profile.max_supported_bandwidth_mbps =
profile->max_supported_bw_mbps;
if (profile->client == IPA_CLIENT_WLAN1_PROD ||
profile->client == IPA_CLIENT_WLAN2_PROD) {
resource_name = IPA_RM_RESOURCE_WLAN_PROD;
} else if (profile->client == IPA_CLIENT_WLAN1_CONS ||
profile->client == IPA_CLIENT_WLAN2_CONS) {
resource_name = IPA_RM_RESOURCE_WLAN_CONS;
} else {
IPA_WDI_ERR("not supported\n");
return -EINVAL;
}
if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
IPA_WDI_ERR("fail to setup rm perf profile\n");
return -EFAULT;
}
} else {
if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
profile->max_supported_bw_mbps)) {
IPA_WDI_ERR("fail to setup pm perf profile\n");
return -EFAULT;
}
if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
profile->max_supported_bw_mbps)) {
IPA_WDI_ERR("fail to set pm throughput\n");
return -EFAULT;
}
return 0;
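/*
 * Editor's illustrative sketch (not part of this commit): with the RM branch
 * gone, a WLAN driver's perf request collapses to one throughput vote on the
 * "wdi" PM handle regardless of the client field. The bandwidth value is
 * hypothetical.
 */
static int example_wdi_perf_vote(void)
{
	struct ipa_wdi_perf_profile profile = {
		.client = IPA_CLIENT_WLAN1_CONS,	/* no longer mapped to an RM resource */
		.max_supported_bw_mbps = 800,		/* hypothetical bandwidth */
	};

	return ipa_wdi_set_perf_profile(&profile);
}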

View File

@ -117,7 +117,6 @@ struct stats {
* @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
* @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
* @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
* @rm_comp: completion object for IP RM
* @wakeup_request: client callback to wakeup
*/
struct odu_bridge_ctx {
@ -144,7 +143,6 @@ struct odu_bridge_ctx {
u32 ipa_sys_desc_size;
void *logbuf;
void *logbuf_low;
struct completion rm_comp;
void (*wakeup_request)(void *cl_priv);
u32 pm_hdl;
};
@ -268,24 +266,6 @@ static int odu_bridge_connect_bridge(void)
memset(&odu_prod_params, 0, sizeof(odu_prod_params));
memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
if (!ipa_pm_is_used()) {
/* Build IPA Resource manager dependency graph */
ODU_BRIDGE_DBG_LOW("build dependency graph\n");
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res && res != -EINPROGRESS) {
ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_1;
}
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_ODU_ADAPT_CONS);
if (res && res != -EINPROGRESS) {
ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
goto fail_add_dependency_2;
}
}
/* configure RX (ODU->IPA) EP */
odu_prod_params.client = IPA_CLIENT_ODU_PROD;
odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
@ -343,14 +323,6 @@ fail_odu_teth_cons:
ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
odu_bridge_ctx->odu_prod_hdl = 0;
fail_odu_prod:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_ODU_ADAPT_CONS);
fail_add_dependency_2:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
IPA_RM_RESOURCE_Q6_CONS);
fail_add_dependency_1:
return res;
}
@ -396,27 +368,13 @@ static int odu_bridge_disconnect_bridge(void)
ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
odu_bridge_ctx->odu_emb_cons_hdl = 0;
if (!ipa_pm_is_used()) {
/* Delete IPA Resource manager dependency graph */
ODU_BRIDGE_DBG("deleting dependency graph\n");
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res && res != -EINPROGRESS)
ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_ODU_ADAPT_CONS);
if (res && res != -EINPROGRESS)
ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
}
return 0;
}
/**
* odu_bridge_disconnect() - Disconnect odu bridge
*
* Disconnect all pipes and deletes IPA RM dependencies on bridge mode
* Disconnect all pipes
*
* Return codes: 0- success, error otherwise
*/
@ -464,8 +422,6 @@ EXPORT_SYMBOL(odu_bridge_disconnect);
* odu_bridge_connect() - Connect odu bridge.
*
* Call to the mode-specific connect function for connecting IPA pipes
* and adding IPA RM dependencies
* Return codes: 0: success
* -EINVAL: invalid parameters
* -EPERM: Operation not permitted as the bridge is already

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/atomic.h>
@ -31,7 +31,6 @@
#define DEBUGFS_DIR_NAME "rndis_ipa"
#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
#define NETDEV_NAME "rndis"
#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD
#define IPV4_HDR_NAME "rndis_eth_ipv4"
#define IPV6_HDR_NAME "rndis_eth_ipv6"
#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
@ -160,7 +159,6 @@ enum rndis_ipa_operation {
* @rx_dropped: number of filtered out Rx packets
* @rx_dump_enable: dump all Rx packets
* @icmp_filter: allow all ICMP packet to pass through the filters
* @rm_enable: flag that enable/disable Resource manager request prior to Tx
* @deaggregation_enable: enable/disable IPA HW deaggregation logic
* @during_xmit_error: flags that indicate that the driver is in a middle
* of error handling in Tx path
@ -195,7 +193,6 @@ struct rndis_ipa_dev {
u32 rx_dropped;
bool rx_dump_enable;
bool icmp_filter;
bool rm_enable;
bool deaggregation_enable;
bool during_xmit_error;
struct dentry *directory;
@ -256,18 +253,10 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode);
static int rndis_ipa_deregister_properties(char *netdev_name);
static void rndis_ipa_rm_notify
(void *user_data, enum ipa_rm_event event,
unsigned long data);
static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
static bool rx_filter(struct sk_buff *skb);
static bool tx_filter(struct sk_buff *skb);
static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx);
static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx);
static netdev_tx_t rndis_ipa_start_xmit
(struct sk_buff *skb, struct net_device *net);
static int rndis_ipa_debugfs_atomic_open
@ -548,7 +537,6 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
rndis_ipa_ctx->tx_filter = false;
rndis_ipa_ctx->rx_filter = false;
rndis_ipa_ctx->icmp_filter = true;
rndis_ipa_ctx->rm_enable = true;
rndis_ipa_ctx->tx_dropped = 0;
rndis_ipa_ctx->rx_dropped = 0;
rndis_ipa_ctx->tx_dump_enable = false;
@ -749,15 +737,12 @@ int rndis_ipa_pipe_connect_notify(
return -EINVAL;
}
if (ipa_pm_is_used())
result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
else
result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
if (result) {
RNDIS_IPA_ERROR("fail on RM create\n");
goto fail_create_rm;
RNDIS_IPA_ERROR("fail on PM register\n");
goto fail_register_pm;
}
RNDIS_IPA_DEBUG("RM resource was created\n");
RNDIS_IPA_DEBUG("PM client was registered\n");
rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
@ -835,11 +820,8 @@ int rndis_ipa_pipe_connect_notify(
return 0;
fail:
if (ipa_pm_is_used())
rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
else
rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
fail_create_rm:
rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
fail_register_pm:
return result;
}
EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify);
@ -957,11 +939,11 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
goto out;
}
ret = resource_request(rndis_ipa_ctx);
ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
if (ret) {
RNDIS_IPA_DEBUG("Waiting to resource\n");
RNDIS_IPA_DEBUG("Failed activate PM client\n");
netif_stop_queue(net);
goto resource_busy;
goto fail_pm_activate;
}
if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >=
@ -990,8 +972,8 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
fail_tx_packet:
rndis_ipa_xmit_error(skb);
out:
resource_release(rndis_ipa_ctx);
resource_busy:
ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
fail_pm_activate:
RNDIS_IPA_DEBUG
("packet Tx done - %s\n",
(status == NETDEV_TX_OK) ? "OK" : "FAIL");
@ -1079,50 +1061,6 @@ static void rndis_ipa_tx_timeout(struct net_device *net)
net->stats.tx_errors++;
}
/**
* rndis_ipa_rm_notify() - callback supplied to IPA resource manager
* for grant/release events
* user_data: the driver context supplied to IPA resource manager during call
* to ipa_rm_create_resource().
* event: the event notified to us by IPA resource manager (Release/Grant)
* data: reserved field supplied by IPA resource manager
*
* This callback shall be called based on resource request/release sent
* to the IPA resource manager.
* In case the queue was stopped during EINPROGRESS for Tx path and the
* event received is Grant then the queue shall be restarted.
* In case the event notified is a release notification the netdev discard it.
*/
static void rndis_ipa_rm_notify(
void *user_data, enum ipa_rm_event event,
unsigned long data)
{
struct rndis_ipa_dev *rndis_ipa_ctx = user_data;
RNDIS_IPA_LOG_ENTRY();
if (event == IPA_RM_RESOURCE_RELEASED) {
RNDIS_IPA_DEBUG("Resource Released\n");
return;
}
if (event != IPA_RM_RESOURCE_GRANTED) {
RNDIS_IPA_ERROR
("Unexceoted event receieved from RM (%d\n)", event);
return;
}
RNDIS_IPA_DEBUG("Resource Granted\n");
if (netif_queue_stopped(rndis_ipa_ctx->net)) {
RNDIS_IPA_DEBUG("starting queue\n");
netif_start_queue(rndis_ipa_ctx->net);
} else {
RNDIS_IPA_DEBUG("queue already awake\n");
}
RNDIS_IPA_LOG_EXIT();
}
/**
* rndis_ipa_packet_receive_notify() - Rx notify for packet sent from
* tethered PC (USB->IPA).
@ -1332,15 +1270,12 @@ int rndis_ipa_pipe_disconnect_notify(void *private)
rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
if (ipa_pm_is_used())
retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
else
retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
if (retval) {
RNDIS_IPA_ERROR("Fail to clean RM\n");
RNDIS_IPA_ERROR("Fail to deregister PM\n");
return retval;
}
RNDIS_IPA_DEBUG("RM was successfully destroyed\n");
RNDIS_IPA_DEBUG("PM was successfully deregistered\n");
spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
@ -1808,86 +1743,7 @@ static int rndis_ipa_deregister_properties(char *netdev_name)
return 0;
}
/**
* rndis_ipa_create_rm_resource() -creates the resource representing
* this Netdev and supply notification callback for resource event
* such as Grant/Release
* @rndis_ipa_ctx: this driver context
*
* In order make sure all needed resources are available during packet
* transmit this Netdev shall use Request/Release mechanism of
* the IPA resource manager.
* This mechanism shall iterate over a dependency graph and make sure
* all dependent entities are ready to for packet Tx
* transfer (Apps->IPA->USB).
* In this function the resource representing the Netdev is created
* in addition to the basic dependency between the Netdev and the USB client.
* Hence, USB client, is a dependency for the Netdev and may be notified in
* case of packet transmit from this Netdev to tethered Host.
* As implied from the "may" in the above sentence there is a scenario where
* the USB is not notified. This is done thanks to the IPA resource manager
* inactivity timer.
* The inactivity timer allow the Release requests to be delayed in order
* prevent ping-pong with the USB and other dependencies.
*/
static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
{
struct ipa_rm_create_params create_params = {0};
struct ipa_rm_perf_profile profile;
int result;
RNDIS_IPA_LOG_ENTRY();
create_params.name = DRV_RESOURCE_ID;
create_params.reg_params.user_data = rndis_ipa_ctx;
create_params.reg_params.notify_cb = rndis_ipa_rm_notify;
result = ipa_rm_create_resource(&create_params);
if (result) {
RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n");
goto fail_rm_create;
}
RNDIS_IPA_DEBUG("RM client was created\n");
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile);
result = ipa_rm_inactivity_timer_init
(DRV_RESOURCE_ID,
INACTIVITY_MSEC_DELAY);
if (result) {
RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
goto fail_inactivity_timer;
}
RNDIS_IPA_DEBUG("rm_it client was created\n");
result = ipa_rm_add_dependency_sync
(DRV_RESOURCE_ID,
IPA_RM_RESOURCE_USB_CONS);
if (result && result != -EINPROGRESS)
RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n",
result);
else
RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n");
result = ipa_rm_add_dependency_sync
(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result && result != -EINPROGRESS)
RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
result);
else
RNDIS_IPA_DEBUG("USB/APPS dependency was set\n");
RNDIS_IPA_LOG_EXIT();
return 0;
fail_inactivity_timer:
fail_rm_create:
return result;
}
static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
{
@ -1912,64 +1768,6 @@ static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
RNDIS_IPA_LOG_EXIT();
}
/**
* rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
* the resource done on rndis_ipa_create_rm_resource()
* @rndis_ipa_ctx: this driver context
*
* This function shall delete the dependency create between
* the Netdev to the USB.
* In addition the inactivity time shall be destroy and the resource shall
* be deleted.
*/
static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
{
int result;
RNDIS_IPA_LOG_ENTRY();
result = ipa_rm_delete_dependency
(DRV_RESOURCE_ID,
IPA_RM_RESOURCE_USB_CONS);
if (result && result != -EINPROGRESS) {
RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n");
goto bail;
}
RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n");
result = ipa_rm_delete_dependency
(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result == -EINPROGRESS) {
RNDIS_IPA_DEBUG("RM dependency deletion is in progress");
} else if (result) {
RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n");
goto bail;
} else {
RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n");
}
result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID);
if (result) {
RNDIS_IPA_ERROR("Fail to destroy inactivity timern");
goto bail;
}
RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroy\n");
result = ipa_rm_delete_resource(DRV_RESOURCE_ID);
if (result) {
RNDIS_IPA_ERROR("resource deletion failed\n");
goto bail;
}
RNDIS_IPA_DEBUG
("Netdev RM resource was deleted (resid:%d)\n",
DRV_RESOURCE_ID);
RNDIS_IPA_LOG_EXIT();
bail:
return result;
}
static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
{
@ -1998,52 +1796,6 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
return 0;
}
/**
* resource_request() - request for the Netdev resource
* @rndis_ipa_ctx: main driver context
*
* This function shall send the IPA resource manager inactivity time a request
* to Grant the Netdev producer.
* In case the resource is already Granted the function shall return immediately
* and "pet" the inactivity timer.
* In case the resource was not already Granted this function shall
* return EINPROGRESS and the Netdev shall stop the send queue until
* the IPA resource manager notify it that the resource is
* granted (done in a differ context)
*/
static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx)
{
int result = 0;
if (!rm_enabled(rndis_ipa_ctx))
return result;
if (ipa_pm_is_used())
return ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
return ipa_rm_inactivity_timer_request_resource(
DRV_RESOURCE_ID);
}
/**
* resource_release() - release the Netdev resource
* @rndis_ipa_ctx: main driver context
*
* start the inactivity timer count down.by using the IPA resource
* manager inactivity time.
* The actual resource release shall occur only if no request shall be done
* during the INACTIVITY_MSEC_DELAY.
*/
static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
{
if (!rm_enabled(rndis_ipa_ctx))
return;
if (ipa_pm_is_used())
ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
else
ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
}
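/*
 * Editor's illustrative sketch (not part of this commit): the Tx-path pattern
 * that replaces resource_request()/resource_release() above. A failed
 * non-blocking ipa_pm_activate() parks the queue until rndis_ipa_pm_cb()
 * wakes it; ipa_pm_deferred_deactivate() takes over the RM inactivity-timer
 * role. The function name is hypothetical; the fields mirror rndis_ipa_dev.
 */
static netdev_tx_t example_pm_xmit(struct rndis_ipa_dev *ctx,
	struct sk_buff *skb)
{
	if (ipa_pm_activate(ctx->pm_hdl)) {
		/* clocks not yet voted; queue is restarted from the PM callback */
		netif_stop_queue(ctx->net);
		return NETDEV_TX_BUSY;
	}

	/* ... hand skb to the IPA data path as rndis_ipa_start_xmit() does ... */

	/* delayed release, analogous to the old RM inactivity timer */
	ipa_pm_deferred_deactivate(ctx->pm_hdl);
	return NETDEV_TX_OK;
}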
/**
* rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
@ -2131,19 +1883,6 @@ static bool tx_filter(struct sk_buff *skb)
return true;
}
/**
* rm_enabled() - allow the use of resource manager Request/Release to
* be bypassed
* @rndis_ipa_ctx: main driver context
*
* By disabling the resource manager flag the Request for the Netdev resource
* shall be bypassed and the packet shall be sent.
* accordingly, Release request shall be bypass as well.
*/
static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx)
{
return rndis_ipa_ctx->rm_enable;
}
/**
* rndis_ipa_ep_registers_cfg() - configure the USB endpoints
@ -2419,14 +2158,6 @@ static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx)
goto fail_file;
}
file = debugfs_create_bool
("rm_enable", flags_read_write,
rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable);
if (!file) {
RNDIS_IPA_ERROR("could not create debugfs rm file\n");
goto fail_file;
}
file = debugfs_create_u32
("outstanding_high", flags_read_write,
rndis_ipa_ctx->directory,

View File

@ -428,8 +428,6 @@ int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
const char *ipa_get_version_string(enum ipa_hw_type ver);
int ipa_start_gsi_channel(u32 clnt_hdl);
bool ipa_pm_is_used(void);
int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr,
struct sg_table *in_sgt_ptr);
int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);

View File

@ -1474,7 +1474,6 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ipa_ioc_v4_nat_del nat_del;
struct ipa_ioc_nat_ipv6ct_table_del table_del;
struct ipa_ioc_nat_pdn_entry mdfy_pdn;
struct ipa_ioc_rm_dependency rm_depend;
struct ipa_ioc_nat_dma_cmd *table_dma_cmd;
struct ipa_ioc_get_vlan_mode vlan_mode;
struct ipa_ioc_wigig_fst_switch fst_switch;
@ -2311,31 +2310,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
case IPA_IOC_RM_ADD_DEPENDENCY:
/* deprecate if IPA PM is used */
if (ipa3_ctx->use_ipa_pm)
return -EINVAL;
/* IPA RM is deprecated because IPA PM is used */
IPAERR("using obsolete command: IPA_IOC_RM_ADD_DEPENDENCY");
return -EINVAL;
if (copy_from_user(&rm_depend, (const void __user *)arg,
sizeof(struct ipa_ioc_rm_dependency))) {
retval = -EFAULT;
break;
}
retval = ipa_rm_add_dependency_from_ioctl(
rm_depend.resource_name, rm_depend.depends_on_name);
break;
case IPA_IOC_RM_DEL_DEPENDENCY:
/* deprecate if IPA PM is used */
if (ipa3_ctx->use_ipa_pm)
return -EINVAL;
/* IPA RM is deprecated because IPA PM is used */
IPAERR("using obsolete command: IPA_IOC_RM_DEL_DEPENDENCY");
return -EINVAL;
if (copy_from_user(&rm_depend, (const void __user *)arg,
sizeof(struct ipa_ioc_rm_dependency))) {
retval = -EFAULT;
break;
}
retval = ipa_rm_delete_dependency_from_ioctl(
rm_depend.resource_name, rm_depend.depends_on_name);
break;
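/*
 * Editor's illustrative sketch (not part of this commit): userspace callers
 * of the two RM dependency ioctls now get -EINVAL before the payload is even
 * copied in. A hypothetical caller (device node fd assumed):
 *
 *	struct ipa_ioc_rm_dependency dep = {
 *		.resource_name = IPA_RM_RESOURCE_WWAN_0_PROD,
 *		.depends_on_name = IPA_RM_RESOURCE_Q6_CONS,
 *	};
 *
 *	if (ioctl(fd, IPA_IOC_RM_ADD_DEPENDENCY, &dep) < 0)
 *		perror("IPA_IOC_RM_ADD_DEPENDENCY");	// errno == EINVAL
 */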
case IPA_IOC_GENERATE_FLT_EQ:
{
struct ipa_ioc_generate_flt_eq flt_eq;
@ -4562,8 +4545,7 @@ void ipa3_disable_clks(void)
ipa3_ctx->ctrl->ipa3_disable_clks();
if (ipa3_ctx->use_ipa_pm)
ipa_pm_set_clock_index(0);
ipa_pm_set_clock_index(0);
if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
WARN(1, "bus scaling failed");
@ -5062,14 +5044,12 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
void *private_data,
void *interrupt_data)
{
enum ipa_rm_resource_name resource;
u32 suspend_data =
((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
u32 bmsk = 1;
u32 i = 0;
int res;
struct ipa_ep_cfg_holb holb_cfg;
struct mutex *pm_mutex_ptr = &ipa3_ctx->transport_pm.transport_pm_mutex;
u32 pipe_bitmask = 0;
IPADBG("interrupt=%d, interrupt_data=%u\n",
@ -5078,55 +5058,13 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
if (ipa3_ctx->use_ipa_pm) {
pipe_bitmask |= bmsk;
continue;
}
if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
/*
* pipe will be unsuspended as part of
* enabling IPA clocks
*/
mutex_lock(pm_mutex_ptr);
if (!atomic_read(
&ipa3_ctx->transport_pm.dec_clients)
) {
IPA_ACTIVE_CLIENTS_INC_EP(
ipa3_ctx->ep[i].client);
IPADBG_LOW("Pipes un-suspended.\n");
IPADBG_LOW("Enter poll mode.\n");
atomic_set(
&ipa3_ctx->transport_pm.dec_clients,
1);
/*
* acquire wake lock as long as suspend
* vote is held
*/
ipa3_inc_acquire_wakelock();
ipa3_process_irq_schedule_rel();
}
mutex_unlock(pm_mutex_ptr);
} else {
resource = ipa3_get_rm_resource_from_ep(i);
res =
ipa_rm_request_resource_with_timer(resource);
if (res == -EPERM &&
IPA_CLIENT_IS_CONS(
ipa3_ctx->ep[i].client)) {
holb_cfg.en = 1;
res = ipa3_cfg_ep_holb_by_client(
ipa3_ctx->ep[i].client, &holb_cfg);
WARN(res, "holb en failed\n");
}
}
}
}
if (ipa3_ctx->use_ipa_pm) {
res = ipa_pm_handle_suspend(pipe_bitmask);
if (res) {
IPAERR("ipa_pm_handle_suspend failed %d\n", res);
return;
}
res = ipa_pm_handle_suspend(pipe_bitmask);
if (res) {
IPAERR("ipa_pm_handle_suspend failed %d\n", res);
return;
}
}
@ -6165,7 +6103,7 @@ static bool ipa_is_mem_dump_allowed(void)
* Initialize the filter block by committing IPV4 and IPV6 default rules
* Create empty routing table in system memory(no committing)
* Create a char-device for IPA
* Initialize IPA RM (resource manager)
* Initialize IPA PM (power manager)
* Configure GSI registers (in GSI case)
*/
static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
@ -6221,7 +6159,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
ipa3_ctx->ee = resource_p->ee;
ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
ipa3_ctx->wdi_over_pcie = resource_p->wdi_over_pcie;
ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
ipa3_ctx->ipa_config_is_mhi = resource_p->ipa_mhi_dynamic_config;
@ -6575,30 +6512,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
/* Initialize Power Management framework */
if (ipa3_ctx->use_ipa_pm) {
result = ipa_pm_init(&ipa3_res.pm_init);
if (result) {
IPAERR("IPA PM initialization failed (%d)\n", -result);
result = -ENODEV;
goto fail_ipa_rm_init;
}
IPADBG("IPA resource manager initialized");
} else {
result = ipa_rm_initialize();
if (result) {
IPAERR("RM initialization failed (%d)\n", -result);
result = -ENODEV;
goto fail_ipa_rm_init;
}
IPADBG("IPA resource manager initialized");
result = ipa3_create_apps_resource();
if (result) {
IPAERR("Failed to create APPS_CONS resource\n");
result = -ENODEV;
goto fail_create_apps_resource;
}
result = ipa_pm_init(&ipa3_res.pm_init);
if (result) {
IPAERR("IPA PM initialization failed (%d)\n", -result);
result = -ENODEV;
goto fail_ipa_pm_init;
}
IPADBG("IPA power manager initialized\n");
INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
@ -6668,14 +6588,8 @@ fail_cdev_add:
fail_gsi_pre_fw_load_init:
ipa3_dma_shutdown();
fail_ipa_dma_setup:
if (ipa3_ctx->use_ipa_pm)
ipa_pm_destroy();
else
ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
fail_create_apps_resource:
if (!ipa3_ctx->use_ipa_pm)
ipa_rm_exit();
fail_ipa_rm_init:
ipa_pm_destroy();
fail_ipa_pm_init:
device_destroy(ipa3_ctx->cdev.class, ipa3_ctx->cdev.dev_num);
fail_device_create:
unregister_chrdev_region(ipa3_ctx->cdev.dev_num, 1);
@ -6747,12 +6661,6 @@ static int get_ipa_dts_pm_info(struct platform_device *pdev,
int result;
int i, j;
ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
"qcom,use-ipa-pm");
IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
if (!ipa_drv_res->use_ipa_pm)
return 0;
result = of_property_read_u32(pdev->dev.of_node,
"qcom,msm-bus,num-cases",
&ipa_drv_res->pm_init.threshold_size);
@ -7886,16 +7794,8 @@ int ipa3_ap_suspend(struct device *dev)
}
}
if (ipa3_ctx->use_ipa_pm) {
ipa_pm_deactivate_all_deferred();
} else {
/*
* Release transport IPA resource without waiting
* for inactivity timer
*/
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
ipa3_transport_release_resource(NULL);
}
ipa_pm_deactivate_all_deferred();
IPADBG("Exit\n");
return 0;

View File

@ -1853,40 +1853,11 @@ bail:
return 0;
}
static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int result, cnt = 0;
/* deprecate if IPA PM is used */
if (ipa3_ctx->use_ipa_pm) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA RM is disabled\n");
goto ret;
}
result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"Error in printing RM stat %d\n", result);
goto ret;
}
cnt += result;
ret:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int result, cnt = 0;
if (!ipa3_ctx->use_ipa_pm) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA PM is disabled\n");
goto ret;
}
result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
@ -1903,12 +1874,6 @@ static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf,
{
int result, cnt = 0;
if (!ipa3_ctx->use_ipa_pm) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA PM is disabled\n");
goto ret;
}
result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN);
if (result < 0) {
cnt += scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
@ -2409,10 +2374,6 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
"ipv6ct", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_ipv6ct,
}
}, {
"rm_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_rm_read_stats,
}
}, {
"pm_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_pm_read_stats,

View File

@ -795,10 +795,7 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
int cnt;
int ret;
if (ipa3_ctx->use_ipa_pm)
ipa_pm_activate_sync(sys->pm_hdl);
else
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa_pm_activate_sync(sys->pm_hdl);
start_poll:
inactive_cycles = 0;
do {
@ -827,10 +824,7 @@ start_poll:
if (ret == -GSI_STATUS_PENDING_IRQ)
goto start_poll;
if (ipa3_ctx->use_ipa_pm)
ipa_pm_deferred_deactivate(sys->pm_hdl);
else
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
ipa_pm_deferred_deactivate(sys->pm_hdl);
}
static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
@ -998,8 +992,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
/* create IPA PM resources for handling polling mode */
if (ipa3_ctx->use_ipa_pm &&
IPA_CLIENT_IS_CONS(sys_in->client)) {
if (IPA_CLIENT_IS_CONS(sys_in->client)) {
pm_reg.name = ipa_clients_strings[sys_in->client];
pm_reg.callback = ipa_pm_sys_pipe_cb;
pm_reg.user_data = ep->sys;
@ -1175,8 +1168,7 @@ fail_repl:
ep->sys->repl->capacity = 0;
kfree(ep->sys->repl);
fail_gen2:
if (ipa3_ctx->use_ipa_pm)
ipa_pm_deregister(ep->sys->pm_hdl);
ipa_pm_deregister(ep->sys->pm_hdl);
fail_pm:
destroy_workqueue(ep->sys->repl_wq);
fail_wq2:
@ -1816,10 +1808,7 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
sys = container_of(work, struct ipa3_sys_context, work);
if (sys->napi_obj) {
if (!ipa3_ctx->use_ipa_pm)
IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
else
ipa_pm_activate_sync(sys->pm_hdl);
ipa_pm_activate_sync(sys->pm_hdl);
napi_schedule(sys->napi_obj);
} else
ipa3_handle_rx(sys);
@ -3914,29 +3903,15 @@ void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
* pm deactivate is done in wq context
* or after NAPI poll
*/
if (ipa3_ctx->use_ipa_pm) {
clk_off = ipa_pm_activate(sys->pm_hdl);
if (!clk_off && sys->napi_obj) {
napi_schedule(sys->napi_obj);
return;
}
queue_work(sys->wq, &sys->work);
clk_off = ipa_pm_activate(sys->pm_hdl);
if (!clk_off && sys->napi_obj) {
napi_schedule(sys->napi_obj);
return;
}
if (sys->napi_obj) {
struct ipa_active_client_logging_info log;
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
clk_off = ipa3_inc_client_enable_clks_no_block(
&log);
if (!clk_off) {
napi_schedule(sys->napi_obj);
return;
}
}
queue_work(sys->wq, &sys->work);
return;
}
static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
@ -4486,13 +4461,8 @@ start_poll:
if (ret == -GSI_STATUS_PENDING_IRQ &&
napi_reschedule(ep->sys->napi_obj))
goto start_poll;
if (ipa3_ctx->use_ipa_pm)
ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
else
ipa3_dec_client_disable_clks_no_block(&log);
ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
}
return cnt;
}

View File

@ -1866,7 +1866,6 @@ struct ipa3_context {
struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE];
int num_ipa_cne_evt_req;
struct mutex ipa_cne_evt_lock;
bool use_ipa_pm;
bool vlan_mode_iface[IPA_VLAN_IF_MAX];
bool wdi_over_pcie;
u32 entire_ipa_block_size;
@ -1925,7 +1924,6 @@ struct ipa3_plat_drv_res {
bool ipa_mhi_dynamic_config;
u32 ipa_tz_unlock_reg_num;
struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
bool use_ipa_pm;
struct ipa_pm_init_params pm_init;
bool wdi_over_pcie;
u32 entire_ipa_block_size;
@ -2639,8 +2637,6 @@ void ipa_init_ep_flt_bitmap(void);
bool ipa_is_ep_support_flt(int pipe_idx);
enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx);
bool ipa3_get_modem_cfg_emb_pipe_flt(void);
u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);

View File

@ -4193,48 +4193,6 @@ bool ipa3_get_client_uplink(int pipe_idx)
return ipa3_ctx->ipacm_client[pipe_idx].uplink;
}
/**
* ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
* the supplied pipe index.
*
* @pipe_idx:
*
* Return value: IPA_RM resource related to the pipe, -1 if a resource was not
* found.
*/
enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
{
int i;
int j;
enum ipa_client_type client;
struct ipa3_client_names clients;
bool found = false;
if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
IPAERR("Bad pipe index!\n");
return -EINVAL;
}
client = ipa3_ctx->ep[pipe_idx].client;
for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
memset(&clients, 0, sizeof(clients));
ipa3_get_clients_from_rm_resource(i, &clients);
for (j = 0; j < clients.length; j++) {
if (clients.names[j] == client) {
found = true;
break;
}
}
if (found)
break;
}
if (!found)
return -EFAULT;
return i;
}
/**
* ipa3_get_client_mapping() - provide client mapping
@ -6824,11 +6782,6 @@ int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
return 0;
}
static bool ipa3_pm_is_used(void)
{
return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false;
}
int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
struct ipa_api_controller *api_ctrl)
{
@ -6980,7 +6933,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
ipa3_get_modem_cfg_emb_pipe_flt;
api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
@ -7022,7 +6974,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_tz_unlock_reg = ipa3_tz_unlock_reg;
api_ctrl->ipa_get_smmu_params = ipa3_get_smmu_params;
api_ctrl->ipa_is_vlan_mode = ipa3_is_vlan_mode;
api_ctrl->ipa_pm_is_used = ipa3_pm_is_used;
api_ctrl->ipa_wigig_uc_init = ipa3_wigig_uc_init;
api_ctrl->ipa_conn_wigig_rx_pipe_i = ipa3_conn_wigig_rx_pipe_i;
api_ctrl->ipa_conn_wigig_client_i = ipa3_conn_wigig_client_i;

View File

@ -42,7 +42,6 @@
#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
#define WWAN_DATA_LEN 9216
#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
#define HEADROOM_FOR_QMAP 8 /* for mux header */
#define TAILROOM 0 /* for padding by mux layer */
#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */
@ -137,7 +136,6 @@ struct rmnet_ipa3_context {
int rmnet_index;
bool egress_set;
bool a7_ul_flt_set;
struct workqueue_struct *rm_q6_wq;
atomic_t is_initialized;
atomic_t is_ssr;
void *lcl_mdm_subsys_notify_handle;
@ -1252,29 +1250,25 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
}
send:
/* IPA_RM checking start */
if (ipa3_ctx->use_ipa_pm) {
/* activate the modem pm for clock scaling */
ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl);
ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl);
} else {
ret = ipa_rm_inactivity_timer_request_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
}
/* IPA_PM checking start */
/* activate the modem pm for clock scaling */
ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl);
ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl);
if (ret == -EINPROGRESS) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
return NETDEV_TX_BUSY;
}
if (ret) {
IPAWANERR("[%s] fatal: ipa rm timer req resource failed %d\n",
IPAWANERR("[%s] fatal: ipa pm activate failed %d\n",
dev->name, ret);
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
return NETDEV_TX_OK;
}
/* IPA_RM checking end */
/* IPA_PM checking end */
/*
* both data packets and command will be routed to
@ -1300,13 +1294,9 @@ send:
ret = NETDEV_TX_OK;
out:
if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
if (ipa3_ctx->use_ipa_pm) {
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
} else {
ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
}
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
}
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
return ret;
@ -1365,13 +1355,9 @@ static void apps_ipa_tx_complete_notify(void *priv,
}
if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
if (ipa3_ctx->use_ipa_pm) {
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
} else {
ipa_rm_inactivity_timer_release_resource(
IPA_RM_RESOURCE_WWAN_0_PROD);
}
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
}
__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
dev_kfree_skb_any(skb);
@ -2075,68 +2061,6 @@ static void ipa3_wwan_setup(struct net_device *dev)
dev->watchdog_timeo = 1000;
}
/* IPA_RM related functions start*/
static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
ipa3_q6_prod_rm_request_resource);
static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
ipa3_q6_prod_rm_release_resource);
static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
{
int ret = 0;
ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
IPAWANERR("ipa_rm_request_resource failed %d\n", ret);
return;
}
}
static int ipa3_q6_rm_request_resource(void)
{
queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
&ipa3_q6_con_rm_request, 0);
return 0;
}
static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
{
int ret = 0;
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0 && ret != -EINPROGRESS) {
IPAWANERR("ipa_rm_release_resource failed %d\n", ret);
return;
}
}
static int ipa3_q6_rm_release_resource(void)
{
queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
&ipa3_q6_con_rm_release, 0);
return 0;
}
static void ipa3_q6_rm_notify_cb(void *user_data,
enum ipa_rm_event event,
unsigned long data)
{
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
IPAWANDBG_LOW("Q6_PROD GRANTED CB\n");
break;
case IPA_RM_RESOURCE_RELEASED:
IPAWANDBG_LOW("Q6_PROD RELEASED CB\n");
break;
default:
return;
}
}
/**
* rmnet_ipa_send_coalesce_notification
* (uint8_t qmap_id, bool enable, bool tcp, bool udp)
@ -2184,9 +2108,6 @@ int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state)
if (!state)
return -EINVAL;
if (!ipa_pm_is_used())
return 0;
if (state->up)
return ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
else
@ -2237,114 +2158,23 @@ static void ipa3_q6_deregister_pm(void)
int ipa3_wwan_set_modem_perf_profile(int throughput)
{
struct ipa_rm_perf_profile profile;
int ret;
int tether_bridge_handle = 0;
if (ipa3_ctx->use_ipa_pm) {
/* query rmnet-tethering handle */
tether_bridge_handle = ipa3_teth_bridge_get_pm_hdl();
if (tether_bridge_handle > 0) {
/* only update with valid handle*/
ret = ipa_pm_set_throughput(tether_bridge_handle,
throughput);
}
/* for TETH MODEM on softap/rndis */
ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_teth_pm_hdl,
throughput);
} else {
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = throughput;
ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
&profile);
/* query rmnet-tethering handle */
tether_bridge_handle = ipa3_teth_bridge_get_pm_hdl();
if (tether_bridge_handle > 0) {
/* only update with valid handle*/
ret = ipa_pm_set_throughput(tether_bridge_handle,
throughput);
}
/* for TETH MODEM on softap/rndis */
ret = ipa_pm_set_throughput(rmnet_ipa3_ctx->q6_teth_pm_hdl,
throughput);
return ret;
}
static int ipa3_q6_initialize_rm(void)
{
struct ipa_rm_create_params create_params;
struct ipa_rm_perf_profile profile;
int result;
/* Initialize IPA_RM workqueue */
rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
if (!rmnet_ipa3_ctx->rm_q6_wq)
return -ENOMEM;
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_PROD;
create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err1;
memset(&create_params, 0, sizeof(create_params));
create_params.name = IPA_RM_RESOURCE_Q6_CONS;
create_params.release_resource = &ipa3_q6_rm_release_resource;
create_params.request_resource = &ipa3_q6_rm_request_resource;
result = ipa_rm_create_resource(&create_params);
if (result)
goto create_rsrc_err2;
/* add dependency*/
result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = 100;
result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
&profile);
if (result)
goto set_perf_err;
result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
&profile);
if (result)
goto set_perf_err;
return result;
set_perf_err:
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
add_dpnd_err:
result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, result);
create_rsrc_err2:
result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (result < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
return result;
}
void ipa3_q6_deinitialize_rm(void)
{
int ret;
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
ret);
ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_CONS, ret);
ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
if (rmnet_ipa3_ctx->rm_q6_wq)
destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
{
if (IPA_NETDEV()) {
@ -2356,56 +2186,21 @@ static void ipa3_wake_tx_queue(struct work_struct *work)
}
/**
* ipa3_rm_resource_granted() - Called upon
* IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
* ipa3_pm_resource_granted() - Called upon
* IPA_PM_RESOURCE_GRANTED event. Wakes up the tx workqueue.
*
* @work: work object supplied by the workqueue
*
* Return codes:
* None
*/
static void ipa3_rm_resource_granted(void *dev)
static void ipa3_pm_resource_granted(void *dev)
{
IPAWANDBG_LOW("Resource Granted - starting queue\n");
schedule_work(&ipa3_tx_wakequeue_work);
}
/**
* ipa3_rm_notify() - Callback function for RM events. Handles
* IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
* IPA_RM_RESOURCE_GRANTED is handled in the context of shared
* workqueue.
*
* @dev: network device
* @event: IPA RM event
* @data: Additional data provided by IPA RM
*
* Return codes:
* None
*/
static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
unsigned long data)
{
struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
pr_debug("%s: event %d\n", __func__, event);
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
complete_all(&wwan_ptr->resource_granted_completion);
break;
}
ipa3_rm_resource_granted(dev);
break;
case IPA_RM_RESOURCE_RELEASED:
break;
default:
pr_err("%s: unknown event %d\n", __func__, event);
break;
}
}
/* IPA_RM related functions end*/
/* IPA_PM related functions end*/
static int ipa3_lcl_mdm_ssr_notifier_cb(struct notifier_block *this,
unsigned long code,
@ -2496,7 +2291,7 @@ static void ipa_pm_wwan_pm_cb(void *p, enum ipa_pm_cb_event event)
complete_all(&wwan_ptr->resource_granted_completion);
break;
}
ipa3_rm_resource_granted(dev);
ipa3_pm_resource_granted(dev);
break;
default:
pr_err("%s: unknown event %d\n", __func__, event);
@ -2528,76 +2323,6 @@ static void ipa3_wwan_deregister_netdev_pm_client(void)
ipa_pm_deregister(rmnet_ipa3_ctx->pm_hdl);
}
static int ipa3_wwan_create_wwan_rm_resource(struct net_device *dev)
{
struct ipa_rm_create_params ipa_rm_params;
struct ipa_rm_perf_profile profile;
int ret;
memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
ipa_rm_params.reg_params.user_data = dev;
ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
ret = ipa_rm_create_resource(&ipa_rm_params);
if (ret) {
pr_err("%s: unable to create resourse %d in IPA RM\n",
__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
return ret;
}
ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_INACTIVITY_TIMER);
if (ret) {
pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
__func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
goto timer_init_err;
}
/* add dependency */
ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret)
goto add_dpnd_err;
/* setup Performance profile */
memset(&profile, 0, sizeof(profile));
profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
&profile);
if (ret)
goto set_perf_err;
return 0;
set_perf_err:
ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
add_dpnd_err:
ipa_rm_inactivity_timer_destroy(
IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
timer_init_err:
ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
return ret;
}
static void ipa3_wwan_delete_wwan_rm_resource(void)
{
int ret;
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
ret);
ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR(
"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
}
/**
* ipa3_wwan_probe() - Initialized the module and registers as a
* network interface to the network stack
@ -2708,22 +2433,16 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
&rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
/* IPA_RM configuration starts */
if (ipa3_ctx->use_ipa_pm)
ret = ipa3_q6_register_pm();
else
ret = ipa3_q6_initialize_rm();
/* IPA_PM configuration starts */
ret = ipa3_q6_register_pm();
if (ret) {
IPAWANERR("ipa3_q6_initialize_rm failed, ret: %d\n",
IPAWANERR("ipa3_q6_register_pm failed, ret: %d\n",
ret);
goto q6_init_err;
}
}
if (ipa3_ctx->use_ipa_pm)
ret = ipa3_wwan_register_netdev_pm_client(dev);
else
ret = ipa3_wwan_create_wwan_rm_resource(dev);
ret = ipa3_wwan_register_netdev_pm_client(dev);
if (ret) {
IPAWANERR("fail to create/register pm resources\n");
goto fail_pm;
@ -2775,18 +2494,11 @@ config_err:
netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
unregister_netdev(dev);
set_perf_err:
if (ipa3_ctx->use_ipa_pm)
ipa3_wwan_deregister_netdev_pm_client();
else
ipa3_wwan_delete_wwan_rm_resource();
ipa3_wwan_deregister_netdev_pm_client();
fail_pm:
if (ipa3_ctx->use_ipa_pm) {
if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_q6_deregister_pm();
} else {
if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_q6_deinitialize_rm();
}
if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_q6_deregister_pm();
q6_init_err:
free_netdev(dev);
rmnet_ipa3_ctx->wwan_priv = NULL;
@@ -2819,10 +2531,7 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
IPAWANINFO("rmnet_ipa unregister_netdev\n");
unregister_netdev(IPA_NETDEV());
if (ipa3_ctx->use_ipa_pm)
ipa3_wwan_deregister_netdev_pm_client();
else
ipa3_wwan_delete_wwan_rm_resource();
ipa3_wwan_deregister_netdev_pm_client();
cancel_work_sync(&ipa3_tx_wakequeue_work);
cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
if (IPA_NETDEV())
@@ -2904,11 +2613,8 @@ static int rmnet_ipa_ap_suspend(struct device *dev)
netif_stop_queue(netdev);
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
IPAWANDBG("De-activating the PM/RM resource.\n");
if (ipa3_ctx->use_ipa_pm)
ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
else
ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
IPAWANDBG("De-activating the PM resource.\n");
ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
ret = 0;
bail:
IPAWANDBG("Exit with %d\n", ret);

View File

@@ -124,7 +124,7 @@ int ipa3_teth_bridge_get_pm_hdl(void)
return -EINVAL;
}
TETH_DBG("Return rm-handle %d\n", ipa3_teth_ctx->modem_pm_hdl);
TETH_DBG("Return pm-handle %d\n", ipa3_teth_ctx->modem_pm_hdl);
TETH_DBG_FUNC_EXIT();
return ipa3_teth_ctx->modem_pm_hdl;
}
@@ -137,20 +137,15 @@ int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
int res = 0;
TETH_DBG_FUNC_ENTRY();
if (ipa_pm_is_used()) {
res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
if (res) {
TETH_ERR("fail to deactivate modem %d\n", res);
return res;
}
res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
ipa3_teth_ctx->modem_pm_hdl = ~0;
} else {
ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
if (res) {
TETH_ERR("fail to deactivate modem %d\n", res);
return res;
}
res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
ipa3_teth_ctx->modem_pm_hdl = ~0;
TETH_DBG_FUNC_EXIT();
return res;
@@ -174,49 +169,17 @@ int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
TETH_DBG_FUNC_ENTRY();
if (ipa_pm_is_used()) {
reg_params.name = "MODEM (USB RMNET)";
reg_params.group = IPA_PM_GROUP_MODEM;
reg_params.skip_clk_vote = true;
res = ipa_pm_register(&reg_params,
&ipa3_teth_ctx->modem_pm_hdl);
if (res) {
TETH_ERR("fail to register with PM %d\n", res);
return res;
}
res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
goto bail;
reg_params.name = "MODEM (USB RMNET)";
reg_params.group = IPA_PM_GROUP_MODEM;
reg_params.skip_clk_vote = true;
res = ipa_pm_register(&reg_params,
&ipa3_teth_ctx->modem_pm_hdl);
if (res) {
TETH_ERR("fail to register with PM %d\n", res);
return res;
}
/* Build the dependency graph, first add_dependency call is sync
* in order to make sure the IPA clocks are up before we continue
* and notify the USB driver it may continue.
*/
res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (res < 0) {
TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
/* this add_dependency call can't be sync since it will block until USB
* status is connected (which can happen only after the tethering
* bridge is connected), the clocks are already up so the call doesn't
* need to block.
*/
res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
IPA_RM_RESOURCE_USB_CONS);
if (res < 0 && res != -EINPROGRESS) {
ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
IPA_RM_RESOURCE_Q6_CONS);
TETH_ERR("ipa_rm_add_dependency() failed.\n");
goto bail;
}
res = 0;
bail:
res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
TETH_DBG_FUNC_EXIT();
return res;
}
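Because the new tethering-bridge flow is interleaved with the removed RM dependency code above, here is a consolidated sketch reconstructed from the added lines only (logging, the TETH_DBG_FUNC_* markers and the local reg_params declaration are abbreviated, and the register-parameter struct name is assumed).

static int teth_bridge_connect_pm(void)
{
	struct ipa_pm_register_params reg_params; /* struct name assumed */
	int res;

	memset(&reg_params, 0, sizeof(reg_params));
	reg_params.name = "MODEM (USB RMNET)";
	reg_params.group = IPA_PM_GROUP_MODEM;
	reg_params.skip_clk_vote = true;

	res = ipa_pm_register(&reg_params, &ipa3_teth_ctx->modem_pm_hdl);
	if (res)
		return res;

	/* no USB_PROD->Q6_CONS / Q6_PROD->USB_CONS dependency graph any
	 * more; a single synchronous activation is enough
	 */
	return ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
}

static int teth_bridge_disconnect_pm(void)
{
	int res;

	res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
	if (res)
		return res;

	res = ipa_pm_deregister(ipa3_teth_ctx->modem_pm_hdl);
	ipa3_teth_ctx->modem_pm_hdl = ~0;
	return res;
}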

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa.h>
@@ -35,15 +35,13 @@ static int ipa_pm_ut_setup(void **ppriv)
/*decouple PM from RPM */
ipa3_ctx->enable_clock_scaling = false;
if (ipa3_ctx->use_ipa_pm) {
for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
ipa_pm_deactivate_sync(i);
ipa_pm_deregister(i);
}
ipa_pm_destroy();
for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
ipa_pm_deactivate_sync(i);
ipa_pm_deregister(i);
}
ipa_pm_destroy();
return 0;
}
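With the use_ipa_pm guard gone, the unit-test setup now resets every possible PM client unconditionally before rebuilding the framework. A minimal sketch of that loop, using only the calls shown above:

static void ipa_pm_ut_reset_clients(void)
{
	int i;

	/* best-effort: clients that were never registered are expected to
	 * just fail these calls, and the test setup ignores the results
	 */
	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
		ipa_pm_deactivate_sync(i);
		ipa_pm_deregister(i);
	}

	ipa_pm_destroy();
}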