Merge branch 'android11-5.4' into android11-5.4-lts

Sync up with android11-5.4 for the following commits:

* b86f2ce54f Merge tag 'android11-5.4.233_r00' into android11-5.4
* 5ccb6ea4bc UPSTREAM: ext4: fix another off-by-one fsmap error on 1k block filesystems
* f756dbba7b UPSTREAM: ext4: refuse to create ea block when umounted
* aefbae5b92 UPSTREAM: ext4: optimize ea_inode block expansion
* 7431096bfd UPSTREAM: ext4: allocate extended attribute value in vmalloc area
* 7cfbc7501b BACKPORT: FROMGIT: cgroup: Use separate src/dst nodes when preloading css_sets for migration
* b62f2af118 ANDROID: fs/f2fs: fixup __f2fs_cluster_blocks with F2FS_FS_COMPRESSION
* 9f71ba69cd UPSTREAM: usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
* 62b8ef0458 UPSTREAM: KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
* 2af3bdf293 UPSTREAM: ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
* 9adb062d06 UPSTREAM: net_sched: reject TCF_EM_SIMPLE case for complex ematch module
* 5a1412b39b UPSTREAM: ipv6: ensure sane device mtu in tunnels
* 2490e55ff4 BACKPORT: iommu: Avoid races around device probe
* 3daec397ee BACKPORT: mac80211_hwsim: notify wmediumd of used MAC addresses
* b6a441d634 FROMGIT: mac80211_hwsim: add concurrent channels scanning support over virtio
* 1866ee1a90 ANDROID: ABI: Cuttlefish Symbol update
* e55bdca2cb UPSTREAM: media: dvb-core: Fix UAF due to refcount races at releasing
* d72fdcc709 ANDROID: abi_gki_aarch64_qcom: Add hrtimer_sleeper_start_expires
* b63a7aff8a UPSTREAM: ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF

Update the .xml file with the new symbols being tracked now:

2 Added functions:

  [A] 'function void hrtimer_sleeper_start_expires(hrtimer_sleeper*, hrtimer_mode)'
  [A] 'function bool ieee80211_tx_prepare_skb(ieee80211_hw*, ieee80211_vif*, sk_buff*, int, ieee80211_sta**)'

Change-Id: Ida9475b812bf2e4b2f80bff6c554df60b288d2e7
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2023-03-23 14:30:45 +00:00
commit b6d6caabaf
10 changed files with 5225 additions and 5001 deletions

File diff suppressed because it is too large. [Load Diff]

View File

@ -10,6 +10,7 @@
arm64_const_caps_ready
bcmp
cancel_delayed_work_sync
cancel_work_sync
capable
cfg80211_inform_bss_data
cfg80211_put_bss
@ -28,6 +29,7 @@
delayed_work_timer_fn
destroy_workqueue
_dev_err
device_create
device_register
device_unregister
_dev_info
@ -39,6 +41,7 @@
dma_set_coherent_mask
dma_set_mask
down_write
ether_setup
ethtool_op_get_link
eth_validate_addr
event_triggers_call
@ -60,6 +63,7 @@
init_wait_entry
__init_waitqueue_head
jiffies
jiffies_to_msecs
kfree
kfree_skb
__kmalloc
@ -72,6 +76,8 @@
kmemdup
kstrdup
ktime_get
ktime_get_with_offset
kvfree
__list_add_valid
__list_del_entry_valid
__local_bh_enable_ip
@ -83,6 +89,7 @@
__module_get
module_layout
module_put
__msecs_to_jiffies
msleep
__mutex_init
mutex_lock
@ -98,6 +105,7 @@
netif_device_detach
netif_tx_stop_all_queues
netif_tx_wake_queue
nf_conntrack_destroy
no_llseek
nonseekable_open
noop_llseek
@ -141,9 +149,8 @@
_raw_spin_unlock_irqrestore
__rcu_read_lock
__rcu_read_unlock
refcount_dec_and_test_checked
refcount_inc_checked
register_netdev
refcount_warn_saturate
register_netdevice
register_netdevice_notifier
register_virtio_device
register_virtio_driver
@ -158,9 +165,12 @@
seq_printf
sg_init_one
sg_init_table
skb_add_rx_frag
skb_clone
skb_dequeue
skb_push
skb_put
skb_queue_tail
sk_free
snd_device_new
snd_pcm_alt_chmaps
@ -195,8 +205,11 @@
unregister_virtio_device
unregister_virtio_driver
up_write
virtio_break_device
virtio_check_driver_offered_feature
virtio_config_changed
virtio_device_freeze
virtio_device_restore
virtio_max_dma_size
virtqueue_add_inbuf
virtqueue_add_outbuf
@ -270,10 +283,8 @@
hci_register_dev
hci_unregister_dev
skb_pull
skb_push
skb_queue_head
skb_queue_purge
skb_queue_tail
# required by incrementalfs.ko
bin2hex
@ -303,7 +314,6 @@
generic_file_read_iter
generic_file_splice_read
generic_read_dir
generic_shutdown_super
__get_free_pages
get_zeroed_page
iget5_locked
@ -315,6 +325,7 @@
kernel_read
kernel_write
kern_path
kill_anon_super
kobject_create_and_add
kobject_put
lockref_get
@ -323,7 +334,6 @@
LZ4_decompress_safe
match_int
match_token
__msecs_to_jiffies
mutex_is_locked
notify_change
override_creds
@ -357,6 +367,76 @@
vfs_setxattr
vfs_unlink
# required by mac80211_hwsim.ko
alloc_netdev_mqs
__cfg80211_alloc_event_skb
__cfg80211_alloc_reply_skb
__cfg80211_send_event_skb
cfg80211_vendor_cmd_reply
dev_alloc_name
device_bind_driver
device_release_driver
dst_release
eth_mac_addr
genlmsg_put
genl_notify
genl_register_family
genl_unregister_family
hrtimer_cancel
hrtimer_forward
hrtimer_init
hrtimer_start_range_ns
ieee80211_alloc_hw_nm
ieee80211_beacon_get_tim
ieee80211_csa_finish
ieee80211_csa_is_complete
ieee80211_free_hw
ieee80211_free_txskb
ieee80211_get_tx_rates
ieee80211_iterate_active_interfaces_atomic
ieee80211_probereq_get
ieee80211_queue_delayed_work
ieee80211_ready_on_channel
ieee80211_register_hw
ieee80211_remain_on_channel_expired
ieee80211_rx_irqsafe
ieee80211_scan_completed
ieee80211_start_tx_ba_cb_irqsafe
ieee80211_stop_tx_ba_cb_irqsafe
ieee80211_tx_prepare_skb
ieee80211_tx_status_irqsafe
ieee80211_unregister_hw
init_net
kstrndup
__netdev_alloc_skb
netif_rx
netlink_broadcast
netlink_register_notifier
netlink_unicast
netlink_unregister_notifier
net_namespace_list
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
param_ops_ushort
___ratelimit
register_pernet_device
regulatory_hint
rhashtable_destroy
rhashtable_init
rhashtable_insert_slow
__rht_bucket_nested
rht_bucket_nested
rht_bucket_nested_insert
schedule_timeout_interruptible
skb_copy
skb_copy_expand
__skb_ext_put
skb_trim
unregister_pernet_device
wiphy_apply_custom_regulatory
# required by nd_virtio.ko
bio_alloc_bioset
bio_chain
@ -380,6 +460,7 @@
netdev_lower_state_changed
netdev_pick_tx
pci_bus_type
register_netdev
# required by rtc-test.ko
add_timer
@ -466,7 +547,6 @@
idr_remove
idr_replace
__init_rwsem
jiffies_to_msecs
jiffies_to_usecs
krealloc
memchr_inv
@ -501,11 +581,8 @@
cfg80211_scan_done
__dev_get_by_index
dev_printk
ether_setup
ktime_get_with_offset
netdev_upper_dev_link
netif_stacked_transfer_operstate
register_netdevice
rtnl_link_register
rtnl_link_unregister
unregister_netdevice_many
@ -612,7 +689,6 @@
drm_universal_plane_init
__get_task_comm
kmalloc_order_trace
kvfree
kvmalloc_node
memdup_user
mutex_trylock
@ -701,11 +777,9 @@
unregister_blkdev
# required by virtio_console.ko
cancel_work_sync
cdev_add
cdev_alloc
cdev_del
device_create
device_destroy
dma_alloc_attrs
dma_free_attrs
@ -789,12 +863,10 @@
netif_set_real_num_tx_queues
__netif_set_xps_queue
net_ratelimit
nf_conntrack_destroy
__num_online_cpus
__pskb_pull_tail
_raw_spin_trylock
sched_clock
skb_add_rx_frag
skb_coalesce_rx_frag
__skb_flow_dissect
skb_page_frag_refill
@ -820,6 +892,7 @@
# required by virtio_pci.ko
irq_set_affinity_hint
pci_alloc_irq_vectors_affinity
pci_device_is_present
pci_find_capability
pci_find_ext_capability
pci_find_next_capability
@ -831,8 +904,6 @@
pci_release_region
pci_release_selected_regions
pci_request_selected_regions
virtio_device_freeze
virtio_device_restore
# required by virtio_pmem.ko
nvdimm_bus_register
@ -852,6 +923,7 @@
# required by vsock.ko
autoremove_wake_function
init_user_ns
mod_delayed_work_on
ns_capable_noaudit
prandom_u32
prepare_to_wait
@ -882,3 +954,8 @@
sock_diag_save_cookie
sock_diag_unregister
sock_i_ino
# preserved by --additions-only
generic_shutdown_super
refcount_dec_and_test_checked
refcount_inc_checked

View File

@ -1070,6 +1070,7 @@
__hrtimer_get_remaining
hrtimer_init
hrtimer_init_sleeper
hrtimer_sleeper_start_expires
hrtimer_start_range_ns
hrtimer_try_to_cancel
hvc_alloc

View File

@ -177,14 +177,25 @@ static void iommu_free_dev_param(struct device *dev)
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
static DEFINE_MUTEX(iommu_probe_device_lock);
int ret;
WARN_ON(dev->iommu_group);
if (!ops)
return -EINVAL;
if (!iommu_get_dev_param(dev))
return -ENOMEM;
/*
* Serialise to avoid races between IOMMU drivers registering in
* parallel and/or the "replay" calls from ACPI/OF code via client
* driver probe. Once the latter have been cleaned up we should
* probably be able to use device_lock() here to minimise the scope,
* but for now enforcing a simple global ordering is fine.
*/
mutex_lock(&iommu_probe_device_lock);
if (!iommu_get_dev_param(dev)) {
ret = -ENOMEM;
goto err_unlock;
}
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@ -195,12 +206,17 @@ int iommu_probe_device(struct device *dev)
if (ret)
goto err_module_put;
mutex_unlock(&iommu_probe_device_lock);
return 0;
err_module_put:
module_put(ops->owner);
err_free_dev_param:
iommu_free_dev_param(dev);
err_unlock:
mutex_unlock(&iommu_probe_device_lock);
return ret;
}

View File

@ -499,6 +499,7 @@ struct mac80211_hwsim_data {
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
struct ieee80211_chanctx_conf *chanctx;
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
@ -1059,6 +1060,47 @@ static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data,
return res;
}
/*
 * Notify the medium simulator of a receive MAC address change on this
 * radio: sends HWSIM_CMD_ADD_MAC_ADDR (add == true) or
 * HWSIM_CMD_DEL_MAC_ADDR (add == false) carrying the radio's transmitter
 * address (addresses[1]) and @addr as the receiver address, either over
 * virtio (when hwsim_virtio_enabled) or via netlink unicast to the
 * wmediumd portid. Best-effort: failures are silently dropped.
 */
static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw,
const u8 *addr, bool add)
{
struct mac80211_hwsim_data *data = hw->priv;
u32 _portid = READ_ONCE(data->wmediumd);
struct sk_buff *skb;
void *msg_head;
/* No wmediumd peer and no virtio transport: nobody to notify. */
if (!_portid && !hwsim_virtio_enabled)
return;
/* GFP_ATOMIC: may be called from non-sleepable context. */
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
return;
msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
add ? HWSIM_CMD_ADD_MAC_ADDR :
HWSIM_CMD_DEL_MAC_ADDR);
if (!msg_head) {
pr_debug("mac80211_hwsim: problem with msg_head\n");
goto nla_put_failure;
}
/* Identify the sending radio by its transmitter address. */
if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
ETH_ALEN, data->addresses[1].addr))
goto nla_put_failure;
/* The receive address being added/removed. */
if (nla_put(skb, HWSIM_ATTR_ADDR_RECEIVER, ETH_ALEN, addr))
goto nla_put_failure;
genlmsg_end(skb, msg_head);
/* Ownership of skb passes to the chosen transport. */
if (hwsim_virtio_enabled)
hwsim_tx_virtio(data, skb);
else
hwsim_unicast_netgroup(data, skb, _portid);
return;
nla_put_failure:
nlmsg_free(skb);
}
static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
{
u16 result = 0;
@ -1091,7 +1133,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)
static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
struct sk_buff *my_skb,
int dst_portid)
int dst_portid,
struct ieee80211_channel *channel)
{
struct sk_buff *skb;
struct mac80211_hwsim_data *data = hw->priv;
@ -1146,7 +1189,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
goto nla_put_failure;
if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
goto nla_put_failure;
/* We get the tx control (rate and retries) info*/
@ -1487,7 +1530,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
_portid = READ_ONCE(data->wmediumd);
if (_portid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);
/* NO wmediumd detected, perfect medium simulation */
data->tx_pkts++;
@ -1540,6 +1583,9 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
vif->addr);
hwsim_set_magic(vif);
if (vif->type != NL80211_IFTYPE_MONITOR)
mac80211_hwsim_config_mac_nl(hw, vif->addr, true);
vif->cab_queue = 0;
vif->hw_queue[IEEE80211_AC_VO] = 0;
vif->hw_queue[IEEE80211_AC_VI] = 1;
@ -1579,6 +1625,8 @@ static void mac80211_hwsim_remove_interface(
vif->addr);
hwsim_check_magic(vif);
hwsim_clear_magic(vif);
if (vif->type != NL80211_IFTYPE_MONITOR)
mac80211_hwsim_config_mac_nl(hw, vif->addr, false);
}
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@ -1598,7 +1646,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
if (_pid || hwsim_virtio_enabled)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
dev_kfree_skb(skb);
@ -2092,6 +2140,8 @@ static void hw_scan_work(struct work_struct *work)
hwsim->hw_scan_vif = NULL;
hwsim->tmp_chan = NULL;
mutex_unlock(&hwsim->mutex);
mac80211_hwsim_config_mac_nl(hwsim->hw, hwsim->scan_addr,
false);
return;
}
@ -2177,6 +2227,7 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
mutex_unlock(&hwsim->mutex);
mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true);
wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n");
ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
@ -2220,6 +2271,7 @@ static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw,
pr_debug("hwsim sw_scan request, prepping stuff\n");
memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN);
mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true);
hwsim->scanning = true;
memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data));
@ -2236,6 +2288,7 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw,
pr_debug("hwsim sw_scan_complete\n");
hwsim->scanning = false;
mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, false);
eth_zero_addr(hwsim->scan_addr);
mutex_unlock(&hwsim->mutex);
@ -2316,6 +2369,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
mutex_lock(&hwsim->mutex);
hwsim->chanctx = ctx;
mutex_unlock(&hwsim->mutex);
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@ -2327,6 +2385,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
mutex_lock(&hwsim->mutex);
hwsim->chanctx = NULL;
mutex_unlock(&hwsim->mutex);
wiphy_dbg(hw->wiphy,
"remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
@ -2339,6 +2402,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
mutex_lock(&hwsim->mutex);
hwsim->chanctx = ctx;
mutex_unlock(&hwsim->mutex);
hwsim_check_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@ -2926,6 +2994,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->wiphy->max_remain_on_channel_duration = 1000;
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
data->chanctx = NULL;
} else {
data->if_combination.num_different_channels = 1;
data->if_combination.radar_detect_widths =
@ -3420,6 +3489,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
int frame_data_len;
void *frame_data;
struct sk_buff *skb = NULL;
struct ieee80211_channel *channel = NULL;
if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
!info->attrs[HWSIM_ATTR_FRAME] ||
@ -3446,6 +3516,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
if (!data2)
goto out;
if (data2->use_chanctx) {
if (data2->tmp_chan)
channel = data2->tmp_chan;
else if (data2->chanctx)
channel = data2->chanctx->def.chan;
} else {
channel = data2->channel;
}
if (!channel)
goto out;
if (!hwsim_virtio_enabled) {
if (hwsim_net_get_netgroup(genl_info_net(info)) !=
data2->netgroup)
@ -3457,7 +3538,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
/* check if radio is configured properly */
if (data2->idle || !data2->started)
if ((data2->idle && !data2->tmp_chan) || !data2->started)
goto out;
/* A frame is received from user space */
@ -3470,18 +3551,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
mutex_lock(&data2->mutex);
rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);
if (rx_status.freq != data2->channel->center_freq &&
(!data2->tmp_chan ||
rx_status.freq != data2->tmp_chan->center_freq)) {
if (rx_status.freq != channel->center_freq) {
mutex_unlock(&data2->mutex);
goto out;
}
mutex_unlock(&data2->mutex);
} else {
rx_status.freq = data2->channel->center_freq;
rx_status.freq = channel->center_freq;
}
rx_status.band = data2->channel->band;
rx_status.band = channel->band;
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates)
goto out;

View File

@ -75,6 +75,12 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
* @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses:
* %HWSIM_ATTR_RADIO_ID
* @HWSIM_CMD_ADD_MAC_ADDR: add a receive MAC address (given in the
* %HWSIM_ATTR_ADDR_RECEIVER attribute) to a device identified by
* %HWSIM_ATTR_ADDR_TRANSMITTER. This lets wmediumd forward frames
* to this receiver address for a given station.
* @HWSIM_CMD_DEL_MAC_ADDR: remove the MAC address again, the attributes
* are the same as to @HWSIM_CMD_ADD_MAC_ADDR.
* @__HWSIM_CMD_MAX: enum limit
*/
enum {
@ -85,6 +91,8 @@ enum {
HWSIM_CMD_NEW_RADIO,
HWSIM_CMD_DEL_RADIO,
HWSIM_CMD_GET_RADIO,
HWSIM_CMD_ADD_MAC_ADDR,
HWSIM_CMD_DEL_MAC_ADDR,
__HWSIM_CMD_MAX,
};
#define HWSIM_CMD_MAX (_HWSIM_CMD_MAX - 1)

View File

@ -810,7 +810,7 @@ static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
}
}
f2fs_bug_on(F2FS_I_SB(inode),
f2fs_bug_on(F2FS_I_SB(cc->inode),
!compr && ret != cc->cluster_size &&
!is_inode_flag_set(cc->inode, FI_COMPRESS_RELEASED));
}

View File

@ -276,6 +276,13 @@ struct css_set {
struct rcu_head rcu_head;
};
/*
 * Extension wrapper around css_set that carries two separate preload
 * list nodes, so the same cset can sit on the migration source and
 * destination preload lists simultaneously (the original single
 * mg_preload_node could not). Embedding the base struct first keeps
 * container_of() conversions between css_set and ext_css_set valid
 * while preserving the ABI of struct css_set itself.
 */
struct ext_css_set {
/* must be first: code converts css_set* <-> ext_css_set* via container_of */
struct css_set cset;
/* node on cgroup_mgctx->preloaded_src_csets */
struct list_head mg_src_preload_node;
/* node on cgroup_mgctx->preloaded_dst_csets */
struct list_head mg_dst_preload_node;
};
struct cgroup_base_stat {
struct task_cputime cputime;
};

View File

@ -71,7 +71,8 @@ struct css_task_iter {
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern struct ext_css_set init_ext_css_set;
#define init_css_set init_ext_css_set.cset
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>

View File

@ -735,25 +735,28 @@ EXPORT_SYMBOL_GPL(of_css);
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
struct css_set init_css_set = {
.refcount = REFCOUNT_INIT(1),
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
.dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
.task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
.threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
.mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
/*
* The following field is re-initialized when this cset gets linked
* in cgroup_init(). However, let's initialize the field
* statically too so that the default cgroup can be accessed safely
* early during boot.
*/
.dfl_cgrp = &cgrp_dfl_root.cgrp,
struct ext_css_set init_ext_css_set = {
.cset = {
.refcount = REFCOUNT_INIT(1),
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
.dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
.task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
.threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node),
.mg_node = LIST_HEAD_INIT(init_css_set.mg_node),
/*
* The following field is re-initialized when this cset gets linked
* in cgroup_init(). However, let's initialize the field
* statically too so that the default cgroup can be accessed safely
* early during boot.
*/
.dfl_cgrp = &cgrp_dfl_root.cgrp,
},
.mg_src_preload_node = LIST_HEAD_INIT(init_ext_css_set.mg_src_preload_node),
.mg_dst_preload_node = LIST_HEAD_INIT(init_ext_css_set.mg_dst_preload_node),
};
static int css_set_count = 1; /* 1 for init_css_set */
@ -1181,6 +1184,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
struct cgroup *cgrp)
{
struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
struct ext_css_set *ext_cset;
struct css_set *cset;
struct list_head tmp_links;
struct cgrp_cset_link *link;
@ -1201,9 +1205,10 @@ static struct css_set *find_css_set(struct css_set *old_cset,
if (cset)
return cset;
cset = kzalloc(sizeof(*cset), GFP_KERNEL);
if (!cset)
ext_cset = kzalloc(sizeof(*ext_cset), GFP_KERNEL);
if (!ext_cset)
return NULL;
cset = &ext_cset->cset;
/* Allocate all the cgrp_cset_link objects that we'll need */
if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
@ -1221,6 +1226,8 @@ static struct css_set *find_css_set(struct css_set *old_cset,
INIT_HLIST_NODE(&cset->hlist);
INIT_LIST_HEAD(&cset->cgrp_links);
INIT_LIST_HEAD(&cset->mg_preload_node);
INIT_LIST_HEAD(&ext_cset->mg_src_preload_node);
INIT_LIST_HEAD(&ext_cset->mg_dst_preload_node);
INIT_LIST_HEAD(&cset->mg_node);
/* Copy the set of subsystem state objects generated in
@ -2671,22 +2678,28 @@ int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp)
*/
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
{
LIST_HEAD(preloaded);
struct css_set *cset, *tmp_cset;
struct ext_css_set *cset, *tmp_cset;
lockdep_assert_held(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets,
mg_src_preload_node) {
cset->cset.mg_src_cgrp = NULL;
cset->cset.mg_dst_cgrp = NULL;
cset->cset.mg_dst_cset = NULL;
list_del_init(&cset->mg_src_preload_node);
put_css_set_locked(&cset->cset);
}
list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cgrp = NULL;
cset->mg_dst_cset = NULL;
list_del_init(&cset->mg_preload_node);
put_css_set_locked(cset);
list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets,
mg_dst_preload_node) {
cset->cset.mg_src_cgrp = NULL;
cset->cset.mg_dst_cgrp = NULL;
cset->cset.mg_dst_cset = NULL;
list_del_init(&cset->mg_dst_preload_node);
put_css_set_locked(&cset->cset);
}
spin_unlock_irq(&css_set_lock);
@ -2713,6 +2726,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
struct cgroup_mgctx *mgctx)
{
struct cgroup *src_cgrp;
struct ext_css_set *ext_src_cset;
lockdep_assert_held(&cgroup_mutex);
lockdep_assert_held(&css_set_lock);
@ -2726,8 +2740,9 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
return;
src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
ext_src_cset = container_of(src_cset, struct ext_css_set, cset);
if (!list_empty(&src_cset->mg_preload_node))
if (!list_empty(&ext_src_cset->mg_src_preload_node))
return;
WARN_ON(src_cset->mg_src_cgrp);
@ -2738,7 +2753,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
src_cset->mg_src_cgrp = src_cgrp;
src_cset->mg_dst_cgrp = dst_cgrp;
get_css_set(src_cset);
list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
list_add_tail(&ext_src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets);
}
/**
@ -2757,20 +2772,23 @@ void cgroup_migrate_add_src(struct css_set *src_cset,
*/
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
{
struct css_set *src_cset, *tmp_cset;
struct ext_css_set *ext_src_set, *tmp_cset;
lockdep_assert_held(&cgroup_mutex);
/* look up the dst cset for each src cset and link it to src */
list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
mg_preload_node) {
list_for_each_entry_safe(ext_src_set, tmp_cset, &mgctx->preloaded_src_csets,
mg_src_preload_node) {
struct css_set *src_cset = &ext_src_set->cset;
struct css_set *dst_cset;
struct ext_css_set *ext_dst_cset;
struct cgroup_subsys *ss;
int ssid;
dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
if (!dst_cset)
return -ENOMEM;
ext_dst_cset = container_of(dst_cset, struct ext_css_set, cset);
WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
@ -2782,7 +2800,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
if (src_cset == dst_cset) {
src_cset->mg_src_cgrp = NULL;
src_cset->mg_dst_cgrp = NULL;
list_del_init(&src_cset->mg_preload_node);
list_del_init(&ext_src_set->mg_src_preload_node);
put_css_set(src_cset);
put_css_set(dst_cset);
continue;
@ -2790,8 +2808,8 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
src_cset->mg_dst_cset = dst_cset;
if (list_empty(&dst_cset->mg_preload_node))
list_add_tail(&dst_cset->mg_preload_node,
if (list_empty(&ext_dst_cset->mg_dst_preload_node))
list_add_tail(&ext_dst_cset->mg_dst_preload_node,
&mgctx->preloaded_dst_csets);
else
put_css_set(dst_cset);
@ -3010,7 +3028,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
DEFINE_CGROUP_MGCTX(mgctx);
struct cgroup_subsys_state *d_css;
struct cgroup *dsct;
struct css_set *src_cset;
struct ext_css_set *ext_src_set;
bool has_tasks;
int ret;
@ -3041,11 +3059,12 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
goto out_finish;
spin_lock_irq(&css_set_lock);
list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
list_for_each_entry(ext_src_set, &mgctx.preloaded_src_csets,
mg_src_preload_node) {
struct task_struct *task, *ntask;
/* all tasks in src_csets need to be migrated */
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
list_for_each_entry_safe(task, ntask, &ext_src_set->cset.tasks, cg_list)
cgroup_migrate_add_task(task, &mgctx);
}
spin_unlock_irq(&css_set_lock);