diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi
index 824b53703ce5..744dcb8fe69e 100644
--- a/android/abi_gki_aarch64_xiaomi
+++ b/android/abi_gki_aarch64_xiaomi
@@ -10,6 +10,7 @@
   snd_soc_get_volsw_range
   snd_soc_info_volsw_range
   snd_soc_put_volsw_range
+  snd_compr_stop_error
 
 # required by cs35l45_dlkm.ko
   devm_snd_soc_register_component
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index cb5cfa22b5b2..15ee40d55cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -421,7 +422,14 @@ done:
 
 static int show_map(struct seq_file *m, void *v)
 {
-	show_map_vma(m, v);
+	struct vm_area_struct *pad_vma = get_pad_vma(v);
+	struct vm_area_struct *vma = get_data_vma(v);
+
+	if (vma_pages(vma))
+		show_map_vma(m, vma);
+
+	show_map_pad_vma(vma, pad_vma, m, show_map_vma, false);
+
 	m_cache_vma(m, v);
 	return 0;
 }
@@ -879,7 +887,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 	seq_puts(m, " kB\n");
 }
 
-static int show_smap(struct seq_file *m, void *v)
+static void show_smap_vma(struct seq_file *m, void *v)
 {
 	struct vm_area_struct *vma = v;
 	struct mem_size_stats mss;
@@ -908,9 +916,19 @@ static int show_smap(struct seq_file *m, void *v)
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
 	show_smap_vma_flags(m, vma);
+}
 
-	m_cache_vma(m, vma);
+static int show_smap(struct seq_file *m, void *v)
+{
+	struct vm_area_struct *pad_vma = get_pad_vma(v);
+	struct vm_area_struct *vma = get_data_vma(v);
 
+	if (vma_pages(vma))
+		show_smap_vma(m, vma);
+
+	show_map_pad_vma(vma, pad_vma, m, show_smap_vma, true);
+
+	m_cache_vma(m, v);
 	return 0;
 }
 
diff --git a/include/linux/pgsize_migration.h b/include/linux/pgsize_migration.h
new file mode 100644
index 000000000000..fbfb1b9b9196
--- /dev/null
+++ b/include/linux/pgsize_migration.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
+#define _LINUX_PAGE_SIZE_MIGRATION_H
+
+/*
+ * Page Size Migration
+ *
+ * Copyright (c) 2024, Google LLC.
+ * Author: Kalesh Singh
+ *
+ * This file contains the APIs for mitigations to ensure
+ * app compatibility during the transition from 4kB to 16kB
+ * page size in Android.
+ */
+
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+
+/*
+ * vm_flags representation of VMA padding pages.
+ *
+ * This allows the kernel to identify the portion of an ELF LOAD segment VMA
+ * that is padding.
+ *
+ * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
+ * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
+ * alignment (p_align).
+ *
+ * The representation is illustrated below.
+ *
+ *      63        62        61        60
+ *   _________ _________ _________ _________
+ *  |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
+ *  | of 4kB  | of 4kB  | of 4kB  | of 4kB  |
+ *  | chunks  | chunks  | chunks  | chunks  |
+ *  |_________|_________|_________|_________|
+ */
+
+#define VM_PAD_WIDTH		4
+#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
+#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
+#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
+#define VMA_PAD_START(vma)	(vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
+
+#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
+extern void vma_set_pad_pages(struct vm_area_struct *vma,
+			      unsigned long nr_pages);
+
+extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
+
+extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end);
+
+extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);
+
+extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);
+
+extern void show_map_pad_vma(struct vm_area_struct *vma,
+			     struct vm_area_struct *pad,
+			     struct seq_file *m, void *func, bool smaps);
+
+extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
+			  unsigned long addr, int new_below);
+#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
+static inline void vma_set_pad_pages(struct vm_area_struct *vma,
+				     unsigned long nr_pages)
+{
+}
+
+static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
+					 unsigned long start, unsigned long end)
+{
+}
+
+static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
+{
+	return NULL;
+}
+
+static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
+{
+	return vma;
+}
+
+static inline void show_map_pad_vma(struct vm_area_struct *vma,
+				    struct vm_area_struct *pad,
+				    struct seq_file *m, void *func, bool smaps)
+{
+}
+
+static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
+				 unsigned long addr, int new_below)
+{
+}
+#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
+
+static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
+{
+	return vma_pages(vma) - vma_pad_pages(vma);
+}
+
+/*
+ * Sets the correct padding bits / flags for a VMA split.
+ */
+static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
+						unsigned long newflags)
+{
+	if (newflags & VM_PAD_MASK)
+		return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
+	else
+		return newflags;
+}
+
+/*
+ * Merging of padding VMAs is uncommon, as padding is only allowed
+ * from the linker context.
+ *
+ * To simplify the semantics, adjacent VMAs with padding are not
+ * allowed to merge.
+ */
+static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
+				       unsigned long vm_flags)
+{
+	/* Padding VMAs cannot be merged with other padding or real VMAs */
+	return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
+}
+#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */
diff --git a/mm/Makefile b/mm/Makefile
index 372e1aac9be6..d433568ee189 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -42,7 +42,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o gup.o $(mmu-y) showmem.o
+			   debug.o gup.o pgsize_migration.o $(mmu-y) showmem.o
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o
diff --git a/mm/madvise.c b/mm/madvise.c
index c87d4c43b885..481e4e07e24b 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -773,6 +774,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 					unsigned long start, unsigned long end)
 {
+	madvise_vma_pad_pages(vma, start, end);
+
 	zap_page_range(vma, start, end - start);
 	return 0;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 49434f6c8493..ef94fccbfa90 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -3822,7 +3823,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
 	end_pgoff = start_pgoff -
 		((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
 		PTRS_PER_PTE - 1;
-	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
+	end_pgoff = min3(end_pgoff, vma_data_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
 			start_pgoff + nr_pages - 1);
 
 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
diff --git a/mm/mlock.c b/mm/mlock.c
index 395bf08f86d8..5805a284512d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -573,7 +574,7 @@ success:
 	 */
 	if (lock) {
 		vm_write_begin(vma);
-		WRITE_ONCE(vma->vm_flags, newflags);
+		WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
 		vm_write_end(vma);
 	} else
 		munlock_vma_pages_range(vma, start, end);
diff --git a/mm/mmap.c b/mm/mmap.c
index 23698963a8ab..fcc6f69fd4be 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -1082,6 +1083,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
 		return 0;
 	if (vma_get_anon_name(vma) != anon_name)
 		return 0;
+	if (!is_mergable_pad_vma(vma, vm_flags))
+		return 0;
 	return 1;
 }
 
@@ -2826,8 +2829,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
 	/* Success. */
-	if (!err)
+	if (!err) {
+		split_pad_vma(vma, new, addr, new_below);
 		return 0;
+	}
 
 	/* Clean everything up if vma_adjust failed. */
 	if (new->vm_ops && new->vm_ops->close)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4e2283f510f2..c1e4a8da68b0 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/pgsize_migration.h>
 #include
 #include
 #include
@@ -455,7 +456,7 @@ success:
 	 * held in write mode.
 	 */
 	vm_write_begin(vma);
-	WRITE_ONCE(vma->vm_flags, newflags);
+	WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
 	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);
 
diff --git a/mm/pgsize_migration.c b/mm/pgsize_migration.c
new file mode 100644
index 000000000000..9efadd1412a3
--- /dev/null
+++ b/mm/pgsize_migration.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Page Size Migration
+ *
+ * This file contains the core logic of mitigations to ensure
+ * app compatibility during the transition from 4kB to 16kB
+ * page size in Android.
+ *
+ * Copyright (c) 2024, Google LLC.
+ * Author: Kalesh Singh
+ */
+
+#include <linux/pgsize_migration.h>
+
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/sched/task_stack.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+typedef void (*show_pad_maps_fn) (struct seq_file *m, struct vm_area_struct *vma);
+typedef void (*show_pad_smaps_fn) (struct seq_file *m, void *v);
+
+#ifdef CONFIG_64BIT
+#if PAGE_SIZE == SZ_4K
+DEFINE_STATIC_KEY_TRUE(pgsize_migration_enabled);
+
+#define is_pgsize_migration_enabled() (static_branch_likely(&pgsize_migration_enabled))
+#else /* PAGE_SIZE != SZ_4K */
+DEFINE_STATIC_KEY_FALSE(pgsize_migration_enabled);
+
+#define is_pgsize_migration_enabled() (static_branch_unlikely(&pgsize_migration_enabled))
+#endif /* PAGE_SIZE == SZ_4K */
+
+static ssize_t show_pgsize_migration_enabled(struct kobject *kobj,
+					     struct kobj_attribute *attr,
+					     char *buf)
+{
+	if (is_pgsize_migration_enabled())
+		return sprintf(buf, "%d\n", 1);
+	else
+		return sprintf(buf, "%d\n", 0);
+}
+
+static ssize_t store_pgsize_migration_enabled(struct kobject *kobj,
+					      struct kobj_attribute *attr,
+					      const char *buf, size_t n)
+{
+	unsigned long val;
+
+	/* Migration is only applicable to 4kB kernels */
+	if (PAGE_SIZE != SZ_4K)
+		return n;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	if (val > 1)
+		return -EINVAL;
+
+	if (val == 1)
+		static_branch_enable(&pgsize_migration_enabled);
+	else if (val == 0)
+		static_branch_disable(&pgsize_migration_enabled);
+
+	return n;
+}
+
+static struct kobj_attribute pgsize_migration_enabled_attr = __ATTR(
+	enabled,
+	0644,
+	show_pgsize_migration_enabled,
+	store_pgsize_migration_enabled
+);
+
+static struct attribute *pgsize_migration_attrs[] = {
+	&pgsize_migration_enabled_attr.attr,
+	NULL
+};
+
+static struct attribute_group pgsize_migration_attr_group = {
+	.name = "pgsize_migration",
+	.attrs = pgsize_migration_attrs,
+};
+
+/**
+ * What:          /sys/kernel/mm/pgsize_migration/enabled
+ * Date:          April 2024
+ * KernelVersion: v5.4+ (GKI kernels)
+ * Contact:       Kalesh Singh
+ * Description:   /sys/kernel/mm/pgsize_migration/enabled
+ *                allows userspace to turn on or off the page size
+ *                migration mitigations necessary for app compatibility
+ *                during Android's transition from 4kB to 16kB page size.
+ *                Such mitigations include preserving /proc/<pid>/[s]maps
+ *                output as if there were no segment extension by the
+ *                dynamic loader; and preventing fault around in the padding
+ *                sections of ELF LOAD segment mappings.
+ * Users:         Bionic's dynamic linker
+ */
+static int __init init_pgsize_migration(void)
+{
+	if (sysfs_create_group(mm_kobj, &pgsize_migration_attr_group))
+		pr_err("pgsize_migration: failed to create sysfs group\n");
+
+	return 0;
+};
+late_initcall(init_pgsize_migration);
+
+#if PAGE_SIZE == SZ_4K
+void vma_set_pad_pages(struct vm_area_struct *vma,
+		       unsigned long nr_pages)
+{
+	if (!is_pgsize_migration_enabled())
+		return;
+
+	vma->vm_flags &= ~VM_PAD_MASK;
+	vma->vm_flags |= (nr_pages << VM_PAD_SHIFT);
+}
+
+unsigned long vma_pad_pages(struct vm_area_struct *vma)
+{
+	if (!is_pgsize_migration_enabled())
+		return 0;
+
+	return vma->vm_flags >> VM_PAD_SHIFT;
+}
+
+static __always_inline bool str_has_suffix(const char *str, const char *suffix)
+{
+	size_t str_len = strlen(str);
+	size_t suffix_len = strlen(suffix);
+
+	if (str_len < suffix_len)
+		return false;
+
+	return !strncmp(str + str_len - suffix_len, suffix, suffix_len);
+}
+
+/*
+ * The dynamic linker, or interpreter, operates within the process context
+ * of the binary that necessitated dynamic linking.
+ *
+ * Consequently, process context identifiers (PID, comm, ...) cannot
+ * be used to differentiate whether the execution context belongs to the
+ * dynamic linker or not.
+ *
+ * linker_ctx() deduces whether execution is currently in the dynamic linker's
+ * context by correlating the current userspace instruction pointer with the
+ * VMAs of the current task.
+ *
+ * Returns true if in linker context, otherwise false.
+ *
+ * Caller must hold mmap lock in read mode.
+ */
+static inline bool linker_ctx(void)
+{
+	struct pt_regs *regs = task_pt_regs(current);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct file *file;
+
+	if (!regs)
+		return false;
+
+	vma = find_vma(mm, instruction_pointer(regs));
+
+	/* Current execution context, the VMA must be present */
+	BUG_ON(!vma);
+
+	file = vma->vm_file;
+	if (!file)
+		return false;
+
+	if ((vma->vm_flags & VM_EXEC)) {
+		char buf[64];
+		const int bufsize = sizeof(buf);
+		char *path;
+
+		memset(buf, 0, bufsize);
+		path = d_path(&file->f_path, buf, bufsize);
+
+		if (!strcmp(path, "/system/bin/linker64"))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * Saves the number of padding pages for an ELF segment mapping
+ * in vm_flags.
+ *
+ * The number of padding pages is deduced from the madvise DONTNEED range
+ * [start, end) if the following conditions are met:
+ * 1) The range is enclosed by a single VMA
+ * 2) The range ends at the end address of the VMA
+ * 3) The range starts at an address greater than the start address of the VMA
+ * 4) The number of pages in the range does not exceed VM_TOTAL_PAD_PAGES
+ * 5) The VMA is a file-backed VMA
+ * 6) The file backing the VMA is a shared library (*.so)
+ * 7) The madvise was requested by bionic's dynamic linker
+ */
+void madvise_vma_pad_pages(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end)
+{
+	unsigned long nr_pad_pages;
+
+	if (!is_pgsize_migration_enabled())
+		return;
+
+	/*
+	 * If the madvise range is at the end of the file, save the number of
+	 * pages in vm_flags (only 4 bits are needed for up to 64kB-aligned ELFs).
+	 */
+	if (start <= vma->vm_start || end != vma->vm_end)
+		return;
+
+	nr_pad_pages = (end - start) >> PAGE_SHIFT;
+
+	if (!nr_pad_pages || nr_pad_pages > VM_TOTAL_PAD_PAGES)
+		return;
+
+	/* Only handle this for file backed VMAs */
+	if (!vma->vm_file)
+		return;
+
+	/* Limit this to only shared libraries (*.so) */
+	if (!str_has_suffix(vma->vm_file->f_path.dentry->d_name.name, ".so"))
+		return;
+
+	/* Only bionic's dynamic linker needs to hint padding pages. */
+	if (!linker_ctx())
+		return;
+
+	vma_set_pad_pages(vma, nr_pad_pages);
+}
+
+static const char *pad_vma_name(struct vm_area_struct *vma)
+{
+	return "[page size compat]";
+}
+
+static const struct vm_operations_struct pad_vma_ops = {
+	.name = pad_vma_name,
+};
+
+/*
+ * Returns a new VMA representing the padding in @vma;
+ * returns NULL if there is no padding in @vma.
+ */
+struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *pad;
+
+	if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
+		return NULL;
+
+	pad = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
+
+	*pad = *vma;
+
+	/* Remove file */
+	pad->vm_file = NULL;
+
+	/* Add vm_ops->name */
+	pad->vm_ops = &pad_vma_ops;
+
+	/* Adjust the start to begin at the start of the padding section */
+	pad->vm_start = VMA_PAD_START(pad);
+
+	/* Make the pad vma PROT_NONE */
+	pad->vm_flags &= ~(VM_READ|VM_WRITE|VM_EXEC);
+
+	/* Remove padding bits */
+	pad->vm_flags &= ~VM_PAD_MASK;
+
+	return pad;
+}
+
+/*
+ * Returns a new VMA excluding the padding from @vma;
+ * returns @vma if there is no padding in @vma.
+ */
+struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *data;
+
+	if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
+		return vma;
+
+	data = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
+
+	*data = *vma;
+
+	/* Adjust the end to the start of the padding section */
+	data->vm_end = VMA_PAD_START(data);
+
+	return data;
+}
+
+/*
+ * Calls the show_pad_{maps|smaps}_fn on the @pad VMA, and frees the copies
+ * of @vma and @pad.
+ */
+void show_map_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *pad,
+		      struct seq_file *m, void *func, bool smaps)
+{
+	if (!pad)
+		return;
+
+	/*
+	 * This cannot happen. If @pad vma was allocated the corresponding
+	 * @vma should have the VM_PAD_MASK bit(s) set.
+	 */
+	BUG_ON(!(vma->vm_flags & VM_PAD_MASK));
+
+	/*
+	 * This cannot happen. @pad is a section of the original VMA.
+	 * Therefore @vma cannot be null if @pad is not null.
+	 */
+	BUG_ON(!vma);
+
+	if (smaps)
+		((show_pad_smaps_fn)func)(m, pad);
+	else
+		((show_pad_maps_fn)func)(m, pad);
+
+	kfree(pad);
+	kfree(vma);
+}
+
+/*
+ * When splitting a padding VMA there are a few cases to handle.
+ *
+ * Given:
+ *
+ *     | DDDDPPPP |
+ *
+ * where:
+ *     - D represents 1 page of data;
+ *     - P represents 1 page of padding;
+ *     - | represents the boundaries (start/end) of the VMA
+ *
+ *
+ * 1) Split exactly at the padding boundary
+ *
+ *     | DDDDPPPP | --> | DDDD | PPPP |
+ *
+ *     - Remove padding flags from the first VMA.
+ *     - The second VMA is all padding
+ *
+ * 2) Split within the padding area
+ *
+ *     | DDDDPPPP | --> | DDDDPP | PP |
+ *
+ *     - Subtract the length of the second VMA from the first VMA's padding.
+ *     - The second VMA is all padding, adjust its padding length (flags)
+ *
+ * 3) Split within the data area
+ *
+ *     | DDDDPPPP | --> | DD | DDPPPP |
+ *
+ *     - Remove padding flags from the first VMA.
+ *     - The second VMA has the same padding as before the split.
+ */
+void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
+		   unsigned long addr, int new_below)
+{
+	unsigned long nr_pad_pages = vma_pad_pages(vma);
+	unsigned long nr_vma2_pages;
+	struct vm_area_struct *first;
+	struct vm_area_struct *second;
+
+	if (!nr_pad_pages)
+		return;
+
+	if (new_below) {
+		first = new;
+		second = vma;
+	} else {
+		first = vma;
+		second = new;
+	}
+
+	nr_vma2_pages = vma_pages(second);
+
+	if (nr_vma2_pages >= nr_pad_pages) {	/* Case 1 & 3 */
+		first->vm_flags &= ~VM_PAD_MASK;
+		vma_set_pad_pages(second, nr_pad_pages);
+	} else {				/* Case 2 */
+		vma_set_pad_pages(first, nr_pad_pages - nr_vma2_pages);
+		vma_set_pad_pages(second, nr_vma2_pages);
+	}
+}
+#endif /* PAGE_SIZE == SZ_4K */
+#endif /* CONFIG_64BIT */
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e94317fb353..c1ac40e3a04a 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4068,6 +4068,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 
 	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
 		list_del_rcu(&set->list);
+		set->dead = 1;
 		if (event)
 			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
 					     GFP_KERNEL);
@@ -7530,10 +7531,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
 	struct nftables_pernet *nft_net = net_generic(net, nf_tables_net_id);
 	struct nft_trans *trans, *next;
 	struct nft_trans_elem *te;
+	int err = 0;
 
 	if (action == NFNL_ABORT_VALIDATE &&
 	    nf_tables_validate(net) < 0)
-		return -EAGAIN;
+		err = -EAGAIN;
 
 	list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
 					 list) {
@@ -7661,12 +7663,7 @@
 		nf_tables_abort_release(trans);
 	}
 
-	if (action == NFNL_ABORT_AUTOLOAD)
-		nf_tables_module_autoload(net);
-	else
-		nf_tables_module_autoload_cleanup(net);
-
-	return 0;
+	return err;
 }
 
 static void nf_tables_cleanup(struct net *net)
@@ -7685,6 +7682,16 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
 	ret = __nf_tables_abort(net, action);
 	nft_gc_seq_end(nft_net, gc_seq);
 
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	/* module autoload needs to happen after GC sequence update because it
+	 * temporarily releases and grabs mutex again.
+	 */
+	if (action == NFNL_ABORT_AUTOLOAD)
+		nf_tables_module_autoload(net);
+	else
+		nf_tables_module_autoload_cleanup(net);
+
 	mutex_unlock(&nft_net->commit_mutex);
 
 	return ret;
@@ -8376,8 +8383,11 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 
 	gc_seq = nft_gc_seq_begin(nft_net);
 
-	if (!list_empty(&nft_net->commit_list))
-		__nf_tables_abort(net, NFNL_ABORT_NONE);
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	if (!list_empty(&nft_net->module_list))
+		nf_tables_module_autoload_cleanup(net);
+
 	__nft_release_tables(net);
 	nft_gc_seq_end(nft_net, gc_seq);
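
Illustrative sketch, not part of the patch: the standalone C program below mirrors the vm_flags padding encoding that include/linux/pgsize_migration.h introduces, so the top-4-bit layout, the 60kB ceiling and the vma_pad_fixup_flags() behaviour can be sanity-checked in userspace. The DEMO_* macros and demo_* helpers are hypothetical stand-ins for the kernel's VM_PAD_* definitions, assuming 4kB pages and a 64-bit vm_flags as in the header above.

/* demo_pad_bits.c: userspace model of the VM_PAD_* encoding (illustrative only). */
#include <assert.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG	64
#define DEMO_PAGE_SHIFT		12	/* 4kB pages */
#define DEMO_PAD_WIDTH		4
#define DEMO_PAD_SHIFT		(DEMO_BITS_PER_LONG - DEMO_PAD_WIDTH)
#define DEMO_TOTAL_PAD_PAGES	((1ULL << DEMO_PAD_WIDTH) - 1)	/* 15 pages == 60kB */
#define DEMO_PAD_MASK		(DEMO_TOTAL_PAD_PAGES << DEMO_PAD_SHIFT)

/* Mirrors vma_set_pad_pages(): store the pad page count in the top 4 bits. */
static unsigned long long demo_set_pad(unsigned long long flags,
				       unsigned long long nr_pages)
{
	assert(nr_pages <= DEMO_TOTAL_PAD_PAGES);
	flags &= ~DEMO_PAD_MASK;
	flags |= nr_pages << DEMO_PAD_SHIFT;
	return flags;
}

/* Mirrors vma_pad_pages(): read the pad page count back out. */
static unsigned long long demo_pad_pages(unsigned long long flags)
{
	return flags >> DEMO_PAD_SHIFT;
}

/* Mirrors vma_pad_fixup_flags(): carry the old pad bits across a flag update. */
static unsigned long long demo_fixup_flags(unsigned long long old_flags,
					   unsigned long long newflags)
{
	if (newflags & DEMO_PAD_MASK)
		return (newflags & ~DEMO_PAD_MASK) | (old_flags & DEMO_PAD_MASK);
	return newflags;
}

int main(void)
{
	/* A LOAD segment VMA whose last 3 pages (12kB) are linker padding. */
	unsigned long long flags = 0x75ULL;	/* arbitrary low flag bits */

	flags = demo_set_pad(flags, 3);
	assert(demo_pad_pages(flags) == 3);

	/* An mprotect()/mlock()-style rewrite must not lose the annotation. */
	assert(demo_pad_pages(demo_fixup_flags(flags, flags | 0x8ULL)) == 3);

	printf("pad pages: %llu (%llu kB of padding)\n",
	       demo_pad_pages(flags),
	       demo_pad_pages(flags) << (DEMO_PAGE_SHIFT - 10));
	return 0;
}

The point of the encoding is that the count lives in otherwise-unused high bits of vm_flags, so a padded VMA needs no new field in struct vm_area_struct; the fixup helper wired into the mlock() and mprotect() paths above appears to exist precisely so those bits are not dropped when vm_flags is rewritten.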