#! /vendor/bin/sh
# Copyright (c) 2012-2013, 2016-2019, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of The Linux Foundation nor
#       the names of its contributors may be used to endorse or promote
#       products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

target=`getprop ro.board.platform`

function configure_memory_parameters() {
    # Set Memory parameters.
    #
    # Set per_process_reclaim tuning parameters
    # All targets will use vmpressure range 50-70,
    # All targets will use 512 pages swap size.
    #
    # Set Low memory killer minfree parameters
    # 64 bit will use Google default LMK series.
    #
    # Set ALMK parameters (usually above the highest minfree values)
    # vmpressure_file_min threshold is always set slightly higher
    # than LMK minfree's last bin value for all targets. It is calculated as
    # vmpressure_file_min = (last bin - second last bin ) + last bin

    # Read adj series and set adj threshold for PPR and ALMK.
    # This is required since adj values change from framework to framework.
    adj_series=`cat /sys/module/lowmemorykiller/parameters/adj`
    adj_1="${adj_series#*,}"
    set_almk_ppr_adj="${adj_1%%,*}"
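    # Illustrative example only (the series above is read from the running
    # kernel; the values here are hypothetical): with adj="0,100,200,300,900,906"
    # the parameter expansions yield
    #   ${adj_series#*,} -> "100,200,300,900,906"   (drop the first entry)
    #   ${adj_1%%,*}     -> "100"                   (keep only the next entry)
    # i.e. set_almk_ppr_adj ends up holding the second entry of the adj series.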
    # PPR and ALMK should not act on HOME adj and below.
    # Normalized ADJ for HOME is 6. Hence multiply by 6
    # ADJ score represented as INT in LMK params, actual score can be in decimal
    # Hence add 6 considering a worst case of 0.9 conversion to INT (0.9*6).
    # For uLMK + Memcg, this will be set as 6 since adj is zero.
    set_almk_ppr_adj=$(((set_almk_ppr_adj * 6) + 6))
    echo $set_almk_ppr_adj > /sys/module/lowmemorykiller/parameters/adj_max_shift

    # Calculate vmpressure_file_min as below & set for 64 bit:
    # vmpressure_file_min = last_lmk_bin + (last_lmk_bin - last_but_one_lmk_bin)
    minfree_series=`cat /sys/module/lowmemorykiller/parameters/minfree`
    minfree_1="${minfree_series#*,}" ; rem_minfree_1="${minfree_1%%,*}"
    minfree_2="${minfree_1#*,}" ; rem_minfree_2="${minfree_2%%,*}"
    minfree_3="${minfree_2#*,}" ; rem_minfree_3="${minfree_3%%,*}"
    minfree_4="${minfree_3#*,}" ; rem_minfree_4="${minfree_4%%,*}"
    minfree_5="${minfree_4#*,}"
    vmpres_file_min=$((minfree_5 + (minfree_5 - rem_minfree_4)))
    echo $vmpres_file_min > /sys/module/lowmemorykiller/parameters/vmpressure_file_min
    echo "18432,23040,27648,64512,165888,225792" > /sys/module/lowmemorykiller/parameters/minfree

    # Enable adaptive LMK for all targets &
    # use Google default LMK series for all 64-bit targets >=2GB.
    echo 1 > /sys/module/lowmemorykiller/parameters/enable_adaptive_lmk
    echo 1 > /sys/module/lowmemorykiller/parameters/oom_reaper
}

if [ -f /sys/devices/soc0/soc_id ]; then
    soc_id=`cat /sys/devices/soc0/soc_id`
else
    soc_id=`cat /sys/devices/system/soc/soc0/id`
fi

case "$soc_id" in
    "355" | "369" | "377" | "380" | "384" )
        # Enable bus-dcvs
        for device in /sys/devices/platform/soc
        do
            for cpubw in $device/*cpu-cpu-llcc-bw/devfreq/*cpu-cpu-llcc-bw
            do
                echo "bw_hwmon" > $cpubw/governor
                echo 50 > $cpubw/polling_interval
                echo "2288 4577 7110 9155 12298 14236" > $cpubw/bw_hwmon/mbps_zones
                echo 4 > $cpubw/bw_hwmon/sample_ms
                echo 68 > $cpubw/bw_hwmon/io_percent
                echo 20 > $cpubw/bw_hwmon/hist_memory
                echo 0 > $cpubw/bw_hwmon/hyst_length
                echo 80 > $cpubw/bw_hwmon/down_thres
                echo 0 > $cpubw/bw_hwmon/guard_band_mbps
                echo 250 > $cpubw/bw_hwmon/up_scale
                echo 1600 > $cpubw/bw_hwmon/idle_mbps
                echo 14236 > $cpubw/max_freq
            done

            for llccbw in $device/*cpu-llcc-ddr-bw/devfreq/*cpu-llcc-ddr-bw
            do
                echo "bw_hwmon" > $llccbw/governor
                echo 40 > $llccbw/polling_interval
                echo "1144 1720 2086 2929 3879 5931 6881" > $llccbw/bw_hwmon/mbps_zones
                echo 4 > $llccbw/bw_hwmon/sample_ms
                echo 68 > $llccbw/bw_hwmon/io_percent
                echo 20 > $llccbw/bw_hwmon/hist_memory
                echo 0 > $llccbw/bw_hwmon/hyst_length
                echo 80 > $llccbw/bw_hwmon/down_thres
                echo 0 > $llccbw/bw_hwmon/guard_band_mbps
                echo 250 > $llccbw/bw_hwmon/up_scale
                echo 1600 > $llccbw/bw_hwmon/idle_mbps
                echo 6881 > $llccbw/max_freq
            done

            # Enable mem_latency governor for L3, LLCC, and DDR scaling
            for memlat in $device/*cpu*-lat/devfreq/*cpu*-lat
            do
                echo "mem_latency" > $memlat/governor
                echo 10 > $memlat/polling_interval
                echo 400 > $memlat/mem_latency/ratio_ceil
            done

            # Gold L3 ratio ceil
            echo 4000 > /sys/class/devfreq/soc:qcom,cpu6-cpu-l3-lat/mem_latency/ratio_ceil

            # Enable cdspl3 governor for L3 cdsp nodes
            for l3cdsp in $device/*cdsp-cdsp-l3-lat/devfreq/*cdsp-cdsp-l3-lat
            do
                echo "cdspl3" > $l3cdsp/governor
            done

            # Enable compute governor for gold latfloor
            for latfloor in $device/*cpu*-ddr-latfloor*/devfreq/*cpu-ddr-latfloor*
            do
                echo "compute" > $latfloor/governor
                echo 10 > $latfloor/polling_interval
            done
        done
    ;;
esac
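# Illustrative read-back only (not part of the original tuning): devfreq nodes
# configured above expose standard sysfs attributes, so the applied governor
# and current frequency can be spot-checked with something like:
#
#   for node in /sys/class/devfreq/*-bw /sys/class/devfreq/*-lat
#   do
#       echo "$node: $(cat $node/governor) @ $(cat $node/cur_freq)"
#   done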
case "$soc_id" in
    "365" | "366" )
        # Enable bus-dcvs
        for device in /sys/devices/platform/soc
        do
            for cpubw in $device/*cpu-cpu-llcc-bw/devfreq/*cpu-cpu-llcc-bw
            do
                echo "bw_hwmon" > $cpubw/governor
                echo 50 > $cpubw/polling_interval
                echo "2288 4577 7110 9155 12298 14236" > $cpubw/bw_hwmon/mbps_zones
                echo 4 > $cpubw/bw_hwmon/sample_ms
                echo 68 > $cpubw/bw_hwmon/io_percent
                echo 20 > $cpubw/bw_hwmon/hist_memory
                echo 0 > $cpubw/bw_hwmon/hyst_length
                echo 80 > $cpubw/bw_hwmon/down_thres
                echo 0 > $cpubw/bw_hwmon/guard_band_mbps
                echo 250 > $cpubw/bw_hwmon/up_scale
                echo 1600 > $cpubw/bw_hwmon/idle_mbps
                echo 14236 > $cpubw/max_freq
            done

            for llccbw in $device/*cpu-llcc-ddr-bw/devfreq/*cpu-llcc-ddr-bw
            do
                echo "bw_hwmon" > $llccbw/governor
                echo 40 > $llccbw/polling_interval
                echo "1144 1720 2086 2929 3879 5931 6881" > $llccbw/bw_hwmon/mbps_zones
                echo 4 > $llccbw/bw_hwmon/sample_ms
                echo 68 > $llccbw/bw_hwmon/io_percent
                echo 20 > $llccbw/bw_hwmon/hist_memory
                echo 0 > $llccbw/bw_hwmon/hyst_length
                echo 80 > $llccbw/bw_hwmon/down_thres
                echo 0 > $llccbw/bw_hwmon/guard_band_mbps
                echo 250 > $llccbw/bw_hwmon/up_scale
                echo 1600 > $llccbw/bw_hwmon/idle_mbps
                echo 6881 > $llccbw/max_freq
            done

            for npubw in $device/*npu-npu-ddr-bw/devfreq/*npu-npu-ddr-bw
            do
                echo 1 > /sys/devices/virtual/npu/msm_npu/pwr
                echo "bw_hwmon" > $npubw/governor
                echo 40 > $npubw/polling_interval
                echo "1144 1720 2086 2929 3879 5931 6881" > $npubw/bw_hwmon/mbps_zones
                echo 4 > $npubw/bw_hwmon/sample_ms
                echo 80 > $npubw/bw_hwmon/io_percent
                echo 20 > $npubw/bw_hwmon/hist_memory
                echo 10 > $npubw/bw_hwmon/hyst_length
                echo 30 > $npubw/bw_hwmon/down_thres
                echo 0 > $npubw/bw_hwmon/guard_band_mbps
                echo 250 > $npubw/bw_hwmon/up_scale
                echo 0 > $npubw/bw_hwmon/idle_mbps
                echo 0 > /sys/devices/virtual/npu/msm_npu/pwr
            done

            # Enable mem_latency governor for L3, LLCC, and DDR scaling
            for memlat in $device/*cpu*-lat/devfreq/*cpu*-lat
            do
                echo "mem_latency" > $memlat/governor
                echo 10 > $memlat/polling_interval
                echo 400 > $memlat/mem_latency/ratio_ceil
            done

            # Gold L3 ratio ceil
            echo 4000 > /sys/class/devfreq/soc:qcom,cpu6-cpu-l3-lat/mem_latency/ratio_ceil

            # Enable cdspl3 governor for L3 cdsp nodes
            for l3cdsp in $device/*cdsp-cdsp-l3-lat/devfreq/*cdsp-cdsp-l3-lat
            do
                echo "cdspl3" > $l3cdsp/governor
            done

            # Enable compute governor for gold latfloor
            for latfloor in $device/*cpu*-ddr-latfloor*/devfreq/*cpu-ddr-latfloor*
            do
                echo "compute" > $latfloor/governor
                echo 10 > $latfloor/polling_interval
            done
        done
    ;;
esac

function configure_zram_parameters() {
    MemTotalStr=`cat /proc/meminfo | grep MemTotal`
    MemTotal=${MemTotalStr:16:8}

    let RamSizeGB="( $MemTotal / 1048576 ) + 1"
    let zRamSizeMB="( $RamSizeGB * 1024 ) / 2"
    diskSizeUnit=M    # use MB to avoid 32-bit overflow

    if [ $zRamSizeMB -gt 4096 ]; then
        let zRamSizeMB=4096
    fi

    echo lz4 > /sys/block/zram0/comp_algorithm

    if [ -f /sys/block/zram0/disksize ]; then
        if [ -f /sys/block/zram0/use_dedup ]; then
            echo 1 > /sys/block/zram0/use_dedup
        fi

        if [ $MemTotal -le 524288 ]; then
            echo 402653184 > /sys/block/zram0/disksize
        elif [ $MemTotal -le 1048576 ]; then
            echo 805306368 > /sys/block/zram0/disksize
        else
            zramDiskSize=$zRamSizeMB$diskSizeUnit
            echo $zramDiskSize > /sys/block/zram0/disksize
        fi

        # ZRAM may use more memory than it saves if SLAB_STORE_USER
        # debug option is enabled.
        if [ -e /sys/kernel/slab/zs_handle ]; then
            echo 0 > /sys/kernel/slab/zs_handle/store_user
        fi
        if [ -e /sys/kernel/slab/zspage ]; then
            echo 0 > /sys/kernel/slab/zspage/store_user
        fi

        mkswap /dev/block/zram0
        swapon /dev/block/zram0 -p 32758
    fi
}

# Enable ZRAM
configure_zram_parameters

echo 0 > /proc/sys/vm/page-cluster
echo 0 > /sys/module/vmpressure/parameters/allocstall_threshold
echo 100 > /proc/sys/vm/swappiness

# Set Memory parameters
configure_memory_parameters
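# Illustrative sizing example only (hypothetical 4 GB device; real values come
# from /proc/meminfo at boot): with MemTotal of roughly 3800000 kB,
#   RamSizeGB  = (3800000 / 1048576) + 1 = 4
#   zRamSizeMB = (4 * 1024) / 2          = 2048
# so configure_zram_parameters writes "2048M" to /sys/block/zram0/disksize,
# with the size capped at 4096M on devices with more RAM.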