tree:
https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git
mm-percpu-local_lock-v4r4
head: 85d43239ceaed47c1a4e646aba1ebc09ad46734e
commit: c1617f38ab5c8401a4d5c6288c59aaf02e2a6788 [22/29] mm/vmstat: Convert NUMA
statistics to basic NUMA counters
config: parisc-randconfig-r015-20210419 (attached as .config)
compiler: hppa-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O
~/bin/make.cross
chmod +x ~/bin/make.cross
#
https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git/commit/?id=...
git remote add mel
https://git.kernel.org/pub/scm/linux/kernel/git/mel/linux.git
git fetch --no-tags mel mm-percpu-local_lock-v4r4
git checkout c1617f38ab5c8401a4d5c6288c59aaf02e2a6788
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=parisc
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>
All error/warnings (new ones prefixed by >>):
mm/vmstat.c: In function 'refresh_cpu_vm_stats':
mm/vmstat.c:781:34: warning: unused variable 'pcp' [-Wunused-variable]
781 | struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
| ^~~
mm/vmstat.c: In function 'cpu_vm_stats_fold':
> mm/vmstat.c:913:1: warning: ISO C90 forbids mixed declarations
and code [-Wdeclaration-after-statement]
913 | void drain_zonestat(struct zone
*zone, struct per_cpu_zonestat *pzstats)
| ^~~~
> mm/vmstat.c:1793:5: error: section attribute cannot be specified
for local variables
1793 | int sysctl_stat_interval __read_mostly = HZ;
| ^~~~~~~~~~~~~~~~~~~~
> mm/vmstat.c:1841:13: error: invalid storage class for function
'vmstat_update'
1841 | static void vmstat_update(struct work_struct *w)
| ^~~~~~~~~~~~~
> mm/vmstat.c:1864:13: error: invalid storage class for function
'need_update'
1864 | static bool need_update(int cpu)
| ^~~~~~~~~~~
> mm/vmstat.c:1922:13: error: invalid storage class for function
'vmstat_shepherd'
1922 | static void vmstat_shepherd(struct work_struct
*w);
| ^~~~~~~~~~~~~~~
In file included from include/linux/mm_types.h:16,
from include/linux/mmzone.h:21,
from include/linux/gfp.h:6,
from include/linux/xarray.h:14,
from include/linux/radix-tree.h:19,
from include/linux/fs.h:15,
from mm/vmstat.c:13:
> mm/vmstat.c:1924:42: error: 'vmstat_shepherd' undeclared
(first use in this function)
1924 | static DECLARE_DEFERRABLE_WORK(shepherd,
vmstat_shepherd);
| ^~~~~~~~~~~~~~~
include/linux/workqueue.h:187:11: note: in definition of macro
'__WORK_INITIALIZER'
187 | .func = (f), \
| ^
include/linux/workqueue.h:204:26: note: in expansion of macro
'__DELAYED_WORK_INITIALIZER'
204 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmstat.c:1924:8: note: in expansion of macro 'DECLARE_DEFERRABLE_WORK'
1924 | static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
| ^~~~~~~~~~~~~~~~~~~~~~~
mm/vmstat.c:1924:42: note: each undeclared identifier is reported only once for each
function it appears in
1924 | static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
| ^~~~~~~~~~~~~~~
include/linux/workqueue.h:187:11: note: in definition of macro
'__WORK_INITIALIZER'
187 | .func = (f), \
| ^
include/linux/workqueue.h:204:26: note: in expansion of macro
'__DELAYED_WORK_INITIALIZER'
204 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
mm/vmstat.c:1924:8: note: in expansion of macro 'DECLARE_DEFERRABLE_WORK'
1924 | static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
| ^~~~~~~~~~~~~~~~~~~~~~~
mm/vmstat.c:1926:13: error: invalid storage class for function
'vmstat_shepherd'
1926 | static void vmstat_shepherd(struct work_struct *w)
| ^~~~~~~~~~~~~~~
> mm/vmstat.c:1946:20: error: invalid storage class for function
'start_shepherd_timer'
1946 | static void __init
start_shepherd_timer(void)
| ^~~~~~~~~~~~~~~~~~~~
> mm/vmstat.c:1958:20: error: invalid storage class for function
'init_cpu_node_state'
1958 | static void __init
init_cpu_node_state(void)
| ^~~~~~~~~~~~~~~~~~~
> mm/vmstat.c:1968:12: error: invalid storage class for function
'vmstat_cpu_online'
1968 | static int vmstat_cpu_online(unsigned int
cpu)
| ^~~~~~~~~~~~~~~~~
> mm/vmstat.c:1975:12: error: invalid storage class for function
'vmstat_cpu_down_prep'
1975 | static int vmstat_cpu_down_prep(unsigned
int cpu)
| ^~~~~~~~~~~~~~~~~~~~
> mm/vmstat.c:1981:12: error: invalid storage class for function
'vmstat_cpu_dead'
1981 | static int vmstat_cpu_dead(unsigned int cpu)
| ^~~~~~~~~~~~~~~
> mm/vmstat.c:2031:1: error: expected declaration or statement at
end of input
2031 | }
| ^
> mm/vmstat.c:1999:26: warning: variable 'mm_percpu_wq' set
but not used [-Wunused-but-set-variable]
1999 | struct workqueue_struct
*mm_percpu_wq;
| ^~~~~~~~~~~~
mm/vmstat.c:1126:20: warning: unused variable 'vmstat_text'
[-Wunused-variable]
1126 | const char * const vmstat_text[] = {
| ^~~~~~~~~~~
At top level:
mm/vmstat.c:1126:20: warning: 'vmstat_text' defined but not used
[-Wunused-const-variable=]
mm/vmstat.c:2001:13: warning: 'init_mm_internals' defined but not used
[-Wunused-function]
2001 | void __init init_mm_internals(void)
| ^~~~~~~~~~~~~~~~~
mm/vmstat.c:1926:13: warning: 'vmstat_shepherd' defined but not used
[-Wunused-function]
1926 | static void vmstat_shepherd(struct work_struct *w)
| ^~~~~~~~~~~~~~~
mm/vmstat.c:1896:6: warning: 'quiet_vmstat' defined but not used
[-Wunused-function]
1896 | void quiet_vmstat(void)
| ^~~~~~~~~~~~
mm/vmstat.c:913:6: warning: 'drain_zonestat' defined but not used
[-Wunused-function]
913 | void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
| ^~~~~~~~~~~~~~
vim +1793 mm/vmstat.c
f6ac2354d79119 Christoph Lameter 2006-06-30 1790
df9ecaba3f152d Christoph Lameter 2006-08-31 1791 #ifdef CONFIG_SMP
d1187ed21026fd Christoph Lameter 2007-05-09 1792 static DEFINE_PER_CPU(struct
delayed_work, vmstat_work);
77461ab33229d4 Christoph Lameter 2007-05-09 @1793 int sysctl_stat_interval
__read_mostly = HZ;
d1187ed21026fd Christoph Lameter 2007-05-09 1794
52b6f46bc163ee Hugh Dickins 2016-05-19 1795 #ifdef CONFIG_PROC_FS
52b6f46bc163ee Hugh Dickins 2016-05-19 1796 static void
refresh_vm_stats(struct work_struct *work)
52b6f46bc163ee Hugh Dickins 2016-05-19 1797 {
52b6f46bc163ee Hugh Dickins 2016-05-19 1798 refresh_cpu_vm_stats(true);
52b6f46bc163ee Hugh Dickins 2016-05-19 1799 }
52b6f46bc163ee Hugh Dickins 2016-05-19 1800
52b6f46bc163ee Hugh Dickins 2016-05-19 1801 int vmstat_refresh(struct
ctl_table *table, int write,
32927393dc1ccd Christoph Hellwig 2020-04-24 1802 void *buffer, size_t
*lenp, loff_t *ppos)
52b6f46bc163ee Hugh Dickins 2016-05-19 1803 {
52b6f46bc163ee Hugh Dickins 2016-05-19 1804 long val;
52b6f46bc163ee Hugh Dickins 2016-05-19 1805 int err;
52b6f46bc163ee Hugh Dickins 2016-05-19 1806 int i;
52b6f46bc163ee Hugh Dickins 2016-05-19 1807
52b6f46bc163ee Hugh Dickins 2016-05-19 1808 /*
52b6f46bc163ee Hugh Dickins 2016-05-19 1809 * The regular update, every
sysctl_stat_interval, may come later
52b6f46bc163ee Hugh Dickins 2016-05-19 1810 * than expected: leaving a
significant amount in per_cpu buckets.
52b6f46bc163ee Hugh Dickins 2016-05-19 1811 * This is particularly
misleading when checking a quantity of HUGE
52b6f46bc163ee Hugh Dickins 2016-05-19 1812 * pages, immediately after
running a test. /proc/sys/vm/stat_refresh,
52b6f46bc163ee Hugh Dickins 2016-05-19 1813 * which can equally be
echo'ed to or cat'ted from (by root),
52b6f46bc163ee Hugh Dickins 2016-05-19 1814 * can be used to update the
stats just before reading them.
52b6f46bc163ee Hugh Dickins 2016-05-19 1815 *
c41f012ade0b95 Michal Hocko 2017-09-06 1816 * Oh, and since
global_zone_page_state() etc. are so careful to hide
52b6f46bc163ee Hugh Dickins 2016-05-19 1817 * transiently negative
values, report an error here if any of
52b6f46bc163ee Hugh Dickins 2016-05-19 1818 * the stats is negative, so
we know to go looking for imbalance.
52b6f46bc163ee Hugh Dickins 2016-05-19 1819 */
52b6f46bc163ee Hugh Dickins 2016-05-19 1820 err =
schedule_on_each_cpu(refresh_vm_stats);
52b6f46bc163ee Hugh Dickins 2016-05-19 1821 if (err)
52b6f46bc163ee Hugh Dickins 2016-05-19 1822 return err;
52b6f46bc163ee Hugh Dickins 2016-05-19 1823 for (i = 0; i <
NR_VM_ZONE_STAT_ITEMS; i++) {
75ef7184053989 Mel Gorman 2016-07-28 1824 val =
atomic_long_read(&vm_zone_stat[i]);
52b6f46bc163ee Hugh Dickins 2016-05-19 1825 if (val < 0) {
52b6f46bc163ee Hugh Dickins 2016-05-19 1826 pr_warn("%s: %s
%ld\n",
9d7ea9a297e644 Konstantin Khlebnikov 2019-12-04 1827 __func__,
zone_stat_name(i), val);
52b6f46bc163ee Hugh Dickins 2016-05-19 1828 err = -EINVAL;
52b6f46bc163ee Hugh Dickins 2016-05-19 1829 }
52b6f46bc163ee Hugh Dickins 2016-05-19 1830 }
52b6f46bc163ee Hugh Dickins 2016-05-19 1831 if (err)
52b6f46bc163ee Hugh Dickins 2016-05-19 1832 return err;
52b6f46bc163ee Hugh Dickins 2016-05-19 1833 if (write)
52b6f46bc163ee Hugh Dickins 2016-05-19 1834 *ppos += *lenp;
52b6f46bc163ee Hugh Dickins 2016-05-19 1835 else
52b6f46bc163ee Hugh Dickins 2016-05-19 1836 *lenp = 0;
52b6f46bc163ee Hugh Dickins 2016-05-19 1837 return 0;
52b6f46bc163ee Hugh Dickins 2016-05-19 1838 }
52b6f46bc163ee Hugh Dickins 2016-05-19 1839 #endif /* CONFIG_PROC_FS */
52b6f46bc163ee Hugh Dickins 2016-05-19 1840
d1187ed21026fd Christoph Lameter 2007-05-09 @1841 static void
vmstat_update(struct work_struct *w)
d1187ed21026fd Christoph Lameter 2007-05-09 1842 {
0eb77e98803219 Christoph Lameter 2016-01-14 1843 if
(refresh_cpu_vm_stats(true)) {
7cc36bbddde5cd Christoph Lameter 2014-10-09 1844 /*
7cc36bbddde5cd Christoph Lameter 2014-10-09 1845 * Counters were updated so
we expect more updates
7cc36bbddde5cd Christoph Lameter 2014-10-09 1846 * to occur in the future.
Keep on running the
7cc36bbddde5cd Christoph Lameter 2014-10-09 1847 * update worker thread.
7cc36bbddde5cd Christoph Lameter 2014-10-09 1848 */
ce612879ddc78e Michal Hocko 2017-04-07 1849
queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
176bed1de5bf97 Linus Torvalds 2015-10-15 1850
this_cpu_ptr(&vmstat_work),
98f4ebb290a7dc Anton Blanchard 2009-04-02 1851
round_jiffies_relative(sysctl_stat_interval));
f01f17d3705bb6 Michal Hocko 2016-02-05 1852 }
7cc36bbddde5cd Christoph Lameter 2014-10-09 1853 }
7cc36bbddde5cd Christoph Lameter 2014-10-09 1854
0eb77e98803219 Christoph Lameter 2016-01-14 1855 /*
0eb77e98803219 Christoph Lameter 2016-01-14 1856 * Switch off vmstat processing
and then fold all the remaining differentials
0eb77e98803219 Christoph Lameter 2016-01-14 1857 * until the diffs stay at
zero. The function is used by NOHZ and can only be
0eb77e98803219 Christoph Lameter 2016-01-14 1858 * invoked when tick processing
is not active.
0eb77e98803219 Christoph Lameter 2016-01-14 1859 */
7cc36bbddde5cd Christoph Lameter 2014-10-09 1860 /*
7cc36bbddde5cd Christoph Lameter 2014-10-09 1861 * Check if the diffs for a
certain cpu indicate that
7cc36bbddde5cd Christoph Lameter 2014-10-09 1862 * an update is needed.
7cc36bbddde5cd Christoph Lameter 2014-10-09 1863 */
7cc36bbddde5cd Christoph Lameter 2014-10-09 @1864 static bool need_update(int
cpu)
7cc36bbddde5cd Christoph Lameter 2014-10-09 1865 {
2bbd00aef0671b Johannes Weiner 2021-02-25 1866 pg_data_t *last_pgdat = NULL;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1867 struct zone *zone;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1868
7cc36bbddde5cd Christoph Lameter 2014-10-09 1869 for_each_populated_zone(zone)
{
6d2c7ce5a47d4e Mel Gorman 2021-01-19 1870 struct per_cpu_zonestat
*pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
2bbd00aef0671b Johannes Weiner 2021-02-25 1871 struct per_cpu_nodestat *n;
6d2c7ce5a47d4e Mel Gorman 2021-01-19 1872
7cc36bbddde5cd Christoph Lameter 2014-10-09 1873 /*
7cc36bbddde5cd Christoph Lameter 2014-10-09 1874 * The fast way of checking
if there are any vmstat diffs.
7cc36bbddde5cd Christoph Lameter 2014-10-09 1875 */
6d2c7ce5a47d4e Mel Gorman 2021-01-19 1876 if
(memchr_inv(pzstats->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
6d2c7ce5a47d4e Mel Gorman 2021-01-19 1877
sizeof(pzstats->vm_stat_diff[0])))
7cc36bbddde5cd Christoph Lameter 2014-10-09 1878 return true;
c1617f38ab5c84 Mel Gorman 2021-02-24 1879
2bbd00aef0671b Johannes Weiner 2021-02-25 1880 if (last_pgdat ==
zone->zone_pgdat)
2bbd00aef0671b Johannes Weiner 2021-02-25 1881 continue;
2bbd00aef0671b Johannes Weiner 2021-02-25 1882 last_pgdat =
zone->zone_pgdat;
2bbd00aef0671b Johannes Weiner 2021-02-25 1883 n =
per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
2bbd00aef0671b Johannes Weiner 2021-02-25 1884 if
(memchr_inv(n->vm_node_stat_diff, 0, NR_VM_NODE_STAT_ITEMS *
2bbd00aef0671b Johannes Weiner 2021-02-25 1885
sizeof(n->vm_node_stat_diff[0])))
2bbd00aef0671b Johannes Weiner 2021-02-25 1886 return true;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1887 }
7cc36bbddde5cd Christoph Lameter 2014-10-09 1888 return false;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1889 }
7cc36bbddde5cd Christoph Lameter 2014-10-09 1890
7b8da4c7f07774 Christoph Lameter 2016-05-20 1891 /*
7b8da4c7f07774 Christoph Lameter 2016-05-20 1892 * Switch off vmstat processing
and then fold all the remaining differentials
7b8da4c7f07774 Christoph Lameter 2016-05-20 1893 * until the diffs stay at
zero. The function is used by NOHZ and can only be
7b8da4c7f07774 Christoph Lameter 2016-05-20 1894 * invoked when tick processing
is not active.
7b8da4c7f07774 Christoph Lameter 2016-05-20 1895 */
f01f17d3705bb6 Michal Hocko 2016-02-05 1896 void quiet_vmstat(void)
f01f17d3705bb6 Michal Hocko 2016-02-05 1897 {
f01f17d3705bb6 Michal Hocko 2016-02-05 1898 if (system_state !=
SYSTEM_RUNNING)
f01f17d3705bb6 Michal Hocko 2016-02-05 1899 return;
f01f17d3705bb6 Michal Hocko 2016-02-05 1900
7b8da4c7f07774 Christoph Lameter 2016-05-20 1901 if
(!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
f01f17d3705bb6 Michal Hocko 2016-02-05 1902 return;
f01f17d3705bb6 Michal Hocko 2016-02-05 1903
f01f17d3705bb6 Michal Hocko 2016-02-05 1904 if
(!need_update(smp_processor_id()))
f01f17d3705bb6 Michal Hocko 2016-02-05 1905 return;
f01f17d3705bb6 Michal Hocko 2016-02-05 1906
f01f17d3705bb6 Michal Hocko 2016-02-05 1907 /*
f01f17d3705bb6 Michal Hocko 2016-02-05 1908 * Just refresh counters and
do not care about the pending delayed
f01f17d3705bb6 Michal Hocko 2016-02-05 1909 * vmstat_update. It
doesn't fire that often to matter and canceling
f01f17d3705bb6 Michal Hocko 2016-02-05 1910 * it would be too expensive
from this path.
f01f17d3705bb6 Michal Hocko 2016-02-05 1911 * vmstat_shepherd will take
care about that for us.
f01f17d3705bb6 Michal Hocko 2016-02-05 1912 */
f01f17d3705bb6 Michal Hocko 2016-02-05 1913 refresh_cpu_vm_stats(false);
f01f17d3705bb6 Michal Hocko 2016-02-05 1914 }
f01f17d3705bb6 Michal Hocko 2016-02-05 1915
7cc36bbddde5cd Christoph Lameter 2014-10-09 1916 /*
7cc36bbddde5cd Christoph Lameter 2014-10-09 1917 * Shepherd worker thread that
checks the
7cc36bbddde5cd Christoph Lameter 2014-10-09 1918 * differentials of processors
that have their worker
7cc36bbddde5cd Christoph Lameter 2014-10-09 1919 * threads for vm statistics
updates disabled because of
7cc36bbddde5cd Christoph Lameter 2014-10-09 1920 * inactivity.
7cc36bbddde5cd Christoph Lameter 2014-10-09 1921 */
7cc36bbddde5cd Christoph Lameter 2014-10-09 @1922 static void
vmstat_shepherd(struct work_struct *w);
7cc36bbddde5cd Christoph Lameter 2014-10-09 1923
0eb77e98803219 Christoph Lameter 2016-01-14 @1924 static
DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
7cc36bbddde5cd Christoph Lameter 2014-10-09 1925
7cc36bbddde5cd Christoph Lameter 2014-10-09 1926 static void
vmstat_shepherd(struct work_struct *w)
7cc36bbddde5cd Christoph Lameter 2014-10-09 1927 {
7cc36bbddde5cd Christoph Lameter 2014-10-09 1928 int cpu;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1929
7cc36bbddde5cd Christoph Lameter 2014-10-09 1930 get_online_cpus();
7cc36bbddde5cd Christoph Lameter 2014-10-09 1931 /* Check processors whose
vmstat worker threads have been disabled */
7b8da4c7f07774 Christoph Lameter 2016-05-20 1932 for_each_online_cpu(cpu) {
f01f17d3705bb6 Michal Hocko 2016-02-05 1933 struct delayed_work *dw =
&per_cpu(vmstat_work, cpu);
7cc36bbddde5cd Christoph Lameter 2014-10-09 1934
7b8da4c7f07774 Christoph Lameter 2016-05-20 1935 if (!delayed_work_pending(dw)
&& need_update(cpu))
ce612879ddc78e Michal Hocko 2017-04-07 1936 queue_delayed_work_on(cpu,
mm_percpu_wq, dw, 0);
fbcc8183a4f815 Jiang Biao 2021-02-25 1937
fbcc8183a4f815 Jiang Biao 2021-02-25 1938 cond_resched();
f01f17d3705bb6 Michal Hocko 2016-02-05 1939 }
7cc36bbddde5cd Christoph Lameter 2014-10-09 1940 put_online_cpus();
7cc36bbddde5cd Christoph Lameter 2014-10-09 1941
7cc36bbddde5cd Christoph Lameter 2014-10-09 1942
schedule_delayed_work(&shepherd,
7cc36bbddde5cd Christoph Lameter 2014-10-09 1943
round_jiffies_relative(sysctl_stat_interval));
d1187ed21026fd Christoph Lameter 2007-05-09 1944 }
d1187ed21026fd Christoph Lameter 2007-05-09 1945
7cc36bbddde5cd Christoph Lameter 2014-10-09 @1946 static void __init
start_shepherd_timer(void)
d1187ed21026fd Christoph Lameter 2007-05-09 1947 {
7cc36bbddde5cd Christoph Lameter 2014-10-09 1948 int cpu;
7cc36bbddde5cd Christoph Lameter 2014-10-09 1949
7cc36bbddde5cd Christoph Lameter 2014-10-09 1950 for_each_possible_cpu(cpu)
ccde8bd4014eb2 Michal Hocko 2016-02-05 1951
INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
7cc36bbddde5cd Christoph Lameter 2014-10-09 1952 vmstat_update);
7cc36bbddde5cd Christoph Lameter 2014-10-09 1953
7cc36bbddde5cd Christoph Lameter 2014-10-09 1954
schedule_delayed_work(&shepherd,
7cc36bbddde5cd Christoph Lameter 2014-10-09 1955
round_jiffies_relative(sysctl_stat_interval));
d1187ed21026fd Christoph Lameter 2007-05-09 1956 }
d1187ed21026fd Christoph Lameter 2007-05-09 1957
03e86dba5b628a Tim Chen 2016-10-07 @1958 static void __init
init_cpu_node_state(void)
03e86dba5b628a Tim Chen 2016-10-07 1959 {
4c501327b4c67f Sebastian Andrzej Siewior 2016-11-29 1960 int node;
03e86dba5b628a Tim Chen 2016-10-07 1961
4c501327b4c67f Sebastian Andrzej Siewior 2016-11-29 1962 for_each_online_node(node) {
4c501327b4c67f Sebastian Andrzej Siewior 2016-11-29 1963 if
(cpumask_weight(cpumask_of_node(node)) > 0)
4c501327b4c67f Sebastian Andrzej Siewior 2016-11-29 1964 node_set_state(node,
N_CPU);
4c501327b4c67f Sebastian Andrzej Siewior 2016-11-29 1965 }
03e86dba5b628a Tim Chen 2016-10-07 1966 }
03e86dba5b628a Tim Chen 2016-10-07 1967
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 @1968 static int
vmstat_cpu_online(unsigned int cpu)
807a1bd2b2a388 Toshi Kani 2013-11-12 1969 {
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1970
refresh_zone_stat_thresholds();
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1971
node_set_state(cpu_to_node(cpu), N_CPU);
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1972 return 0;
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1973 }
807a1bd2b2a388 Toshi Kani 2013-11-12 1974
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 @1975 static int
vmstat_cpu_down_prep(unsigned int cpu)
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1976 {
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1977
cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1978 return 0;
807a1bd2b2a388 Toshi Kani 2013-11-12 1979 }
807a1bd2b2a388 Toshi Kani 2013-11-12 1980
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 @1981 static int
vmstat_cpu_dead(unsigned int cpu)
df9ecaba3f152d Christoph Lameter 2006-08-31 1982 {
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1983 const struct cpumask
*node_cpus;
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1984 int node;
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1985
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1986 node = cpu_to_node(cpu);
d1187ed21026fd Christoph Lameter 2007-05-09 1987
df9ecaba3f152d Christoph Lameter 2006-08-31 1988
refresh_zone_stat_thresholds();
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1989 node_cpus =
cpumask_of_node(node);
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1990 if (cpumask_weight(node_cpus)
> 0)
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1991 return 0;
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1992
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1993 node_clear_state(node,
N_CPU);
5438da977f83c9 Sebastian Andrzej Siewior 2016-11-29 1994 return 0;
df9ecaba3f152d Christoph Lameter 2006-08-31 1995 }
df9ecaba3f152d Christoph Lameter 2006-08-31 1996
:::::: The code at line 1793 was first introduced by commit
:::::: 77461ab33229d48614402decfb1b2eaa6d446861 Make vm statistics update interval
configurable
:::::: TO: Christoph Lameter <clameter@sgi.com>
:::::: CC: Linus Torvalds <torvalds@woody.linux-foundation.org>
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org