tree:
https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git
fscache-iter
head: f6ac6a6db767d26c3330f0cecadd4f331ee6d291
commit: 02956137643245200e02a2683c203fed2308aece [40/55] fscache: Add read helper
config: nios2-randconfig-r022-20200909 (attached as .config)
compiler: nios2-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
git checkout 02956137643245200e02a2683c203fed2308aece
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=nios2
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp(a)intel.com>
All error/warnings (new ones prefixed by >>):
fs/fscache/read_helper.c: In function 'fscache_put_read_request':
> fs/fscache/read_helper.c:67:3: error: implicit declaration of
function 'fput'; did you mean 'iput'?
[-Werror=implicit-function-declaration]
67 | fput(rreq->file);
| ^~~~
| iput
fs/fscache/read_helper.c: At top level:
> fs/fscache/read_helper.c:202:6: warning: no previous prototype
for 'fscache_rreq_write_to_cache' [-Wmissing-prototypes]
202 | void
fscache_rreq_write_to_cache(struct fscache_read_request *rreq)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
cc1: some warnings being treated as errors
#
https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git/com...
git remote add dhowells-fs https://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs.git
git fetch --no-tags dhowells-fs fscache-iter
git checkout 02956137643245200e02a2683c203fed2308aece
vim +67 fs/fscache/read_helper.c
60
/*
 * Drop a ref on a read request; tear the request down and free it when the
 * last ref goes away.
 *
 * NOTE(review): the kernel test robot reports an implicit declaration of
 * fput() here (nios2, GCC 9.3, W=1) — this file needs
 * #include <linux/file.h>, which is evidently not pulled in transitively on
 * all architectures.
 */
void fscache_put_read_request(struct fscache_read_request *rreq)
{
	if (refcount_dec_and_test(&rreq->usage)) {
		/* Release any subsidiary I/O requests first. */
		fscache_rreq_clear_ioreqs(rreq);
		if (rreq->netfs_ops->cleanup)
			rreq->netfs_ops->cleanup(rreq);
		fput(rreq->file); /* NOTE(review): assumes rreq->file is never NULL — confirm */

		if (rreq->cookie) {
			/* End the cache I/O op and drop the object and cookie
			 * refs that were taken for this read.
			 */
			fscache_end_io_operation(rreq->cookie);
			fscache_stat(&fscache_n_cop_put_object);
			rreq->object->cache->ops->put_object(
				rreq->object, fscache_obj_put_ioreq);
			fscache_stat_d(&fscache_n_cop_put_object);
			fscache_cookie_put(rreq->cookie, fscache_cookie_put_ioreq);
		}
		kfree(rreq);
	}
}
80
81 /*
82 * Allocate and partially initialise an I/O request structure.
83 */
84 static struct fscache_io_request *fscache_alloc_io_request(
85 struct fscache_read_request *rreq)
86 {
87 struct fscache_io_request *ioreq;
88
89 ioreq = kzalloc(sizeof(struct fscache_io_request), GFP_KERNEL);
90 if (ioreq) {
91 INIT_LIST_HEAD(&ioreq->rreq_link);
92 refcount_set(&ioreq->usage, 2);
93 ioreq->rreq = rreq;
94 ioreq->io_done = fscache_ioreq_terminated;
95 }
96
97 return ioreq;
98 }
99
100 /*
101 * Clear the unread part of an I/O request.
102 */
103 static void fscache_clear_unread(struct fscache_io_request *ioreq)
104 {
105 if (ioreq->transferred < ioreq->len) {
106 struct iov_iter iter;
107
108 iov_iter_xarray(&iter, WRITE, &ioreq->rreq->mapping->i_pages,
109 ioreq->start + ioreq->transferred,
110 ioreq->len - ioreq->transferred);
111 iov_iter_zero(iov_iter_count(&iter), &iter);
112 }
113 }
114
115 /*
116 * Issue a read against the cache.
117 */
118 static void fscache_read_from_cache(struct fscache_read_request *rreq,
119 struct fscache_io_request *ioreq)
120 {
121 struct iov_iter iter;
122
123 fscache_wait_for_object(ioreq->cookie, ioreq->object, FSCACHE_WANT_READ);
124
125 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
126 ioreq->start, ioreq->len);
127 ioreq->object->cache->ops->read(ioreq->object, ioreq, &iter);
128 }
129
/*
 * Ask the netfs to issue a read request to the server for us.
 *
 * The netfs is expected to read from ioreq->pos + ioreq->transferred to
 * ioreq->pos + ioreq->len - 1.  It may not backtrack and write data into the
 * buffer prior to the transferred point as it might clobber dirty data
 * obtained from the cache.
 *
 * Alternatively, the netfs is allowed to indicate one of two things:
 *
 * - FSCACHE_IO_SHORT_READ: A short read - it will get called again to try and
 *   make progress.
 *
 * - FSCACHE_IO_CLEAR_TAIL: A short read - the rest of the buffer will be
 *   cleared.
 */
static void fscache_read_from_server(struct fscache_read_request *rreq,
				     struct fscache_io_request *ioreq)
{
	rreq->netfs_ops->issue_op(ioreq);
}
151
/*
 * Deal with the completion of writing the data to the cache.  We have to clear
 * the PG_fscache bits on the pages involved and release the caller's ref.
 *
 * May be called in softirq mode and we inherit a ref from the caller.
 */
static void fscache_rreq_unmark_after_write(struct fscache_read_request *rreq)
{
	struct fscache_io_request *ioreq;
	struct page *page;
	pgoff_t unlocked = 0;
	bool have_unlocked = false;

	rcu_read_lock();

	list_for_each_entry(ioreq, &rreq->io_requests, rreq_link) {
		XA_STATE(xas, &rreq->mapping->i_pages, ioreq->start / PAGE_SIZE);

		/* Walk every page index covered by this subrequest's byte
		 * range.
		 * NOTE(review): no xas_retry() handling in this walk — confirm
		 * retry entries cannot be seen here under rcu_read_lock().
		 */
		xas_for_each(&xas, page, (ioreq->start + ioreq->len - 1) / PAGE_SIZE) {
			/* We might have multiple writes from the same huge
			 * page, but we mustn't unlock a page more than once.
			 */
			if (have_unlocked && page->index <= unlocked)
				continue;
			unlocked = page->index;
			unlock_page_fscache(page);
			have_unlocked = true;
		}
	}

	rcu_read_unlock();
	fscache_rreq_clear_ioreqs(rreq);
	fscache_put_read_request(rreq);
}
186
/*
 * Completion handler for one cache-write subrequest: when the last
 * outstanding op finishes, clean up the whole read request.
 */
static void fscache_rreq_copy_done(struct fscache_io_request *ioreq)
{
	struct fscache_read_request *rreq = ioreq->rreq;

	/* If we decrement nr_io_ops to 0, the ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_io_ops))
		fscache_rreq_unmark_after_write(rreq);

	/* Note: this may free ioreq, so rreq was sampled above first. */
	fscache_put_io_request(ioreq);
}
197
/*
 * Perform any outstanding writes to the cache.  We inherit a ref from the
 * caller.
 *
 * NOTE(review): the kernel test robot flags -Wmissing-prototypes here: this
 * function has external linkage but no prior declaration.  Either add a
 * prototype to the appropriate internal header or make it static if it is
 * only called from this file — confirm which callers exist.
 */
void fscache_rreq_write_to_cache(struct fscache_read_request *rreq)
{
	struct fscache_io_request *ioreq, *next, *p;
	struct iov_iter iter;

	fscache_wait_for_object(rreq->cookie, rreq->object, FSCACHE_WANT_WRITE);

	/* We don't want terminating writes trying to wake us up whilst we're
	 * still going through the list.
	 */
	atomic_inc(&rreq->nr_io_ops);

	/* First pass: discard subrequests not marked for writing to the
	 * cache.
	 */
	list_for_each_entry_safe(ioreq, p, &rreq->io_requests, rreq_link) {
		if (!test_bit(FSCACHE_IO_WRITE_TO_CACHE, &ioreq->flags)) {
			list_del_init(&ioreq->rreq_link);
			fscache_put_io_request(ioreq);
		}
	}

	/* Second pass: issue the writes, merging adjacent ranges as we go. */
	list_for_each_entry(ioreq, &rreq->io_requests, rreq_link) {
		/* Amalgamate adjacent writes */
		while (!list_is_last(&ioreq->rreq_link, &rreq->io_requests)) {
			next = list_next_entry(ioreq, rreq_link);
			if (next->start > ioreq->start + ioreq->len)
				break;
			ioreq->len += next->len;
			list_del_init(&next->rreq_link);
			fscache_put_io_request(next);
		}

		iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages,
				ioreq->start, ioreq->len);

		ioreq->io_done = fscache_rreq_copy_done;
		fscache_get_io_request(ioreq);
		rreq->object->cache->ops->write(rreq->object, ioreq, &iter);
	}

	/* If we decrement nr_io_ops to 0, the usage ref belongs to us. */
	if (atomic_dec_and_test(&rreq->nr_io_ops))
		fscache_rreq_unmark_after_write(rreq);
}
244
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org