// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext2 directory handling functions
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/iversion.h>

typedef struct ext2_dir_entry_2 ext2_dirent;

/*
 * Tests against MAX_REC_LEN etc were put in place for 64k block
 * sizes; if that is not possible on this arch, we can skip
 * those tests and speed things up.
 */
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;
#endif
	return len;
}

static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
#if (PAGE_SIZE >= 65536)
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
#endif
	return cpu_to_le16(len);
}
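
/*
 * Editorial illustration (not part of the original source): rec_len is
 * only 16 bits wide, so on configurations with 64k pages a record that
 * covers a whole 65536-byte block cannot be stored directly.  The
 * helpers above encode it as the reserved value EXT2_MAX_REC_LEN and
 * decode it back on the way in:
 *
 *	ext2_rec_len_to_disk(1 << 16) == cpu_to_le16(EXT2_MAX_REC_LEN)
 *	ext2_rec_len_from_disk(cpu_to_le16(EXT2_MAX_REC_LEN)) == 1 << 16
 *
 * Every other length round-trips through a plain le16 conversion.
 */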

/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_SHIFT;
	if (last_byte > PAGE_SIZE)
		last_byte = PAGE_SIZE;
	return last_byte;
}
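
/*
 * Editorial illustration (not part of the original source), assuming
 * 4k pages and a directory with i_size == 10240:
 *
 *	ext2_last_byte(dir, 0) == 4096	(full page)
 *	ext2_last_byte(dir, 1) == 4096	(full page)
 *	ext2_last_byte(dir, 2) == 2048	(tail page: 10240 - 8192)
 *
 * i.e. full pages report PAGE_SIZE and the last page reports only the
 * bytes that lie below i_size.
 */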

static void ext2_commit_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	struct address_space *mapping = folio->mapping;
	struct inode *dir = mapping->host;

	inode_inc_iversion(dir);
	block_write_end(NULL, mapping, pos, len, len, folio, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	folio_unlock(folio);
}

static bool ext2_check_folio(struct folio *folio, int quiet, char *kaddr)
{
	struct inode *dir = folio->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = ext2_chunk_size(dir);
	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
	unsigned offs, rec_len;
	unsigned limit = folio_size(folio);
	ext2_dirent *p;
	char *error;

	if (dir->i_size < folio_pos(folio) + limit) {
		limit = offset_in_folio(folio, dir->i_size);
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
		p = (ext2_dirent *)(kaddr + offs);
		rec_len = ext2_rec_len_from_disk(p->rec_len);

		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
			goto Eshort;
		if (unlikely(rec_len & 3))
			goto Ealign;
		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
			goto Enamelen;
		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
			goto Espan;
		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	folio_set_checked(folio);
	return true;

	/* Too bad, we had an error */

Ebadsize:
	if (!quiet)
		ext2_error(sb, __func__,
			"size of directory #%lu is not a multiple "
			"of chunk size", dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	if (!quiet)
		ext2_error(sb, __func__, "bad entry in directory #%lu: %s - "
			"offset=%llu, inode=%lu, rec_len=%d, name_len=%d",
			dir->i_ino, error, folio_pos(folio) + offs,
			(unsigned long) le32_to_cpu(p->inode),
			rec_len, p->name_len);
	goto fail;
Eend:
	if (!quiet) {
		p = (ext2_dirent *)(kaddr + offs);
		ext2_error(sb, "ext2_check_folio",
			"entry in directory #%lu spans the page boundary, "
			"offset=%llu, inode=%lu",
			dir->i_ino, folio_pos(folio) + offs,
			(unsigned long) le32_to_cpu(p->inode));
	}
fail:
	return false;
}
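
/*
 * Editorial note on the Espan check above (not part of the original
 * source): an entry lies within a single chunk iff its first and last
 * bytes share the same chunk number, so with a 4096-byte chunk size
 * (mask ~0xfff):
 *
 *	offs = 4000, rec_len =  64: (4063 ^ 4000) & ~0xfff == 0  -> fine
 *	offs = 4000, rec_len = 256: (4255 ^ 4000) & ~0xfff != 0  -> Espan
 *
 * because byte 4255 already belongs to the following chunk.
 */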

/*
 * Calls to ext2_get_folio()/folio_release_kmap() must be nested according
 * to the rules documented in kmap_local_folio()/kunmap_local().
 *
 * NOTE: ext2_find_entry() and ext2_dotdot() act as a call
 * to ext2_get_folio() and should be treated as a call to
 * ext2_get_folio() for nesting purposes.
 */
static void *ext2_get_folio(struct inode *dir, unsigned long n,
				   int quiet, struct folio **foliop)
{
	struct address_space *mapping = dir->i_mapping;
	struct folio *folio = read_mapping_folio(mapping, n, NULL);
	void *kaddr;

	if (IS_ERR(folio))
		return ERR_CAST(folio);
	kaddr = kmap_local_folio(folio, 0);
	if (unlikely(!folio_test_checked(folio))) {
		if (!ext2_check_folio(folio, quiet, kaddr))
			goto fail;
	}
	*foliop = folio;
	return kaddr;

fail:
	folio_release_kmap(folio, kaddr);
	return ERR_PTR(-EIO);
}
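
/*
 * Editorial usage sketch (not a caller in the original source): because
 * kmap_local_folio() mappings are stack-like, a function that maps two
 * directory folios must release them in reverse order of acquisition:
 *
 *	kaddr_a = ext2_get_folio(dir, 0, 0, &folio_a);
 *	kaddr_b = ext2_get_folio(dir, 1, 0, &folio_b);
 *	...
 *	folio_release_kmap(folio_b, kaddr_b);
 *	folio_release_kmap(folio_a, kaddr_a);
 *
 * (error handling omitted; folio_a/folio_b are hypothetical names).
 */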

/*
 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}

static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;
		p = ext2_next_entry(p);
	}
	return offset_in_page(p);
}
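
/*
 * Editorial illustration (not part of the original source), assuming a
 * 4096-byte chunk size (mask == ~4095): a stale readdir offset of 4242
 * is first rounded down to the chunk start at 4096, and the chunk is
 * then walked entry by entry; the first entry starting at or beyond
 * 4242 becomes the validated offset.  This keeps ext2_readdir() from
 * resuming in the middle of a record after the directory has changed.
 */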

static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = fs_umode_to_ftype(inode->i_mode);
	else
		de->file_type = 0;
}

static int
ext2_readdir(struct file *file, struct dir_context *ctx)
{
	loff_t pos = ctx->pos;
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_MASK;
	unsigned long n = pos >> PAGE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	bool need_revalidate = !inode_eq_iversion(inode, *(u64 *)file->private_data);
	bool has_filetype;

	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;

	has_filetype =
		EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE);

	for ( ; n < npages; n++, offset = 0) {
		ext2_dirent *de;
		struct folio *folio;
		char *kaddr = ext2_get_folio(inode, n, 0, &folio);
		char *limit;

		if (IS_ERR(kaddr)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			ctx->pos += PAGE_SIZE - offset;
			return PTR_ERR(kaddr);
		}
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				ctx->pos = (n<<PAGE_SHIFT) + offset;
			}
			*(u64 *)file->private_data = inode_query_iversion(inode);
			need_revalidate = false;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				folio_release_kmap(folio, de);
				return -EIO;
			}
			if (de->inode) {
				unsigned char d_type = DT_UNKNOWN;

				if (has_filetype)
					d_type = fs_ftype_to_dtype(de->file_type);

				if (!dir_emit(ctx, de->name, de->name_len,
						le32_to_cpu(de->inode),
						d_type)) {
					folio_release_kmap(folio, de);
					return 0;
				}
			}
			ctx->pos += ext2_rec_len_from_disk(de->rec_len);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 0;
}
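
/*
 * Editorial note (not part of the original source): the readdir
 * positions used above are plain byte offsets into the directory's
 * pagecache,
 *
 *	ctx->pos == (page index << PAGE_SHIFT) + offset within the page
 *
 * e.g. with 4k pages, pos 8236 addresses offset 44 in page 2.  This is
 * why a position saved across a directory modification must go through
 * ext2_validate_entry() before it can be trusted again.
 */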

/*
 *	ext2_find_entry()
 *
 * Finds an entry in the specified directory with the wanted name. It
 * returns the folio in which the entry was found (as a parameter -
 * foliop) and the entry itself. The folio is returned mapped and
 * unlocked. The entry is guaranteed to be valid.
 *
 * On success folio_release_kmap() should be called on *foliop.
 *
 * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
 * according to the rules documented in kmap_local_folio()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
 * and should be treated as a call to ext2_get_folio() for nesting
 * purposes.
 */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
			const struct qstr *child, struct folio **foliop)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;

	if (npages == 0)
		goto out;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr = ext2_get_folio(dir, n, 0, foliop);
		if (IS_ERR(kaddr))
			return ERR_CAST(kaddr);

		de = (ext2_dirent *) kaddr;
		kaddr += ext2_last_byte(dir, n) - reclen;
		while ((char *) de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				folio_release_kmap(*foliop, de);
				goto out;
			}
			if (ext2_match(namelen, name, de))
				goto found;
			de = ext2_next_entry(de);
		}
		folio_release_kmap(*foliop, kaddr);

		if (++n >= npages)
			n = 0;
		/* next folio is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return ERR_PTR(-ENOENT);

found:
	ei->i_dir_start_lookup = n;
	return de;
}

/*
 * Return the '..' directory entry and the folio in which the entry was
 * found (as a parameter - foliop).
 *
 * On success folio_release_kmap() should be called on *foliop.
 *
 * NOTE: Calls to ext2_get_folio()/folio_release_kmap() must be nested
 * according to the rules documented in kmap_local_folio()/kunmap_local().
 *
 * ext2_find_entry() and ext2_dotdot() act as a call to ext2_get_folio()
 * and should be treated as a call to ext2_get_folio() for nesting
 * purposes.
 */
struct ext2_dir_entry_2 *ext2_dotdot(struct inode *dir, struct folio **foliop)
{
	ext2_dirent *de = ext2_get_folio(dir, 0, 0, foliop);

	if (!IS_ERR(de))
		return ext2_next_entry(de);
	return NULL;
}

int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino)
{
	struct ext2_dir_entry_2 *de;
	struct folio *folio;

	de = ext2_find_entry(dir, child, &folio);
	if (IS_ERR(de))
		return PTR_ERR(de);

	*ino = le32_to_cpu(de->inode);
	folio_release_kmap(folio, de);
	return 0;
}

static int ext2_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, ext2_get_block);
}

static int ext2_handle_dirsync(struct inode *dir)
{
	int err;

	if (!IS_DIRSYNC(dir))
		return 0;

	err = filemap_write_and_wait(dir->i_mapping);
	if (!err)
		err = sync_inode_metadata(dir, 1);
	return err;
}

int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		struct folio *folio, struct inode *inode, bool update_times)
{
	loff_t pos = folio_pos(folio) + offset_in_folio(folio, de);
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	folio_lock(folio);
	err = ext2_prepare_chunk(folio, pos, len);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	ext2_commit_chunk(folio, pos, len);
	if (update_times)
		inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	return ext2_handle_dirsync(dir);
}

/*
 *	Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = d_inode(dentry->d_parent);
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct folio *folio = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the folio
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *kaddr = ext2_get_folio(dir, n, 0, &folio);
		char *dir_end;

		if (IS_ERR(kaddr))
			return PTR_ERR(kaddr);
		folio_lock(folio);
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		kaddr += folio_size(folio) - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		folio_unlock(folio);
		folio_release_kmap(folio, kaddr);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = folio_pos(folio) + offset_in_folio(folio, de);
	err = ext2_prepare_chunk(folio, pos, rec_len);
	if (err)
		goto out_unlock;
	if (de->inode) {
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	ext2_commit_chunk(folio, pos, rec_len);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	err = ext2_handle_dirsync(dir);
	/* OFFSET_CACHE */
out_put:
	folio_release_kmap(folio, de);
	return err;
out_unlock:
	folio_unlock(folio);
	goto out_put;
}
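
/*
 * Editorial example of the got_it split above (not part of the original
 * source): suppose the scan stops at a live entry with an 8-character
 * name whose record carries slack, say rec_len == 64.  Then name_len =
 * EXT2_DIR_REC_LEN(8) = 16, and a new 12-character name (reclen =
 * EXT2_DIR_REC_LEN(12) = 20) fits because 64 >= 16 + 20:
 *
 *	existing entry: rec_len 64 -> trimmed to 16
 *	new entry:      starts at byte 16, rec_len = 64 - 16 = 48
 *
 * The remaining 28 bytes of slack stay at the tail of the new record.
 */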

/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. The folio is up-to-date.
 */
int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	size_t from, to;
	char *kaddr;
	loff_t pos;
	ext2_dirent *de, *pde = NULL;
	int err;

	from = offset_in_folio(folio, dir);
	to = from + ext2_rec_len_from_disk(dir->rec_len);
	kaddr = (char *)dir - from;
	from &= ~(ext2_chunk_size(inode)-1);
	de = (ext2_dirent *)(kaddr + from);

	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			return -EIO;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = offset_in_folio(folio, pde);
	pos = folio_pos(folio) + from;
	folio_lock(folio);
	err = ext2_prepare_chunk(folio, pos, to - from);
	if (err) {
		folio_unlock(folio);
		return err;
	}
	if (pde)
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	ext2_commit_chunk(folio, pos, to - from);
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
	return ext2_handle_dirsync(inode);
}
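
/*
 * Editorial example (not part of the original source): deleting an
 * entry at offset 100 whose predecessor starts at offset 64 with
 * rec_len 36 yields
 *
 *	from = 64,  to = 100 + rec_len(victim)
 *	pde->rec_len = to - from = 36 + rec_len(victim)
 *
 * so the predecessor's record simply absorbs the victim's bytes.  When
 * the victim is the first entry of its chunk there is no pde: only its
 * inode field is cleared and the record itself stays in place.
 */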

/*
 * Set the first fragment of the directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct folio *folio = filemap_grab_folio(inode->i_mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	err = ext2_prepare_chunk(folio, 0, chunk_size);
	if (err) {
		folio_unlock(folio);
		goto fail;
	}
	kaddr = kmap_local_folio(folio, 0);
	memset(kaddr, 0, chunk_size);
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);

	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_local(kaddr);
	ext2_commit_chunk(folio, 0, chunk_size);
	err = ext2_handle_dirsync(inode);
fail:
	folio_put(folio);
	return err;
}
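
/*
 * Editorial illustration of the resulting block (not part of the
 * original source), assuming a 4096-byte chunk:
 *
 *	offset  0: inode = self,   rec_len = 12,   name_len = 1, "."
 *	offset 12: inode = parent, rec_len = 4084, name_len = 2, ".."
 *
 * EXT2_DIR_REC_LEN(1) == 12, and ".." owns all remaining space in the
 * block; later ext2_add_link() calls carve entries out of that slack.
 */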

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir(struct inode *inode)
{
	struct folio *folio;
	char *kaddr;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		ext2_dirent *de;

		kaddr = ext2_get_folio(inode, i, 0, &folio);
		if (IS_ERR(kaddr))
			return 0;

		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		folio_release_kmap(folio, kaddr);
	}
	return 1;

not_empty:
	folio_release_kmap(folio, kaddr);
	return 0;
}

static int ext2_dir_open(struct inode *inode, struct file *file)
{
	file->private_data = kzalloc(sizeof(u64), GFP_KERNEL);
	if (!file->private_data)
		return -ENOMEM;
	return 0;
}

static int ext2_dir_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static loff_t ext2_dir_llseek(struct file *file, loff_t offset, int whence)
{
	return generic_llseek_cookie(file, offset, whence,
				     (u64 *)file->private_data);
}

const struct file_operations ext2_dir_operations = {
	.open		= ext2_dir_open,
	.release	= ext2_dir_release,
	.llseek		= ext2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_fsync,
};