1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4 * Written by Alex Tomas <alex@clusterfs.com>
9 * mballoc.c contains the multiblocks allocation routines
12 #include "ext4_jbd2.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <trace/events/ext4.h>
23 * - test ext4_ext_search_left() and ext4_ext_search_right()
24 * - search for metadata in few groups
27 * - normalization should take into account whether file is still open
28 * - discard preallocations if no free space left (policy?)
29 * - don't normalize tails
31 * - reservation for superuser
34 * - bitmap read-ahead (proposed by Oleg Drokin aka green)
35 * - track min/max extents in each group for better group selection
36 * - mb_mark_used() may allocate chunk right after splitting buddy
37 * - tree of groups sorted by number of free blocks
42 * An allocation request involves a request for multiple blocks near the
43 * specified goal (block) value.
45 * During the initialization phase of the allocator we decide whether to use
46 * group preallocation or inode preallocation depending on the size of
47 * the file. The size of the file could be the resulting file size we
48 * would have after allocation, or the current file size, whichever
49 * is larger. If the size is less than sbi->s_mb_stream_request we
50 * select to use the group preallocation. The default value of
51 * s_mb_stream_request is 16 blocks. This can also be tuned via
52 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
53 * terms of number of blocks.
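 *
 * As a rough sketch (the real decision lives in ext4_mb_group_or_file(),
 * which also checks a few secondary conditions):
 *
 *	size = max(file size after this allocation, current file size);
 *	if (size > sbi->s_mb_stream_request)
 *		use per-inode (stream) preallocation;
 *	else
 *		use the per-CPU locality group preallocation;
 *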
55 * The main motivation for having small files use group preallocation is to
56 * ensure that small files are kept close together on the disk.
58 * In the first stage the allocator looks at the inode prealloc list,
59 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
60 * spaces for this particular inode. The inode prealloc space is
63 * pa_lstart -> the logical start block for this prealloc space
64 * pa_pstart -> the physical start block for this prealloc space
65 * pa_len -> length for this prealloc space (in clusters)
66 * pa_free -> free space available in this prealloc space (in clusters)
68 * The inode preallocation space is used based on the _logical_ start
69 * block. Only if the logical file block falls within the range of the prealloc
70 * space do we consume that particular prealloc space. This makes sure that
71 * we have contiguous physical blocks representing the file blocks.
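 *
 * For example (illustrative numbers): a PA with pa_lstart = 100,
 * pa_pstart = 5000 and pa_len = 16 covers logical blocks 100-115. An
 * allocation for logical block 105 is served from it and maps to physical
 * block 5005 (pa_pstart + (105 - pa_lstart)), decrementing pa_free, while
 * an allocation for logical block 200 leaves this PA untouched.
 *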
73 * The important thing to note about the inode prealloc space is that
74 * we don't modify the values associated with the inode prealloc space except
77 * If we are not able to find blocks in the inode prealloc space and if we
78 * have the group allocation flag set then we look at the locality group
79 * prealloc space. This is a per-CPU prealloc list, represented as
81 * ext4_sb_info.s_locality_groups[smp_processor_id()]
83 * The reason for having a per cpu locality group is to reduce the contention
84 * between CPUs. It is possible to get scheduled at this point.
86 * The locality group prealloc space is used based on whether we have
87 * enough free space (pa_free) within the prealloc space.
89 * If we can't allocate blocks via inode prealloc and/or locality group
90 * prealloc then we look at the buddy cache. The buddy cache is represented
91 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
92 * mapped to the buddy and bitmap information regarding different
93 * groups. The buddy information is attached to buddy cache inode so that
94 * we can access them through the page cache. The information regarding
95 * each group is loaded via ext4_mb_load_buddy. The information consists of
96 * the block bitmap and the buddy information, and it is stored in the
100 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
103 * one block each for bitmap and buddy information. So for each group we
104 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
105 * blocksize) blocks. So a page can hold information for groups_per_page
106 * groups, which is blocks_per_page/2.
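 *
 * For example, with 4k pages and a 4k block size blocks_per_page is 1, so
 * each page of the buddy cache holds either one group's bitmap or its buddy;
 * with a 1k block size blocks_per_page is 4 and groups_per_page is 2, so a
 * single page holds the bitmap and buddy blocks of two consecutive groups.
 *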
108 * The buddy cache inode is not stored on disk. The inode is thrown
109 * away when the filesystem is unmounted.
111 * We look for the requested number of blocks in the buddy cache. If we are
112 * able to locate that many free blocks we return with additional information
113 * regarding the rest of the contiguous physical blocks available.
115 * Before allocating blocks via the buddy cache we normalize the request
116 * blocks. This ensures we ask for more blocks than we need. The extra
117 * blocks that we get after allocation are added to the respective prealloc
118 * list. In case of inode preallocation we follow a list of heuristics
119 * based on file size. This can be found in ext4_mb_normalize_request. If
120 * we are doing a group prealloc we try to normalize the request to
121 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
122 * dependent on the cluster size; for non-bigalloc file systems, it is
123 * 512 blocks. This can be tuned via
124 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
125 * terms of number of blocks. If we have mounted the file system with the -o
126 * stripe=<value> option the group prealloc request is normalized to the
127 * smallest multiple of the stripe value (sbi->s_stripe) which is
128 * greater than the default mb_group_prealloc.
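 *
 * For example (illustrative numbers): on a non-bigalloc file system with
 * sbi->s_stripe = 48, the group prealloc size becomes roughly
 * roundup(s_mb_group_prealloc, s_stripe) = roundup(512, 48) = 528 clusters,
 * the smallest multiple of the stripe width greater than the default of 512.
 *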
130 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
131 * structures in two data structures:
133 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
135 * Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
137 * This is an array of lists where the index in the array represents the
138 * largest free order in the buddy bitmap of the participating group infos of
139 * that list. So, there are exactly MB_NUM_ORDERS(sb) (i.e. the total
140 * number of possible buddy bitmap orders) lists. Group-infos are
141 * placed in appropriate lists.
143 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
145 * Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
147 * This is an array of lists where in the i-th list there are groups with
148 * average fragment size >= 2^i and < 2^(i+1). The average fragment size
149 * is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
150 * Note that we don't bother with a special list for completely empty groups
151 * so we only have MB_NUM_ORDERS(sb) lists.
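 *
 * For example (illustrative numbers): a group with bb_free = 600 clusters
 * spread across bb_fragments = 3 free extents has an average fragment size
 * of 200 clusters and is therefore kept on the list covering [128, 256).
 *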
153 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
154 * structures to decide the order in which groups are to be traversed for
155 * fulfilling an allocation request.
157 * At CR = 0, we look for groups which have the largest_free_order >= the order
158 * of the request. We directly look at the largest free order list in the data
159 * structure (1) above where largest_free_order = order of the request. If that
160 * list is empty, we look at the remaining lists in increasing order of
161 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
163 * At CR = 1, we only consider groups where average fragment size > request
164 * size. So, we look up a group whose average fragment size is just above or
165 * equal to request size using our average fragment size group lists (data
166 * structure 2) in O(1) time.
168 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
169 * linear order which requires O(N) search time for each CR 0 and CR 1 phase.
171 * The regular allocator (using the buddy cache) supports a few tunables.
173 * /sys/fs/ext4/<partition>/mb_min_to_scan
174 * /sys/fs/ext4/<partition>/mb_max_to_scan
175 * /sys/fs/ext4/<partition>/mb_order2_req
176 * /sys/fs/ext4/<partition>/mb_linear_limit
178 * The regular allocator uses the buddy scan only if the request length is a
179 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
180 * value of s_mb_order2_reqs can be tuned via
181 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
182 * stripe size (sbi->s_stripe), we try to search for contiguous blocks of
183 * stripe size. This should result in better allocation on RAID setups. If
184 * not, we search in the specific group using bitmap for best extents. The
185 * tunables min_to_scan and max_to_scan control the behaviour here.
186 * min_to_scan indicates how long mballoc __must__ look for a best
187 * extent and max_to_scan indicates how long mballoc __can__ look for a
188 * best extent in the found extents. Searching for the blocks starts with
189 * the group specified as the goal value in allocation context via
190 * ac_g_ex. Each group is first checked based on the criteria whether it
191 * can be used for allocation. ext4_mb_good_group explains how the groups are
192 * checked.
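 *
 * For example, a request for 64 contiguous blocks is a power of two of
 * order 6, so with s_mb_order2_reqs at its default of 2 the buddy scan is
 * used; a request for 48 blocks is not a power of two and falls back to the
 * bitmap-based scan bounded by min_to_scan/max_to_scan.
 *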
194 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
195 * get traversed linearly. That may result in subsequent allocations being not
196 * close to each other. And so, the underlying device may get filled up in a
197 * non-linear fashion. While that may not matter on non-rotational devices, for
198 * rotational devices that may result in higher seek times. "mb_linear_limit"
199 * tells mballoc how many groups mballoc should search linearly before
200 * consulting the above data structures for more efficient lookups. For
201 * non-rotational devices, this value defaults to 0 and for rotational devices
202 * this is set to MB_DEFAULT_LINEAR_LIMIT.
204 * Both prealloc spaces are populated as described above. So for the first
205 * request we will hit the buddy cache, which will result in this prealloc
206 * space getting filled. The prealloc space is then later used for
207 * subsequent requests.
211 * mballoc operates on the following data:
213 * - in-core buddy (actually includes buddy and bitmap)
214 * - preallocation descriptors (PAs)
216 * there are two types of preallocations:
218 * assigned to a specific inode and can be used for this inode only.
219 * it describes part of the inode's space preallocated to specific
220 * physical blocks. any block from that preallocation can be used
221 * independently. the descriptor just tracks the number of blocks left
222 * unused. so, before taking some block from the descriptor, one must
223 * make sure the corresponding logical block isn't allocated yet. this
224 * also means that freeing any block within the descriptor's range
225 * must discard all preallocated blocks.
227 * assigned to a specific locality group which does not translate to a
228 * permanent set of inodes: an inode can join and leave the group. space
229 * from this type of preallocation can be used for any inode. thus
230 * it's consumed from the beginning to the end.
232 * relation between them can be expressed as:
233 * in-core buddy = on-disk bitmap + preallocation descriptors
235 * this means the blocks mballoc considers used are:
236 * - allocated blocks (persistent)
237 * - preallocated blocks (non-persistent)
239 * consistency in mballoc world means that at any time a block is either
240 * free or used in ALL structures. notice: "any time" should not be read
241 * literally -- time is discrete and delimited by locks.
243 * to keep it simple, we don't use block numbers, instead we count number of
244 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
246 * all operations can be expressed as:
247 * - init buddy: buddy = on-disk + PAs
248 * - new PA: buddy += N; PA = N
249 * - use inode PA: on-disk += N; PA -= N
250 * - discard inode PA: buddy -= on-disk - PA; PA = 0
251 * - use locality group PA: on-disk += N; PA -= N
252 * - discard locality group PA: buddy -= PA; PA = 0
253 * note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
254 * is used in the real operation because we can't know the actual used
255 * bits from the PA, only from the on-disk bitmap.
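 *
 * A short worked example of this bookkeeping (illustrative numbers): a new
 * inode PA of 16 clusters marks 16 clusters used in the buddy and sets
 * PA = 16; consuming 10 of them marks those 10 in the on-disk bitmap and
 * leaves PA = 6; discarding the PA then returns the 6 clusters that were
 * never marked on disk back to the buddy and sets PA = 0.
 *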
257 * if we follow this strict logic, then all operations above should be atomic.
258 * given some of them can block, we'd have to use something like semaphores
259 * killing performance on high-end SMP hardware. let's try to relax it using
260 * the following knowledge:
261 * 1) if buddy is referenced, it's already initialized
262 * 2) while block is used in buddy and the buddy is referenced,
263 * nobody can re-allocate that block
264 * 3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
265 * bit set and PA claims the same block, it's OK. IOW, one can set a bit in the
266 * on-disk bitmap if the buddy has the same bit set and/or a PA covers the corresponding
269 * so, now we're building a concurrency table:
272 * blocks for PA are allocated in the buddy, buddy must be referenced
273 * until PA is linked to allocation group to avoid concurrent buddy init
275 * we need to make sure that either on-disk bitmap or PA has uptodate data
276 * given (3) we care that PA-=N operation doesn't interfere with init
278 * the simplest way would be to have buddy initialized by the discard
279 * - use locality group PA
280 * again PA-=N must be serialized with init
281 * - discard locality group PA
282 * the simplest way would be to have buddy initialized by the discard
285 * i_data_sem serializes them
287 * discard process must wait until PA isn't used by another process
288 * - use locality group PA
289 * some mutex should serialize them
290 * - discard locality group PA
291 * discard process must wait until PA isn't used by another process
294 * i_data_sem or another mutex should serialize them
296 * discard process must wait until PA isn't used by another process
297 * - use locality group PA
298 * nothing wrong here -- they're different PAs covering different blocks
299 * - discard locality group PA
300 * discard process must wait until PA isn't used by another process
302 * now we're ready to draw a few conclusions:
303 * - while a PA is referenced, no discard of it is possible
304 * - a PA stays referenced until its block is marked in the on-disk bitmap
305 * - PA changes only after on-disk bitmap
306 * - discard must not compete with init. either init is done before
307 * any discard or they're serialized somehow
308 * - buddy init as sum of on-disk bitmap and PAs is done atomically
310 * a special case is when we've used a PA to emptiness. no need to modify the buddy
311 * in this case, but we should care about concurrent init
316 * Logic in a few words:
321 * mark bits in on-disk bitmap
324 * - use preallocation:
325 * find proper PA (per-inode or group)
327 * mark bits in on-disk bitmap
333 * mark bits in on-disk bitmap
336 * - discard preallocations in group:
338 * move them onto local list
339 * load on-disk bitmap
341 * remove PA from object (inode or locality group)
342 * mark free blocks in-core
344 * - discard inode's preallocations:
351 * - bitlock on a group (group)
352 * - object (inode/locality) (object)
354 * - cr0 lists lock (cr0)
355 * - cr1 tree lock (cr1)
365 * - release consumed pa:
370 * - generate in-core bitmap:
374 * - discard all for given object (inode, locality group):
379 * - discard all for given group:
385 * - allocation path (ext4_mb_regular_allocator)
389 static struct kmem_cache *ext4_pspace_cachep;
390 static struct kmem_cache *ext4_ac_cachep;
391 static struct kmem_cache *ext4_free_data_cachep;
393 /* We create slab caches for groupinfo data structures based on the
394 * superblock block size. There will be one per mounted filesystem for
395 * each unique s_blocksize_bits */
396 #define NR_GRPINFO_CACHES 8
397 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
399 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
400 "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
401 "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
402 "ext4_groupinfo_64k", "ext4_groupinfo_128k"
405 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
407 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
409 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
411 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
412 ext4_group_t group, int cr);
414 static int ext4_try_to_trim_range(struct super_block *sb,
415 struct ext4_buddy *e4b, ext4_grpblk_t start,
416 ext4_grpblk_t max, ext4_grpblk_t minblocks);
419 * The algorithm using this percpu seq counter is as follows:
420 * 1. We sample the percpu discard_pa_seq counter before trying for block
421 * allocation in ext4_mb_new_blocks().
422 * 2. We increment this percpu discard_pa_seq counter when we either allocate
423 * or free these blocks i.e. while marking those blocks as used/free in
424 * mb_mark_used()/mb_free_blocks().
425 * 3. We also increment this percpu seq counter when we successfully identify
426 * that the bb_prealloc_list is not empty and hence proceed for discarding
427 * of those PAs inside ext4_mb_discard_group_preallocations().
429 * Now, to make sure that the regular fast path of block allocation is not
430 * affected, as a small optimization we only sample the percpu seq counter
431 * on the local cpu. Only when the block allocation fails and no freed
432 * blocks were found do we sample the percpu seq counter for all cpus using
433 * the function ext4_get_discard_pa_seq_sum() below. This happens after making
434 * sure that all the PAs on grp->bb_prealloc_list got freed or the list is empty.
436 static DEFINE_PER_CPU(u64, discard_pa_seq);
437 static inline u64 ext4_get_discard_pa_seq_sum(void)
442 for_each_possible_cpu(__cpu)
443 __seq += per_cpu(discard_pa_seq, __cpu);
447 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
449 #if BITS_PER_LONG == 64
450 *bit += ((unsigned long) addr & 7UL) << 3;
451 addr = (void *) ((unsigned long) addr & ~7UL);
452 #elif BITS_PER_LONG == 32
453 *bit += ((unsigned long) addr & 3UL) << 3;
454 addr = (void *) ((unsigned long) addr & ~3UL);
456 #error "Unsupported BITS_PER_LONG"
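
/*
 * Worked example of the fix-up above (64-bit case, illustrative values):
 * for addr = base + 6 and bit = 3, the address is rounded down to the
 * unsigned long boundary at base and the bit index becomes 3 + 6 * 8 = 51,
 * so the same bit is addressed through an aligned word.
 */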
461 static inline int mb_test_bit(int bit, void *addr)
464 * ext4_test_bit on architectures like powerpc
465 * needs an unsigned long aligned address
467 addr = mb_correct_addr_and_bit(&bit, addr);
468 return ext4_test_bit(bit, addr);
471 static inline void mb_set_bit(int bit, void *addr)
473 addr = mb_correct_addr_and_bit(&bit, addr);
474 ext4_set_bit(bit, addr);
477 static inline void mb_clear_bit(int bit, void *addr)
479 addr = mb_correct_addr_and_bit(&bit, addr);
480 ext4_clear_bit(bit, addr);
483 static inline int mb_test_and_clear_bit(int bit, void *addr)
485 addr = mb_correct_addr_and_bit(&bit, addr);
486 return ext4_test_and_clear_bit(bit, addr);
489 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
491 int fix = 0, ret, tmpmax;
492 addr = mb_correct_addr_and_bit(&fix, addr);
496 ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
502 static inline int mb_find_next_bit(void *addr, int max, int start)
504 int fix = 0, ret, tmpmax;
505 addr = mb_correct_addr_and_bit(&fix, addr);
509 ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
515 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
519 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
522 if (order > e4b->bd_blkbits + 1) {
527 /* at order 0 we see each particular block */
529 *max = 1 << (e4b->bd_blkbits + 3);
530 return e4b->bd_bitmap;
533 bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
534 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
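
	/*
	 * Illustrative layout for a 4k block size (as set up in ext4_mb_init()):
	 * order 0 is the cluster bitmap itself (32768 bits); within the buddy
	 * block, the order-1 map starts at byte offset 0 with 16384 bits,
	 * order 2 at byte 2048 with 8192 bits, order 3 at byte 3072 with
	 * 4096 bits, and so on.
	 */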
540 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
541 int first, int count)
544 struct super_block *sb = e4b->bd_sb;
546 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
548 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
549 for (i = 0; i < count; i++) {
550 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
551 ext4_fsblk_t blocknr;
553 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
554 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
555 ext4_grp_locked_error(sb, e4b->bd_group,
556 inode ? inode->i_ino : 0,
558 "freeing block already freed "
561 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
562 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
564 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
568 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
572 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
574 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
575 for (i = 0; i < count; i++) {
576 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
577 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
581 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
583 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
585 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
586 unsigned char *b1, *b2;
588 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
589 b2 = (unsigned char *) bitmap;
590 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
591 if (b1[i] != b2[i]) {
592 ext4_msg(e4b->bd_sb, KERN_ERR,
593 "corruption in group %u "
594 "at byte %u(%u): %x in copy != %x "
596 e4b->bd_group, i, i * 8, b1[i], b2[i]);
603 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
604 struct ext4_group_info *grp, ext4_group_t group)
606 struct buffer_head *bh;
608 grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
612 bh = ext4_read_block_bitmap(sb, group);
613 if (IS_ERR_OR_NULL(bh)) {
614 kfree(grp->bb_bitmap);
615 grp->bb_bitmap = NULL;
619 memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
623 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
625 kfree(grp->bb_bitmap);
629 static inline void mb_free_blocks_double(struct inode *inode,
630 struct ext4_buddy *e4b, int first, int count)
634 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
635 int first, int count)
639 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
644 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
645 struct ext4_group_info *grp, ext4_group_t group)
650 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
656 #ifdef AGGRESSIVE_CHECK
658 #define MB_CHECK_ASSERT(assert) \
662 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
663 function, file, line, # assert); \
668 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
669 const char *function, int line)
671 struct super_block *sb = e4b->bd_sb;
672 int order = e4b->bd_blkbits + 1;
679 struct ext4_group_info *grp;
682 struct list_head *cur;
686 if (e4b->bd_info->bb_check_counter++ % 10)
690 buddy = mb_find_buddy(e4b, order, &max);
691 MB_CHECK_ASSERT(buddy);
692 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
693 MB_CHECK_ASSERT(buddy2);
694 MB_CHECK_ASSERT(buddy != buddy2);
695 MB_CHECK_ASSERT(max * 2 == max2);
698 for (i = 0; i < max; i++) {
700 if (mb_test_bit(i, buddy)) {
701 /* only single bit in buddy2 may be 0 */
702 if (!mb_test_bit(i << 1, buddy2)) {
704 mb_test_bit((i<<1)+1, buddy2));
709 /* both bits in buddy2 must be 1 */
710 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
711 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
713 for (j = 0; j < (1 << order); j++) {
714 k = (i * (1 << order)) + j;
716 !mb_test_bit(k, e4b->bd_bitmap));
720 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
725 buddy = mb_find_buddy(e4b, 0, &max);
726 for (i = 0; i < max; i++) {
727 if (!mb_test_bit(i, buddy)) {
728 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
736 /* check used bits only */
737 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
738 buddy2 = mb_find_buddy(e4b, j, &max2);
740 MB_CHECK_ASSERT(k < max2);
741 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
744 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
745 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
747 grp = ext4_get_group_info(sb, e4b->bd_group);
748 list_for_each(cur, &grp->bb_prealloc_list) {
749 ext4_group_t groupnr;
750 struct ext4_prealloc_space *pa;
751 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
752 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
753 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
754 for (i = 0; i < pa->pa_len; i++)
755 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
759 #undef MB_CHECK_ASSERT
760 #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
761 __FILE__, __func__, __LINE__)
763 #define mb_check_buddy(e4b)
767 * Divide blocks starting from @first with length @len into
768 * smaller chunks with power of 2 block counts.
769 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
770 * then increase bb_counters[] for the corresponding chunk size.
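 *
 * Worked example (illustrative numbers): a free range with first = 5 and
 * len = 11 is split into a 1-cluster chunk at 5, a 2-cluster chunk at 6 and
 * an 8-cluster chunk at 8, bumping bb_counters[0], bb_counters[1] and
 * bb_counters[3] respectively.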
772 static void ext4_mb_mark_free_simple(struct super_block *sb,
773 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
774 struct ext4_group_info *grp)
776 struct ext4_sb_info *sbi = EXT4_SB(sb);
782 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
784 border = 2 << sb->s_blocksize_bits;
787 /* find how many blocks can be covered from this position */
788 max = ffs(first | border) - 1;
790 /* find how many blocks of power 2 we need to mark */
797 /* mark multiblock chunks only */
798 grp->bb_counters[min]++;
800 mb_clear_bit(first >> min,
801 buddy + sbi->s_mb_offsets[min]);
808 static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
813 * We don't bother with special lists for groups with only 1-block free
814 * extents or for completely empty groups.
816 order = fls(len) - 2;
819 if (order == MB_NUM_ORDERS(sb))
824 /* Move group to appropriate avg_fragment_size list */
826 mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
828 struct ext4_sb_info *sbi = EXT4_SB(sb);
831 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
834 new_order = mb_avg_fragment_size_order(sb,
835 grp->bb_free / grp->bb_fragments);
836 if (new_order == grp->bb_avg_fragment_size_order)
839 if (grp->bb_avg_fragment_size_order != -1) {
840 write_lock(&sbi->s_mb_avg_fragment_size_locks[
841 grp->bb_avg_fragment_size_order]);
842 list_del(&grp->bb_avg_fragment_size_node);
843 write_unlock(&sbi->s_mb_avg_fragment_size_locks[
844 grp->bb_avg_fragment_size_order]);
846 grp->bb_avg_fragment_size_order = new_order;
847 write_lock(&sbi->s_mb_avg_fragment_size_locks[
848 grp->bb_avg_fragment_size_order]);
849 list_add_tail(&grp->bb_avg_fragment_size_node,
850 &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
851 write_unlock(&sbi->s_mb_avg_fragment_size_locks[
852 grp->bb_avg_fragment_size_order]);
856 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
857 * cr level needs an update.
859 static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
860 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
862 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
863 struct ext4_group_info *iter, *grp;
866 if (ac->ac_status == AC_STATUS_FOUND)
869 if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
870 atomic_inc(&sbi->s_bal_cr0_bad_suggestions);
873 for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
874 if (list_empty(&sbi->s_mb_largest_free_orders[i]))
876 read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
877 if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
878 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
882 list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
883 bb_largest_free_order_node) {
885 atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
886 if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
891 read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
897 /* Increment cr and search again */
900 *group = grp->bb_group;
901 ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
906 * Choose next group by traversing average fragment size list of suitable
907 * order. Updates *new_cr if cr level needs an update.
909 static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
910 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
912 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
913 struct ext4_group_info *grp = NULL, *iter;
916 if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
918 atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
921 for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
922 i < MB_NUM_ORDERS(ac->ac_sb); i++) {
923 if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
925 read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
926 if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
927 read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
930 list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
931 bb_avg_fragment_size_node) {
933 atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
934 if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
939 read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
945 *group = grp->bb_group;
946 ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
952 static inline int should_optimize_scan(struct ext4_allocation_context *ac)
954 if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
956 if (ac->ac_criteria >= 2)
958 if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
964 * Return next linear group for allocation. If linear traversal should not be
965 * performed, this function just returns the same group
968 next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
970 if (!should_optimize_scan(ac))
973 if (ac->ac_groups_linear_remaining) {
974 ac->ac_groups_linear_remaining--;
981 * Artificially restricted ngroups for non-extent
982 * files makes group > ngroups possible on first loop.
984 return group + 1 >= ngroups ? 0 : group + 1;
988 * ext4_mb_choose_next_group: choose next group for allocation.
990 * @ac Allocation Context
991 * @new_cr This is an output parameter. If there is no good group
992 * available at current CR level, this field is updated to indicate
993 * the new cr level that should be used.
994 * @group This is an input / output parameter. As an input it indicates the
995 * next group that the allocator intends to use for allocation. As
996 * output, this field indicates the next group that should be used as
997 * determined by the optimization functions.
998 * @ngroups Total number of groups
1000 static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1001 int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1003 *new_cr = ac->ac_criteria;
1005 if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
1006 *group = next_linear_group(ac, *group, ngroups);
1011 ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1012 } else if (*new_cr == 1) {
1013 ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1016 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1017 * bb_free. But until that happens, we should never come here.
1024 * Cache the order of the largest free extent we have available in this block
1028 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
1030 struct ext4_sb_info *sbi = EXT4_SB(sb);
1033 for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--)
1034 if (grp->bb_counters[i] > 0)
1036 /* No need to move between order lists? */
1037 if (!test_opt2(sb, MB_OPTIMIZE_SCAN) ||
1038 i == grp->bb_largest_free_order) {
1039 grp->bb_largest_free_order = i;
1043 if (grp->bb_largest_free_order >= 0) {
1044 write_lock(&sbi->s_mb_largest_free_orders_locks[
1045 grp->bb_largest_free_order]);
1046 list_del_init(&grp->bb_largest_free_order_node);
1047 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1048 grp->bb_largest_free_order]);
1050 grp->bb_largest_free_order = i;
1051 if (grp->bb_largest_free_order >= 0 && grp->bb_free) {
1052 write_lock(&sbi->s_mb_largest_free_orders_locks[
1053 grp->bb_largest_free_order]);
1054 list_add_tail(&grp->bb_largest_free_order_node,
1055 &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1056 write_unlock(&sbi->s_mb_largest_free_orders_locks[
1057 grp->bb_largest_free_order]);
1061 static noinline_for_stack
1062 void ext4_mb_generate_buddy(struct super_block *sb,
1063 void *buddy, void *bitmap, ext4_group_t group)
1065 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1066 struct ext4_sb_info *sbi = EXT4_SB(sb);
1067 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1068 ext4_grpblk_t i = 0;
1069 ext4_grpblk_t first;
1072 unsigned fragments = 0;
1073 unsigned long long period = get_cycles();
1075 /* initialize buddy from bitmap which is an aggregation
1076 * of the on-disk bitmap and preallocations */
1077 i = mb_find_next_zero_bit(bitmap, max, 0);
1078 grp->bb_first_free = i;
1082 i = mb_find_next_bit(bitmap, max, i);
1086 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1088 grp->bb_counters[0]++;
1090 i = mb_find_next_zero_bit(bitmap, max, i);
1092 grp->bb_fragments = fragments;
1094 if (free != grp->bb_free) {
1095 ext4_grp_locked_error(sb, group, 0, 0,
1096 "block bitmap and bg descriptor "
1097 "inconsistent: %u vs %u free clusters",
1098 free, grp->bb_free);
1100 * If we intend to continue, we consider the group descriptor
1101 * corrupt and update bb_free using the bitmap value
1103 grp->bb_free = free;
1104 ext4_mark_group_bitmap_corrupted(sb, group,
1105 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1107 mb_set_largest_free_order(sb, grp);
1108 mb_update_avg_fragment_size(sb, grp);
1110 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1112 period = get_cycles() - period;
1113 atomic_inc(&sbi->s_mb_buddies_generated);
1114 atomic64_add(period, &sbi->s_mb_generation_time);
1117 /* The buddy information is attached to the buddy cache inode
1118 * for convenience. The information regarding each group
1119 * is loaded via ext4_mb_load_buddy. The information consists of the
1120 * block bitmap and the buddy information, and it is
1121 * stored in the inode as
1124 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1127 * one block each for bitmap and buddy information.
1128 * So for each group we take up 2 blocks. A page can
1129 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1130 * So a page can hold information for groups_per_page groups, which
1131 * is blocks_per_page/2.
1133 * Locking note: This routine takes the block group lock of all groups
1134 * for this page; do not hold this lock when calling this routine!
1137 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1139 ext4_group_t ngroups;
1141 int blocks_per_page;
1142 int groups_per_page;
1145 ext4_group_t first_group, group;
1147 struct super_block *sb;
1148 struct buffer_head *bhs;
1149 struct buffer_head **bh = NULL;
1150 struct inode *inode;
1153 struct ext4_group_info *grinfo;
1155 inode = page->mapping->host;
1157 ngroups = ext4_get_groups_count(sb);
1158 blocksize = i_blocksize(inode);
1159 blocks_per_page = PAGE_SIZE / blocksize;
1161 mb_debug(sb, "init page %lu\n", page->index);
1163 groups_per_page = blocks_per_page >> 1;
1164 if (groups_per_page == 0)
1165 groups_per_page = 1;
1167 /* allocate buffer_heads to read bitmaps */
1168 if (groups_per_page > 1) {
1169 i = sizeof(struct buffer_head *) * groups_per_page;
1170 bh = kzalloc(i, gfp);
1178 first_group = page->index * blocks_per_page / 2;
1180 /* read all groups the page covers into the cache */
1181 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1182 if (group >= ngroups)
1185 grinfo = ext4_get_group_info(sb, group);
1187 * If page is uptodate then we came here after online resize
1188 * which added some new uninitialized group info structs, so
1189 * we must skip all initialized uptodate buddies on the page,
1190 * which may be currently in use by an allocating task.
1192 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
1196 bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
1197 if (IS_ERR(bh[i])) {
1198 err = PTR_ERR(bh[i]);
1202 mb_debug(sb, "read bitmap for group %u\n", group);
1205 /* wait for I/O completion */
1206 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1211 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
1216 first_block = page->index * blocks_per_page;
1217 for (i = 0; i < blocks_per_page; i++) {
1218 group = (first_block + i) >> 1;
1219 if (group >= ngroups)
1222 if (!bh[group - first_group])
1223 /* skip initialized uptodate buddy */
1226 if (!buffer_verified(bh[group - first_group]))
1227 /* Skip faulty bitmaps */
1232 * data carries information regarding this
1233 * particular group in the format specified
1237 data = page_address(page) + (i * blocksize);
1238 bitmap = bh[group - first_group]->b_data;
1241 * We place the buddy block and bitmap block
1244 if ((first_block + i) & 1) {
1245 /* this is block of buddy */
1246 BUG_ON(incore == NULL);
1247 mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1248 group, page->index, i * blocksize);
1249 trace_ext4_mb_buddy_bitmap_load(sb, group);
1250 grinfo = ext4_get_group_info(sb, group);
1251 grinfo->bb_fragments = 0;
1252 memset(grinfo->bb_counters, 0,
1253 sizeof(*grinfo->bb_counters) *
1254 (MB_NUM_ORDERS(sb)));
1256 * incore got set to the group block bitmap below
1258 ext4_lock_group(sb, group);
1259 /* init the buddy */
1260 memset(data, 0xff, blocksize);
1261 ext4_mb_generate_buddy(sb, data, incore, group);
1262 ext4_unlock_group(sb, group);
1265 /* this is block of bitmap */
1266 BUG_ON(incore != NULL);
1267 mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1268 group, page->index, i * blocksize);
1269 trace_ext4_mb_bitmap_load(sb, group);
1271 /* see comments in ext4_mb_put_pa() */
1272 ext4_lock_group(sb, group);
1273 memcpy(data, bitmap, blocksize);
1275 /* mark all preallocated blks used in in-core bitmap */
1276 ext4_mb_generate_from_pa(sb, data, group);
1277 ext4_mb_generate_from_freelist(sb, data, group);
1278 ext4_unlock_group(sb, group);
1280 /* set incore so that the buddy information can be
1281 * generated using this
1286 SetPageUptodate(page);
1290 for (i = 0; i < groups_per_page; i++)
1299 * Lock the buddy and bitmap pages. This makes sure that other parallel init_group
1300 * on the same buddy page doesn't happen while holding the buddy page lock.
1301 * Return locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
1302 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
1304 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1305 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1307 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1308 int block, pnum, poff;
1309 int blocks_per_page;
1312 e4b->bd_buddy_page = NULL;
1313 e4b->bd_bitmap_page = NULL;
1315 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1317 * the buddy cache inode stores the block bitmap
1318 * and buddy information in consecutive blocks.
1319 * So for each group we need two blocks.
1322 pnum = block / blocks_per_page;
1323 poff = block % blocks_per_page;
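
	/*
	 * Worked example (illustrative numbers): with 4k pages and a 1k block
	 * size, blocks_per_page = 4; for group 5, block = 10, so its bitmap
	 * lives on page pnum = 2 at block offset poff = 2 and its buddy is the
	 * following block on the same page.
	 */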
1324 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1327 BUG_ON(page->mapping != inode->i_mapping);
1328 e4b->bd_bitmap_page = page;
1329 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1331 if (blocks_per_page >= 2) {
1332 /* buddy and bitmap are on the same page */
1337 pnum = block / blocks_per_page;
1338 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1341 BUG_ON(page->mapping != inode->i_mapping);
1342 e4b->bd_buddy_page = page;
1346 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1348 if (e4b->bd_bitmap_page) {
1349 unlock_page(e4b->bd_bitmap_page);
1350 put_page(e4b->bd_bitmap_page);
1352 if (e4b->bd_buddy_page) {
1353 unlock_page(e4b->bd_buddy_page);
1354 put_page(e4b->bd_buddy_page);
1359 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1360 * block group lock of all groups for this page; do not hold the BG lock when
1361 * calling this routine!
1363 static noinline_for_stack
1364 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1367 struct ext4_group_info *this_grp;
1368 struct ext4_buddy e4b;
1373 mb_debug(sb, "init group %u\n", group);
1374 this_grp = ext4_get_group_info(sb, group);
1376 * This ensures that we don't reinit the buddy cache
1377 * page which maps to the group from which we are already
1378 * allocating. If we are looking at the buddy cache we would
1379 * have taken a reference using ext4_mb_load_buddy and that
1380 * would have pinned the buddy page to the page cache.
1381 * The call to ext4_mb_get_buddy_page_lock will mark the
1384 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1385 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1387 * somebody initialized the group
1388 * return without doing anything
1393 page = e4b.bd_bitmap_page;
1394 ret = ext4_mb_init_cache(page, NULL, gfp);
1397 if (!PageUptodate(page)) {
1402 if (e4b.bd_buddy_page == NULL) {
1404 * If both the bitmap and buddy are in
1405 * the same page we don't need to force
1411 /* init buddy cache */
1412 page = e4b.bd_buddy_page;
1413 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1416 if (!PageUptodate(page)) {
1421 ext4_mb_put_buddy_page_lock(&e4b);
1426 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1427 * block group lock of all groups for this page; do not hold the BG lock when
1428 * calling this routine!
1430 static noinline_for_stack int
1431 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1432 struct ext4_buddy *e4b, gfp_t gfp)
1434 int blocks_per_page;
1440 struct ext4_group_info *grp;
1441 struct ext4_sb_info *sbi = EXT4_SB(sb);
1442 struct inode *inode = sbi->s_buddy_cache;
1445 mb_debug(sb, "load group %u\n", group);
1447 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1448 grp = ext4_get_group_info(sb, group);
1450 e4b->bd_blkbits = sb->s_blocksize_bits;
1453 e4b->bd_group = group;
1454 e4b->bd_buddy_page = NULL;
1455 e4b->bd_bitmap_page = NULL;
1457 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1459 * we need full data about the group
1460 * to make a good selection
1462 ret = ext4_mb_init_group(sb, group, gfp);
1468 * the buddy cache inode stores the block bitmap
1469 * and buddy information in consecutive blocks.
1470 * So for each group we need two blocks.
1473 pnum = block / blocks_per_page;
1474 poff = block % blocks_per_page;
1476 /* we could use find_or_create_page(), but it locks the page,
1477 * which we'd like to avoid in the fast path ... */
1478 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1479 if (page == NULL || !PageUptodate(page)) {
1482 * drop the page reference and try
1483 * to get the page with the lock. If we
1484 * are not uptodate that implies
1485 * somebody just created the page but
1486 * has not yet initialized it. So
1487 * wait for it to initialize.
1490 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1492 BUG_ON(page->mapping != inode->i_mapping);
1493 if (!PageUptodate(page)) {
1494 ret = ext4_mb_init_cache(page, NULL, gfp);
1499 mb_cmp_bitmaps(e4b, page_address(page) +
1500 (poff * sb->s_blocksize));
1509 if (!PageUptodate(page)) {
1514 /* Pages marked accessed already */
1515 e4b->bd_bitmap_page = page;
1516 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1519 pnum = block / blocks_per_page;
1520 poff = block % blocks_per_page;
1522 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1523 if (page == NULL || !PageUptodate(page)) {
1526 page = find_or_create_page(inode->i_mapping, pnum, gfp);
1528 BUG_ON(page->mapping != inode->i_mapping);
1529 if (!PageUptodate(page)) {
1530 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1544 if (!PageUptodate(page)) {
1549 /* Pages marked accessed already */
1550 e4b->bd_buddy_page = page;
1551 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1558 if (e4b->bd_bitmap_page)
1559 put_page(e4b->bd_bitmap_page);
1560 if (e4b->bd_buddy_page)
1561 put_page(e4b->bd_buddy_page);
1562 e4b->bd_buddy = NULL;
1563 e4b->bd_bitmap = NULL;
1567 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1568 struct ext4_buddy *e4b)
1570 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1573 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1575 if (e4b->bd_bitmap_page)
1576 put_page(e4b->bd_bitmap_page);
1577 if (e4b->bd_buddy_page)
1578 put_page(e4b->bd_buddy_page);
1582 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1587 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1588 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1590 while (order <= e4b->bd_blkbits + 1) {
1591 bb = mb_find_buddy(e4b, order, &max);
1592 if (!mb_test_bit(block >> order, bb)) {
1593 /* this block is part of buddy of order 'order' */
1601 static void mb_clear_bits(void *bm, int cur, int len)
1607 if ((cur & 31) == 0 && (len - cur) >= 32) {
1608 /* fast path: clear whole word at once */
1609 addr = bm + (cur >> 3);
1614 mb_clear_bit(cur, bm);
1619 /* clear bits in the given range;
1620 * will return the first zero bit found if any, -1 otherwise
1622 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1629 if ((cur & 31) == 0 && (len - cur) >= 32) {
1630 /* fast path: clear whole word at once */
1631 addr = bm + (cur >> 3);
1632 if (*addr != (__u32)(-1) && zero_bit == -1)
1633 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1638 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1646 void mb_set_bits(void *bm, int cur, int len)
1652 if ((cur & 31) == 0 && (len - cur) >= 32) {
1653 /* fast path: set whole word at once */
1654 addr = bm + (cur >> 3);
1659 mb_set_bit(cur, bm);
1664 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1666 if (mb_test_bit(*bit + side, bitmap)) {
1667 mb_clear_bit(*bit, bitmap);
1673 mb_set_bit(*bit, bitmap);
1678 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1682 void *buddy = mb_find_buddy(e4b, order, &max);
1687 /* Bits in range [first; last] are known to be set since
1688 * corresponding blocks were allocated. Bits in range
1689 * (first; last) will stay set because they form buddies on
1690 * upper layer. We just deal with borders if they don't
1691 * align with upper layer and then go up.
1692 * Releasing the entire group is all about clearing
1693 * a single bit of the highest order buddy.
1697 * ---------------------------------
1698 * |   1   |   1   |   1   |   1   |
1699 * ---------------------------------
1700 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1701 * ---------------------------------
1702 *   0   1   2   3   4   5   6   7
1703 *      \_____________________/
1705 * Neither [1] nor [6] is aligned to above layer.
1706 * Left neighbour [0] is free, so mark it busy,
1707 * decrease bb_counters and extend range to
1709 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1710 * mark [6] free, increase bb_counters and shrink range to
1712 * Then shift range to [0; 2], go up and do the same.
1717 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1719 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1724 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1725 mb_clear_bits(buddy, first, last - first + 1);
1726 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1735 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1736 int first, int count)
1738 int left_is_free = 0;
1739 int right_is_free = 0;
1741 int last = first + count - 1;
1742 struct super_block *sb = e4b->bd_sb;
1744 if (WARN_ON(count == 0))
1746 BUG_ON(last >= (sb->s_blocksize << 3));
1747 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1748 /* Don't bother if the block group is corrupt. */
1749 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1752 mb_check_buddy(e4b);
1753 mb_free_blocks_double(inode, e4b, first, count);
1755 this_cpu_inc(discard_pa_seq);
1756 e4b->bd_info->bb_free += count;
1757 if (first < e4b->bd_info->bb_first_free)
1758 e4b->bd_info->bb_first_free = first;
1760 /* access memory sequentially: check left neighbour,
1761 * clear range and then check right neighbour
1764 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1765 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1766 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1767 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1769 if (unlikely(block != -1)) {
1770 struct ext4_sb_info *sbi = EXT4_SB(sb);
1771 ext4_fsblk_t blocknr;
1773 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1774 blocknr += EXT4_C2B(sbi, block);
1775 if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1776 ext4_grp_locked_error(sb, e4b->bd_group,
1777 inode ? inode->i_ino : 0,
1779 "freeing already freed block (bit %u); block bitmap corrupt.",
1781 ext4_mark_group_bitmap_corrupted(
1783 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1788 /* let's maintain fragments counter */
1789 if (left_is_free && right_is_free)
1790 e4b->bd_info->bb_fragments--;
1791 else if (!left_is_free && !right_is_free)
1792 e4b->bd_info->bb_fragments++;
1794 /* buddy[0] == bd_bitmap is a special case, so handle
1795 * it right away and let mb_buddy_mark_free stay free of
1796 * zero order checks.
1797 * Check if neighbours are to be coalesced,
1798 * adjust bitmap bb_counters and borders appropriately.
1801 first += !left_is_free;
1802 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1805 last -= !right_is_free;
1806 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1810 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1813 mb_set_largest_free_order(sb, e4b->bd_info);
1814 mb_update_avg_fragment_size(sb, e4b->bd_info);
1815 mb_check_buddy(e4b);
1818 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1819 int needed, struct ext4_free_extent *ex)
1825 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1828 buddy = mb_find_buddy(e4b, 0, &max);
1829 BUG_ON(buddy == NULL);
1830 BUG_ON(block >= max);
1831 if (mb_test_bit(block, buddy)) {
1838 /* find actual order */
1839 order = mb_find_order_for_block(e4b, block);
1840 block = block >> order;
1842 ex->fe_len = 1 << order;
1843 ex->fe_start = block << order;
1844 ex->fe_group = e4b->bd_group;
1846 /* calc difference from given start */
1847 next = next - ex->fe_start;
1849 ex->fe_start += next;
1851 while (needed > ex->fe_len &&
1852 mb_find_buddy(e4b, order, &max)) {
1854 if (block + 1 >= max)
1857 next = (block + 1) * (1 << order);
1858 if (mb_test_bit(next, e4b->bd_bitmap))
1861 order = mb_find_order_for_block(e4b, next);
1863 block = next >> order;
1864 ex->fe_len += 1 << order;
1867 if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1868 /* Should never happen! (but apparently sometimes does?!?) */
1870 ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1871 "corruption or bug in mb_find_extent "
1872 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1873 block, order, needed, ex->fe_group, ex->fe_start,
1874 ex->fe_len, ex->fe_logical);
1882 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1888 int start = ex->fe_start;
1889 int len = ex->fe_len;
1894 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1895 BUG_ON(e4b->bd_group != ex->fe_group);
1896 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1897 mb_check_buddy(e4b);
1898 mb_mark_used_double(e4b, start, len);
1900 this_cpu_inc(discard_pa_seq);
1901 e4b->bd_info->bb_free -= len;
1902 if (e4b->bd_info->bb_first_free == start)
1903 e4b->bd_info->bb_first_free += len;
1905 /* let's maintain fragments counter */
1907 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1908 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1909 max = !mb_test_bit(start + len, e4b->bd_bitmap);
1911 e4b->bd_info->bb_fragments++;
1912 else if (!mlen && !max)
1913 e4b->bd_info->bb_fragments--;
1915 /* let's maintain buddy itself */
1917 ord = mb_find_order_for_block(e4b, start);
1919 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1920 /* the whole chunk may be allocated at once! */
1922 buddy = mb_find_buddy(e4b, ord, &max);
1923 BUG_ON((start >> ord) >= max);
1924 mb_set_bit(start >> ord, buddy);
1925 e4b->bd_info->bb_counters[ord]--;
1932 /* store for history */
1934 ret = len | (ord << 16);
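
	/*
	 * This records the remaining length and the order of the buddy being
	 * split, packed into one int: e.g. if 5 clusters are still needed when
	 * an order-4 buddy has to be split, ret = 5 | (4 << 16). It is later
	 * unpacked into ac_tail and ac_buddy in ext4_mb_use_best_found().
	 * (Illustrative values only.)
	 */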
1936 /* we have to split large buddy */
1938 buddy = mb_find_buddy(e4b, ord, &max);
1939 mb_set_bit(start >> ord, buddy);
1940 e4b->bd_info->bb_counters[ord]--;
1943 cur = (start >> ord) & ~1U;
1944 buddy = mb_find_buddy(e4b, ord, &max);
1945 mb_clear_bit(cur, buddy);
1946 mb_clear_bit(cur + 1, buddy);
1947 e4b->bd_info->bb_counters[ord]++;
1948 e4b->bd_info->bb_counters[ord]++;
1950 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1952 mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1953 mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1954 mb_check_buddy(e4b);
1960 * Must be called under group lock!
1962 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1963 struct ext4_buddy *e4b)
1965 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1968 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1969 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1971 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1972 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1973 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1975 /* preallocation can change ac_b_ex, thus we store actually
1976 * allocated blocks for history */
1977 ac->ac_f_ex = ac->ac_b_ex;
1979 ac->ac_status = AC_STATUS_FOUND;
1980 ac->ac_tail = ret & 0xffff;
1981 ac->ac_buddy = ret >> 16;
1984 * take the page reference. We want the page to be pinned
1985 * so that we don't get an ext4_mb_init_cache call for this
1986 * group until we update the bitmap. That would mean we
1987 * double allocate blocks. The reference is dropped
1988 * in ext4_mb_release_context
1990 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1991 get_page(ac->ac_bitmap_page);
1992 ac->ac_buddy_page = e4b->bd_buddy_page;
1993 get_page(ac->ac_buddy_page);
1994 /* store last allocated for subsequent stream allocation */
1995 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1996 spin_lock(&sbi->s_md_lock);
1997 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1998 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1999 spin_unlock(&sbi->s_md_lock);
2002 * As we've just preallocated more space than
2003 * user requested originally, we store allocated
2004 * space in a special descriptor.
2006 if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
2007 ext4_mb_new_preallocation(ac);
2011 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2012 struct ext4_buddy *e4b,
2015 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2016 struct ext4_free_extent *bex = &ac->ac_b_ex;
2017 struct ext4_free_extent *gex = &ac->ac_g_ex;
2018 struct ext4_free_extent ex;
2021 if (ac->ac_status == AC_STATUS_FOUND)
2024 * We don't want to scan for a whole year
2026 if (ac->ac_found > sbi->s_mb_max_to_scan &&
2027 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2028 ac->ac_status = AC_STATUS_BREAK;
2033 * Haven't found good chunk so far, let's continue
2035 if (bex->fe_len < gex->fe_len)
2038 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2039 && bex->fe_group == e4b->bd_group) {
2040 /* recheck chunk's availability - we don't know
2041 * when it was found (within this lock-unlock
2043 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2044 if (max >= gex->fe_len) {
2045 ext4_mb_use_best_found(ac, e4b);
2052 * The routine checks whether the found extent is good enough. If it is,
2053 * then the extent gets marked used and a flag is set in the context
2054 * to stop scanning. Otherwise, the extent is compared with the
2055 * previously found extent and if the new one is better, then it's stored
2056 * in the context. Later, the best found extent will be used, if
2057 * mballoc can't find a good enough extent.
2059 * FIXME: real allocation policy is to be designed yet!
2061 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2062 struct ext4_free_extent *ex,
2063 struct ext4_buddy *e4b)
2065 struct ext4_free_extent *bex = &ac->ac_b_ex;
2066 struct ext4_free_extent *gex = &ac->ac_g_ex;
2068 BUG_ON(ex->fe_len <= 0);
2069 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2070 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2071 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2076 * The special case - take what you catch first
2078 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2080 ext4_mb_use_best_found(ac, e4b);
2085 * Let's check whether the chunk is good enough
2087 if (ex->fe_len == gex->fe_len) {
2089 ext4_mb_use_best_found(ac, e4b);
2094 * If this is the first found extent, just store it in the context
2096 if (bex->fe_len == 0) {
2102 * If the newly found extent is better, store it in the context
2104 if (bex->fe_len < gex->fe_len) {
2105 /* if the request isn't satisfied, any found extent
2106 * larger than previous best one is better */
2107 if (ex->fe_len > bex->fe_len)
2109 } else if (ex->fe_len > gex->fe_len) {
2110 /* if the request is satisfied, then we try to find
2111 * an extent that still satisfies the request, but is
2112 * smaller than the previous one */
2113 if (ex->fe_len < bex->fe_len)
2117 ext4_mb_check_limits(ac, e4b, 0);
2120 static noinline_for_stack
2121 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
2122 struct ext4_buddy *e4b)
2124 struct ext4_free_extent ex = ac->ac_b_ex;
2125 ext4_group_t group = ex.fe_group;
2129 BUG_ON(ex.fe_len <= 0);
2130 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2134 ext4_lock_group(ac->ac_sb, group);
2135 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
2139 ext4_mb_use_best_found(ac, e4b);
2142 ext4_unlock_group(ac->ac_sb, group);
2143 ext4_mb_unload_buddy(e4b);
2148 static noinline_for_stack
2149 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
2150 struct ext4_buddy *e4b)
2152 ext4_group_t group = ac->ac_g_ex.fe_group;
2155 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2156 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2157 struct ext4_free_extent ex;
2159 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
2161 if (grp->bb_free == 0)
2164 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
2168 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
2169 ext4_mb_unload_buddy(e4b);
2173 ext4_lock_group(ac->ac_sb, group);
2174 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
2175 ac->ac_g_ex.fe_len, &ex);
2176 ex.fe_logical = 0xDEADFA11; /* debug value */
2178 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
2181 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
2183 /* use do_div to get remainder (would be 64-bit modulo) */
2184 if (do_div(start, sbi->s_stripe) == 0) {
2187 ext4_mb_use_best_found(ac, e4b);
2189 } else if (max >= ac->ac_g_ex.fe_len) {
2190 BUG_ON(ex.fe_len <= 0);
2191 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2192 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2195 ext4_mb_use_best_found(ac, e4b);
2196 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
2197 /* Sometimes, the caller may want to merge even a small
2198 * number of blocks into an existing extent */
2199 BUG_ON(ex.fe_len <= 0);
2200 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
2201 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
2204 ext4_mb_use_best_found(ac, e4b);
2206 ext4_unlock_group(ac->ac_sb, group);
2207 ext4_mb_unload_buddy(e4b);
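
/*
 * A minimal sketch of the stripe check used in the goal path above: the
 * goal extent is only taken when its physical start is a multiple of
 * s_stripe (the surrounding code additionally requires the extent to be
 * exactly one stripe long).  mb_sketch_* is an illustrative helper, not
 * part of mballoc; the kernel uses do_div() because block numbers are
 * 64-bit, a plain '%' is shown here only for clarity.
 */
static inline bool mb_sketch_goal_is_stripe_aligned(unsigned long long first_block_in_group,
						    unsigned int fe_start,
						    unsigned int stripe)
{
	unsigned long long start = first_block_in_group + fe_start;

	return stripe && (start % stripe) == 0;
}
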
2213 * The routine scans buddy structures (not the bitmap!) from the given order
2214 * up to the max order, trying to find a big enough chunk to satisfy the request
2216 static noinline_for_stack
2217 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
2218 struct ext4_buddy *e4b)
2220 struct super_block *sb = ac->ac_sb;
2221 struct ext4_group_info *grp = e4b->bd_info;
2227 BUG_ON(ac->ac_2order <= 0);
2228 for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) {
2229 if (grp->bb_counters[i] == 0)
2232 buddy = mb_find_buddy(e4b, i, &max);
2233 BUG_ON(buddy == NULL);
2235 k = mb_find_next_zero_bit(buddy, max, 0);
2237 ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
2238 "%d free clusters of order %d. But found 0",
2239 grp->bb_counters[i], i);
2240 ext4_mark_group_bitmap_corrupted(ac->ac_sb,
2242 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2247 ac->ac_b_ex.fe_len = 1 << i;
2248 ac->ac_b_ex.fe_start = k << i;
2249 ac->ac_b_ex.fe_group = e4b->bd_group;
2251 ext4_mb_use_best_found(ac, e4b);
2253 BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2255 if (EXT4_SB(sb)->s_mb_stats)
2256 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
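
/*
 * A minimal sketch of the buddy-to-extent mapping used above: bit k of
 * the order-i buddy describes a free chunk of 2^i clusters starting at
 * cluster k << i within the group, e.g. i = 3, k = 5 covers clusters
 * 40..47.  The mb_sketch_* helper is illustrative only, not part of
 * mballoc.
 */
static inline void mb_sketch_buddy_bit_to_extent(int order, int bit,
						 unsigned int *start,
						 unsigned int *len)
{
	*len = 1U << order;
	*start = (unsigned int)bit << order;
}
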
2263 * The routine scans the group and measures all found extents.
2264 * In order to optimize scanning, the caller must pass the number of
2265 * free blocks in the group, so the routine knows the upper limit.
2267 static noinline_for_stack
2268 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2269 struct ext4_buddy *e4b)
2271 struct super_block *sb = ac->ac_sb;
2272 void *bitmap = e4b->bd_bitmap;
2273 struct ext4_free_extent ex;
2277 free = e4b->bd_info->bb_free;
2278 if (WARN_ON(free <= 0))
2281 i = e4b->bd_info->bb_first_free;
2283 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2284 i = mb_find_next_zero_bit(bitmap,
2285 EXT4_CLUSTERS_PER_GROUP(sb), i);
2286 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2288 * If we have a corrupt bitmap, we won't find any
2289 * free blocks even though the group info says we have free blocks
2292 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2293 "%d free clusters as per "
2294 "group info. But bitmap says 0",
2296 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2297 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2301 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2302 if (WARN_ON(ex.fe_len <= 0))
2304 if (free < ex.fe_len) {
2305 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2306 "%d free clusters as per "
2307 "group info. But got %d blocks",
2309 ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2310 EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2312 * The number of free blocks differs. This most likely
2313 * indicates that the bitmap is corrupt. So exit
2314 * without claiming the space.
2318 ex.fe_logical = 0xDEADC0DE; /* debug value */
2319 ext4_mb_measure_extent(ac, &ex, e4b);
2325 ext4_mb_check_limits(ac, e4b, 1);
2329 * This is a special case for storage like raid5:
2330 * we try to find stripe-aligned chunks for stripe-size-multiple requests
2332 static noinline_for_stack
2333 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2334 struct ext4_buddy *e4b)
2336 struct super_block *sb = ac->ac_sb;
2337 struct ext4_sb_info *sbi = EXT4_SB(sb);
2338 void *bitmap = e4b->bd_bitmap;
2339 struct ext4_free_extent ex;
2340 ext4_fsblk_t first_group_block;
2345 BUG_ON(sbi->s_stripe == 0);
2347 /* find first stripe-aligned block in group */
2348 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2350 a = first_group_block + sbi->s_stripe - 1;
2351 do_div(a, sbi->s_stripe);
2352 i = (a * sbi->s_stripe) - first_group_block;
2354 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2355 if (!mb_test_bit(i, bitmap)) {
2356 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2357 if (max >= sbi->s_stripe) {
2359 ex.fe_logical = 0xDEADF00D; /* debug value */
2361 ext4_mb_use_best_found(ac, e4b);
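
/*
 * A minimal sketch of the alignment arithmetic above: round the first
 * physical block of the group up to the next multiple of s_stripe and
 * express it as an offset inside the group.  E.g. first_group_block =
 * 32770 and s_stripe = 16 gives block 32784, i.e. offset 14.  The
 * mb_sketch_* helper is illustrative only; the kernel uses do_div()
 * because block numbers are 64-bit.
 */
static inline unsigned int mb_sketch_first_aligned_offset(unsigned long long first_group_block,
							  unsigned int stripe)
{
	unsigned long long aligned = first_group_block + stripe - 1;

	aligned -= aligned % stripe;	/* round up to a stripe multiple */
	return (unsigned int)(aligned - first_group_block);
}
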
2370 * This is also called BEFORE we load the buddy bitmap.
2371 * Returns true or false, indicating whether the group is suitable
2372 * for the allocation or not.
2374 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2375 ext4_group_t group, int cr)
2377 ext4_grpblk_t free, fragments;
2378 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2379 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2381 BUG_ON(cr < 0 || cr >= 4);
2383 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2386 free = grp->bb_free;
2390 fragments = grp->bb_fragments;
2396 BUG_ON(ac->ac_2order == 0);
2398 /* Avoid using the first bg of a flexgroup for data files */
2399 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2400 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2401 ((group % flex_size) == 0))
2404 if (free < ac->ac_g_ex.fe_len)
2407 if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb))
2410 if (grp->bb_largest_free_order < ac->ac_2order)
2415 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2419 if (free >= ac->ac_g_ex.fe_len)
2432 * This could return a negative error code if something goes wrong
2433 * during ext4_mb_init_group(). This should not be called with
2434 * ext4_lock_group() held.
2436 * Note: because we are conditionally operating with the group lock in
2437 * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this
2438 * function using __acquire and __release. This means we need to be
2439 * super careful before messing with the error path handling via "goto out"!
2442 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2443 ext4_group_t group, int cr)
2445 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2446 struct super_block *sb = ac->ac_sb;
2447 struct ext4_sb_info *sbi = EXT4_SB(sb);
2448 bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2452 if (sbi->s_mb_stats)
2453 atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2455 ext4_lock_group(sb, group);
2456 __release(ext4_group_lock_ptr(sb, group));
2458 free = grp->bb_free;
2461 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2463 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2466 __acquire(ext4_group_lock_ptr(sb, group));
2467 ext4_unlock_group(sb, group);
2470 /* We only do this if the grp has never been initialized */
2471 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2472 struct ext4_group_desc *gdp =
2473 ext4_get_group_desc(sb, group, NULL);
2476 /* cr=0/1 is a very optimistic search to find large
2477 * good chunks almost for free. If buddy data is not
2478 * ready, then this optimization makes no sense. But
2479 * we never skip the first block group in a flex_bg,
2480 * since this gets used for metadata block allocation,
2481 * and we want to make sure we locate metadata blocks
2482 * in the first block group in the flex_bg if possible.
2485 (!sbi->s_log_groups_per_flex ||
2486 ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2487 !(ext4_has_group_desc_csum(sb) &&
2488 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2490 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2496 ext4_lock_group(sb, group);
2497 __release(ext4_group_lock_ptr(sb, group));
2499 ret = ext4_mb_good_group(ac, group, cr);
2502 __acquire(ext4_group_lock_ptr(sb, group));
2503 ext4_unlock_group(sb, group);
2509 * Start prefetching @nr block bitmaps starting at @group.
2510 * Return the next group which needs to be prefetched.
2512 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2513 unsigned int nr, int *cnt)
2515 ext4_group_t ngroups = ext4_get_groups_count(sb);
2516 struct buffer_head *bh;
2517 struct blk_plug plug;
2519 blk_start_plug(&plug);
2521 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2523 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2526 * Prefetch block groups with free blocks; but don't
2527 * bother if it is marked uninitialized on disk, since
2528 * it won't require I/O to read. Also only try to
2529 * prefetch once, so we avoid the getblk() call, which can be expensive.
2532 if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2533 EXT4_MB_GRP_NEED_INIT(grp) &&
2534 ext4_free_group_clusters(sb, gdp) > 0 &&
2535 !(ext4_has_group_desc_csum(sb) &&
2536 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2537 bh = ext4_read_block_bitmap_nowait(sb, group, true);
2538 if (bh && !IS_ERR(bh)) {
2539 if (!buffer_uptodate(bh) && cnt)
2544 if (++group >= ngroups)
2547 blk_finish_plug(&plug);
2552 * Prefetching reads the block bitmap into the buffer cache; but we
2553 * need to make sure that the buddy bitmap in the page cache has been
2554 * initialized. Note that ext4_mb_init_group() will block if the I/O
2555 * is not yet completed, or indeed if it was not initiated by
2556 * ext4_mb_prefetch itself.
2558 * TODO: We should actually kick off the buddy bitmap setup in a work
2559 * queue when the buffer I/O is completed, so that we don't block
2560 * waiting for the block allocation bitmap read to finish when
2561 * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2563 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2567 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2569 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2572 group = ext4_get_groups_count(sb);
2574 grp = ext4_get_group_info(sb, group);
2576 if (EXT4_MB_GRP_NEED_INIT(grp) &&
2577 ext4_free_group_clusters(sb, gdp) > 0 &&
2578 !(ext4_has_group_desc_csum(sb) &&
2579 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2580 if (ext4_mb_init_group(sb, group, GFP_NOFS))
2586 static noinline_for_stack int
2587 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2589 ext4_group_t prefetch_grp = 0, ngroups, group, i;
2590 int cr = -1, new_cr;
2591 int err = 0, first_err = 0;
2592 unsigned int nr = 0, prefetch_ios = 0;
2593 struct ext4_sb_info *sbi;
2594 struct super_block *sb;
2595 struct ext4_buddy e4b;
2600 ngroups = ext4_get_groups_count(sb);
2601 /* non-extent files are limited to low blocks/groups */
2602 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2603 ngroups = sbi->s_blockfile_groups;
2605 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2607 /* first, try the goal */
2608 err = ext4_mb_find_by_goal(ac, &e4b);
2609 if (err || ac->ac_status == AC_STATUS_FOUND)
2612 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2616 * ac->ac_2order is set only if the fe_len is a power of 2;
2617 * if ac->ac_2order is set, we also set the criteria to 0 so that we
2618 * try exact allocation using the buddy cache.
2620 i = fls(ac->ac_g_ex.fe_len);
2623 * We search using buddy data only if the order of the request
2624 * is greater than or equal to sbi->s_mb_order2_reqs.
2625 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2626 * We also support searching for power-of-two requests only for
2627 * requests up to the maximum buddy size we have constructed.
2629 if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) {
2631 * This should tell if fe_len is exactly a power of 2
2633 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2634 ac->ac_2order = array_index_nospec(i - 1,
2638 /* if stream allocation is enabled, use global goal */
2639 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2640 /* TBD: may be hot point */
2641 spin_lock(&sbi->s_md_lock);
2642 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2643 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2644 spin_unlock(&sbi->s_md_lock);
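
/*
 * A minimal sketch of the power-of-two detection a few lines above:
 * fls() returns the index of the highest set bit, so clearing that bit
 * and checking for zero tells whether fe_len has exactly one bit set.
 * E.g. fe_len = 16: fls() = 5 and 16 & ~(1 << 4) == 0, so ac_2order = 4;
 * fe_len = 24: 24 & ~(1 << 4) == 8, not a power of two.  The
 * mb_sketch_* helper is illustrative only; array_index_nospec() in the
 * real code merely clamps the index against speculation.
 */
static inline int mb_sketch_request_order(unsigned int fe_len)
{
	int i = fls(fe_len);

	if (i > 0 && (fe_len & ~(1U << (i - 1))) == 0)
		return i - 1;	/* request is exactly 2^(i-1) clusters */
	return 0;		/* not a power of two: no buddy order */
}
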
2647 /* Let's just scan groups to find more or less suitable blocks */
2648 cr = ac->ac_2order ? 0 : 1;
2650 * cr == 0 try to get exact allocation,
2651 * cr == 3 try to get anything
2654 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2655 ac->ac_criteria = cr;
2657 * search for the right group, starting
2658 * from the goal value specified
2660 group = ac->ac_g_ex.fe_group;
2661 ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
2662 prefetch_grp = group;
2664 for (i = 0, new_cr = cr; i < ngroups; i++,
2665 ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
2675 * Batch reads of the block allocation bitmaps
2676 * to get multiple READs in flight; limit
2677 * prefetching at cr=0/1, otherwise mballoc can
2678 * spend a lot of time loading imperfect groups
2680 if ((prefetch_grp == group) &&
2682 prefetch_ios < sbi->s_mb_prefetch_limit)) {
2683 unsigned int curr_ios = prefetch_ios;
2685 nr = sbi->s_mb_prefetch;
2686 if (ext4_has_feature_flex_bg(sb)) {
2687 nr = 1 << sbi->s_log_groups_per_flex;
2688 nr -= group & (nr - 1);
2689 nr = min(nr, sbi->s_mb_prefetch);
2691 prefetch_grp = ext4_mb_prefetch(sb, group,
2693 if (prefetch_ios == curr_ios)
2697 /* This now checks without needing the buddy page */
2698 ret = ext4_mb_good_group_nolock(ac, group, cr);
2705 err = ext4_mb_load_buddy(sb, group, &e4b);
2709 ext4_lock_group(sb, group);
2712 * We need to check again after locking the block group.
2715 ret = ext4_mb_good_group(ac, group, cr);
2717 ext4_unlock_group(sb, group);
2718 ext4_mb_unload_buddy(&e4b);
2722 ac->ac_groups_scanned++;
2724 ext4_mb_simple_scan_group(ac, &e4b);
2725 else if (cr == 1 && sbi->s_stripe &&
2726 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
2727 ext4_mb_scan_aligned(ac, &e4b);
2729 ext4_mb_complex_scan_group(ac, &e4b);
2731 ext4_unlock_group(sb, group);
2732 ext4_mb_unload_buddy(&e4b);
2734 if (ac->ac_status != AC_STATUS_CONTINUE)
2737 /* Processed all groups and haven't found blocks */
2738 if (sbi->s_mb_stats && i == ngroups)
2739 atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2742 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2743 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2745 * We've been searching too long. Let's try to allocate
2746 * the best chunk we've found so far
2748 ext4_mb_try_best_found(ac, &e4b);
2749 if (ac->ac_status != AC_STATUS_FOUND) {
2751 * Someone luckier has already allocated it.
2752 * The only thing we can do is just take the first found block(s).
2755 lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2756 mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2757 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2758 ac->ac_b_ex.fe_len, lost);
2760 ac->ac_b_ex.fe_group = 0;
2761 ac->ac_b_ex.fe_start = 0;
2762 ac->ac_b_ex.fe_len = 0;
2763 ac->ac_status = AC_STATUS_CONTINUE;
2764 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2770 if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2771 atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2773 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2776 mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2777 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2778 ac->ac_flags, cr, err);
2781 ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2786 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2788 struct super_block *sb = pde_data(file_inode(seq->file));
2791 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2794 return (void *) ((unsigned long) group);
2797 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2799 struct super_block *sb = pde_data(file_inode(seq->file));
2803 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2806 return (void *) ((unsigned long) group);
2809 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2811 struct super_block *sb = pde_data(file_inode(seq->file));
2812 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2814 int err, buddy_loaded = 0;
2815 struct ext4_buddy e4b;
2816 struct ext4_group_info *grinfo;
2817 unsigned char blocksize_bits = min_t(unsigned char,
2818 sb->s_blocksize_bits,
2819 EXT4_MAX_BLOCK_LOG_SIZE);
2821 struct ext4_group_info info;
2822 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2827 seq_puts(seq, "#group: free frags first ["
2828 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
2829 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
2831 i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2832 sizeof(struct ext4_group_info);
2834 grinfo = ext4_get_group_info(sb, group);
2835 /* Load the group info in memory only if not already loaded. */
2836 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2837 err = ext4_mb_load_buddy(sb, group, &e4b);
2839 seq_printf(seq, "#%-5u: I/O error\n", group);
2845 memcpy(&sg, ext4_get_group_info(sb, group), i);
2848 ext4_mb_unload_buddy(&e4b);
2850 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2851 sg.info.bb_fragments, sg.info.bb_first_free);
2852 for (i = 0; i <= 13; i++)
2853 seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2854 sg.info.bb_counters[i] : 0);
2855 seq_puts(seq, " ]\n");
2860 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2864 const struct seq_operations ext4_mb_seq_groups_ops = {
2865 .start = ext4_mb_seq_groups_start,
2866 .next = ext4_mb_seq_groups_next,
2867 .stop = ext4_mb_seq_groups_stop,
2868 .show = ext4_mb_seq_groups_show,
2871 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2873 struct super_block *sb = seq->private;
2874 struct ext4_sb_info *sbi = EXT4_SB(sb);
2876 seq_puts(seq, "mballoc:\n");
2877 if (!sbi->s_mb_stats) {
2878 seq_puts(seq, "\tmb stats collection turned off.\n");
2879 seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2882 seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2883 seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2885 seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned));
2887 seq_puts(seq, "\tcr0_stats:\n");
2888 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2889 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2890 atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2891 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2892 atomic64_read(&sbi->s_bal_cX_failed[0]));
2893 seq_printf(seq, "\t\tbad_suggestions: %u\n",
2894 atomic_read(&sbi->s_bal_cr0_bad_suggestions));
2896 seq_puts(seq, "\tcr1_stats:\n");
2897 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2898 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2899 atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2900 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2901 atomic64_read(&sbi->s_bal_cX_failed[1]));
2902 seq_printf(seq, "\t\tbad_suggestions: %u\n",
2903 atomic_read(&sbi->s_bal_cr1_bad_suggestions));
2905 seq_puts(seq, "\tcr2_stats:\n");
2906 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2907 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2908 atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2909 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2910 atomic64_read(&sbi->s_bal_cX_failed[2]));
2912 seq_puts(seq, "\tcr3_stats:\n");
2913 seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2914 seq_printf(seq, "\t\tgroups_considered: %llu\n",
2915 atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2916 seq_printf(seq, "\t\tuseless_loops: %llu\n",
2917 atomic64_read(&sbi->s_bal_cX_failed[3]));
2918 seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2919 seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2920 seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2921 seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2922 seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2924 seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2925 atomic_read(&sbi->s_mb_buddies_generated),
2926 ext4_get_groups_count(sb));
2927 seq_printf(seq, "\tbuddies_time_used: %llu\n",
2928 atomic64_read(&sbi->s_mb_generation_time));
2929 seq_printf(seq, "\tpreallocated: %u\n",
2930 atomic_read(&sbi->s_mb_preallocated));
2931 seq_printf(seq, "\tdiscarded: %u\n",
2932 atomic_read(&sbi->s_mb_discarded));
2936 static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
2937 __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
2939 struct super_block *sb = pde_data(file_inode(seq->file));
2940 unsigned long position;
2942 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2944 position = *pos + 1;
2945 return (void *) ((unsigned long) position);
2948 static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
2950 struct super_block *sb = pde_data(file_inode(seq->file));
2951 unsigned long position;
2954 if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb))
2956 position = *pos + 1;
2957 return (void *) ((unsigned long) position);
2960 static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
2962 struct super_block *sb = pde_data(file_inode(seq->file));
2963 struct ext4_sb_info *sbi = EXT4_SB(sb);
2964 unsigned long position = ((unsigned long) v);
2965 struct ext4_group_info *grp;
2969 if (position >= MB_NUM_ORDERS(sb)) {
2970 position -= MB_NUM_ORDERS(sb);
2972 seq_puts(seq, "avg_fragment_size_lists:\n");
2975 read_lock(&sbi->s_mb_avg_fragment_size_locks[position]);
2976 list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position],
2977 bb_avg_fragment_size_node)
2979 read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]);
2980 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
2981 (unsigned int)position, count);
2985 if (position == 0) {
2986 seq_printf(seq, "optimize_scan: %d\n",
2987 test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0);
2988 seq_puts(seq, "max_free_order_lists:\n");
2991 read_lock(&sbi->s_mb_largest_free_orders_locks[position]);
2992 list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position],
2993 bb_largest_free_order_node)
2995 read_unlock(&sbi->s_mb_largest_free_orders_locks[position]);
2996 seq_printf(seq, "\tlist_order_%u_groups: %u\n",
2997 (unsigned int)position, count);
3002 static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
3006 const struct seq_operations ext4_mb_seq_structs_summary_ops = {
3007 .start = ext4_mb_seq_structs_summary_start,
3008 .next = ext4_mb_seq_structs_summary_next,
3009 .stop = ext4_mb_seq_structs_summary_stop,
3010 .show = ext4_mb_seq_structs_summary_show,
3013 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
3015 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3016 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
3023 * Allocate the top-level s_group_info array for the specified number
3026 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
3028 struct ext4_sb_info *sbi = EXT4_SB(sb);
3030 struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
3032 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
3033 EXT4_DESC_PER_BLOCK_BITS(sb);
3034 if (size <= sbi->s_group_info_size)
3037 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
3038 new_groupinfo = kvzalloc(size, GFP_KERNEL);
3039 if (!new_groupinfo) {
3040 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
3044 old_groupinfo = rcu_dereference(sbi->s_group_info);
3046 memcpy(new_groupinfo, old_groupinfo,
3047 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
3049 rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
3050 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
3052 ext4_kvfree_array_rcu(old_groupinfo);
3053 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
3054 sbi->s_group_info_size);
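
/*
 * A minimal sketch of the sizing above: the top-level s_group_info
 * array holds one pointer per "meta group" of EXT4_DESC_PER_BLOCK
 * groups, rounded up to a power-of-two allocation so it rarely needs
 * to grow while the filesystem is resized.  E.g., assuming 128
 * descriptors per block, ngroups = 1000 needs ceil(1000 / 128) = 8
 * pointers, i.e. a 64-byte allocation on a 64-bit host.  The
 * mb_sketch_* helper is illustrative only, not part of mballoc.
 */
static inline size_t mb_sketch_groupinfo_bytes(unsigned int ngroups,
					       unsigned int desc_per_block)
{
	size_t nr_meta = (ngroups + desc_per_block - 1) / desc_per_block;

	return roundup_pow_of_two(nr_meta * sizeof(void *));
}
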
3058 /* Create and initialize ext4_group_info data for the given group. */
3059 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
3060 struct ext4_group_desc *desc)
3064 int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
3065 struct ext4_sb_info *sbi = EXT4_SB(sb);
3066 struct ext4_group_info **meta_group_info;
3067 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3070 * First check if this group is the first of a reserved block.
3071 * If it's true, we have to allocate a new table of pointers
3072 * to ext4_group_info structures
3074 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3075 metalen = sizeof(*meta_group_info) <<
3076 EXT4_DESC_PER_BLOCK_BITS(sb);
3077 meta_group_info = kmalloc(metalen, GFP_NOFS);
3078 if (meta_group_info == NULL) {
3079 ext4_msg(sb, KERN_ERR, "can't allocate mem "
3080 "for a buddy group");
3081 goto exit_meta_group_info;
3084 rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
3088 meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
3089 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
3091 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
3092 if (meta_group_info[i] == NULL) {
3093 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
3094 goto exit_group_info;
3096 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
3097 &(meta_group_info[i]->bb_state));
3100 * initialize bb_free to be able to skip
3101 * empty groups without initialization
3103 if (ext4_has_group_desc_csum(sb) &&
3104 (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3105 meta_group_info[i]->bb_free =
3106 ext4_free_clusters_after_init(sb, group, desc);
3108 meta_group_info[i]->bb_free =
3109 ext4_free_group_clusters(sb, desc);
3112 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
3113 init_rwsem(&meta_group_info[i]->alloc_sem);
3114 meta_group_info[i]->bb_free_root = RB_ROOT;
3115 INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node);
3116 INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node);
3117 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
3118 meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */
3119 meta_group_info[i]->bb_group = group;
3121 mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
3125 /* If a meta_group_info table has been allocated, release it now */
3126 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
3127 struct ext4_group_info ***group_info;
3130 group_info = rcu_dereference(sbi->s_group_info);
3131 kfree(group_info[idx]);
3132 group_info[idx] = NULL;
3135 exit_meta_group_info:
3137 } /* ext4_mb_add_groupinfo */
3139 static int ext4_mb_init_backend(struct super_block *sb)
3141 ext4_group_t ngroups = ext4_get_groups_count(sb);
3143 struct ext4_sb_info *sbi = EXT4_SB(sb);
3145 struct ext4_group_desc *desc;
3146 struct ext4_group_info ***group_info;
3147 struct kmem_cache *cachep;
3149 err = ext4_mb_alloc_groupinfo(sb, ngroups);
3153 sbi->s_buddy_cache = new_inode(sb);
3154 if (sbi->s_buddy_cache == NULL) {
3155 ext4_msg(sb, KERN_ERR, "can't get new inode");
3158 /* To avoid potentially colliding with a valid on-disk inode number,
3159 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
3160 * not in the inode hash, so it should never be found by iget(), but
3161 * this will avoid confusion if it ever shows up during debugging. */
3162 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
3163 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
3164 for (i = 0; i < ngroups; i++) {
3166 desc = ext4_get_group_desc(sb, i, NULL);
3168 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
3171 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
3175 if (ext4_has_feature_flex_bg(sb)) {
3176 /* a single flex group is supposed to be read by a single IO.
3177 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is an
3178 * unsigned integer, so the maximum shift is 32.
3180 if (sbi->s_es->s_log_groups_per_flex >= 32) {
3181 ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3184 sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
3185 BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3186 sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3188 sbi->s_mb_prefetch = 32;
3190 if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3191 sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3192 /* how many real IOs to prefetch within a single allocation at cr=0.
3193 * Given that cr=0 is a CPU-related optimization we shouldn't try to
3194 * load too many groups; at some point we should start to use what
3195 * we've got in memory.
3196 * With an average random access time of 5ms, it'd take a second to get
3197 * 200 groups (* N with flex_bg), so let's make this limit 4
3199 sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3200 if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3201 sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3206 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3208 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
3209 i = sbi->s_group_info_size;
3211 group_info = rcu_dereference(sbi->s_group_info);
3213 kfree(group_info[i]);
3215 iput(sbi->s_buddy_cache);
3218 kvfree(rcu_dereference(sbi->s_group_info));
3223 static void ext4_groupinfo_destroy_slabs(void)
3227 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
3228 kmem_cache_destroy(ext4_groupinfo_caches[i]);
3229 ext4_groupinfo_caches[i] = NULL;
3233 static int ext4_groupinfo_create_slab(size_t size)
3235 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
3237 int blocksize_bits = order_base_2(size);
3238 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
3239 struct kmem_cache *cachep;
3241 if (cache_index >= NR_GRPINFO_CACHES)
3244 if (unlikely(cache_index < 0))
3247 mutex_lock(&ext4_grpinfo_slab_create_mutex);
3248 if (ext4_groupinfo_caches[cache_index]) {
3249 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3250 return 0; /* Already created */
3253 slab_size = offsetof(struct ext4_group_info,
3254 bb_counters[blocksize_bits + 2]);
3256 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
3257 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
3260 ext4_groupinfo_caches[cache_index] = cachep;
3262 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
3265 "EXT4-fs: no memory for groupinfo slab cache\n");
3272 static void ext4_discard_work(struct work_struct *work)
3274 struct ext4_sb_info *sbi = container_of(work,
3275 struct ext4_sb_info, s_discard_work);
3276 struct super_block *sb = sbi->s_sb;
3277 struct ext4_free_data *fd, *nfd;
3278 struct ext4_buddy e4b;
3279 struct list_head discard_list;
3280 ext4_group_t grp, load_grp;
3283 INIT_LIST_HEAD(&discard_list);
3284 spin_lock(&sbi->s_md_lock);
3285 list_splice_init(&sbi->s_discard_list, &discard_list);
3286 spin_unlock(&sbi->s_md_lock);
3288 load_grp = UINT_MAX;
3289 list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
3291 * If filesystem is umounting or no memory or suffering
3292 * from no space, give up the discard
3294 if ((sb->s_flags & SB_ACTIVE) && !err &&
3295 !atomic_read(&sbi->s_retry_alloc_pending)) {
3296 grp = fd->efd_group;
3297 if (grp != load_grp) {
3298 if (load_grp != UINT_MAX)
3299 ext4_mb_unload_buddy(&e4b);
3301 err = ext4_mb_load_buddy(sb, grp, &e4b);
3303 kmem_cache_free(ext4_free_data_cachep, fd);
3304 load_grp = UINT_MAX;
3311 ext4_lock_group(sb, grp);
3312 ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster,
3313 fd->efd_start_cluster + fd->efd_count - 1, 1);
3314 ext4_unlock_group(sb, grp);
3316 kmem_cache_free(ext4_free_data_cachep, fd);
3319 if (load_grp != UINT_MAX)
3320 ext4_mb_unload_buddy(&e4b);
3323 int ext4_mb_init(struct super_block *sb)
3325 struct ext4_sb_info *sbi = EXT4_SB(sb);
3327 unsigned offset, offset_incr;
3331 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets);
3333 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
3334 if (sbi->s_mb_offsets == NULL) {
3339 i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs);
3340 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
3341 if (sbi->s_mb_maxs == NULL) {
3346 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
3350 /* order 0 is regular bitmap */
3351 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
3352 sbi->s_mb_offsets[0] = 0;
3356 offset_incr = 1 << (sb->s_blocksize_bits - 1);
3357 max = sb->s_blocksize << 2;
3359 sbi->s_mb_offsets[i] = offset;
3360 sbi->s_mb_maxs[i] = max;
3361 offset += offset_incr;
3362 offset_incr = offset_incr >> 1;
3365 } while (i < MB_NUM_ORDERS(sb));
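
/*
 * A worked example of the table built above, assuming 4k blocks
 * (s_blocksize_bits = 12).  Order 0 is the regular block bitmap
 * (32768 bits); the buddy page then concatenates the higher orders,
 * each half the size of the previous one:
 *
 *	order 1: 16384 bits at byte offset 0
 *	order 2:  8192 bits at byte offset 2048
 *	order 3:  4096 bits at byte offset 3072
 *	...
 *
 * The mb_sketch_* helper below recomputes the same layout for any
 * order >= 1 and is illustrative only, not part of mballoc.
 */
static inline void mb_sketch_buddy_layout(unsigned int blocksize_bits, int order,
					  unsigned int *bits, unsigned int *byte_offset)
{
	unsigned int total_bits = 1U << (blocksize_bits + 3);	/* order-0 bitmap bits */
	unsigned int off = 0;
	int i;

	for (i = 1; i < order; i++)
		off += (total_bits >> i) / 8;	/* bytes used by order i */
	*bits = total_bits >> order;
	*byte_offset = off;
}
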
3367 sbi->s_mb_avg_fragment_size =
3368 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3370 if (!sbi->s_mb_avg_fragment_size) {
3374 sbi->s_mb_avg_fragment_size_locks =
3375 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3377 if (!sbi->s_mb_avg_fragment_size_locks) {
3381 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3382 INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]);
3383 rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]);
3385 sbi->s_mb_largest_free_orders =
3386 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head),
3388 if (!sbi->s_mb_largest_free_orders) {
3392 sbi->s_mb_largest_free_orders_locks =
3393 kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t),
3395 if (!sbi->s_mb_largest_free_orders_locks) {
3399 for (i = 0; i < MB_NUM_ORDERS(sb); i++) {
3400 INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3401 rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3404 spin_lock_init(&sbi->s_md_lock);
3405 sbi->s_mb_free_pending = 0;
3406 INIT_LIST_HEAD(&sbi->s_freed_data_list);
3407 INIT_LIST_HEAD(&sbi->s_discard_list);
3408 INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
3409 atomic_set(&sbi->s_retry_alloc_pending, 0);
3411 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3412 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3413 sbi->s_mb_stats = MB_DEFAULT_STATS;
3414 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3415 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
3416 sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
3418 * The default group preallocation is 512, which for 4k block
3419 * sizes translates to 2 megabytes. However for bigalloc file
3420 * systems, this is probably too big (i.e., if the cluster size
3421 * is 1 megabyte, then group preallocation size becomes half a
3422 * gigabyte!). As a default, we will keep a two megabyte
3423 * group prealloc size for cluster sizes up to 64k, and after
3424 * that, we will force a minimum group preallocation size of
3425 * 32 clusters. This translates to 8 megs when the cluster
3426 * size is 256k, and 32 megs when the cluster size is 1 meg,
3427 * which seems reasonable as a default.
3429 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
3430 sbi->s_cluster_bits, 32);
3432 * If there is an s_stripe > 1, then we set the s_mb_group_prealloc
3433 * to the lowest multiple of s_stripe which is bigger than
3434 * the s_mb_group_prealloc as determined above. We want
3435 * the preallocation size to be an exact multiple of the
3436 * RAID stripe size so that preallocations don't fragment the stripes.
3439 if (sbi->s_stripe > 1) {
3440 sbi->s_mb_group_prealloc = roundup(
3441 sbi->s_mb_group_prealloc, sbi->s_stripe);
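
/*
 * A worked example of the defaults chosen above, assuming 4k blocks and
 * MB_DEFAULT_GROUP_PREALLOC = 512:
 *
 *	no bigalloc (cluster == block):   512 >> 0 = 512 clusters (2 MiB)
 *	1 MiB clusters (cluster_bits 8):  512 >> 8 = 2, clamped to 32 clusters
 *
 * and with s_stripe = 48 blocks, roundup(512, 48) = 528 keeps the
 * preallocation an exact multiple of the stripe.  The mb_sketch_*
 * helper is illustrative only, not part of mballoc.
 */
static inline unsigned int mb_sketch_group_prealloc(unsigned int default_blocks,
						    unsigned int cluster_bits,
						    unsigned int stripe)
{
	unsigned int prealloc = max(default_blocks >> cluster_bits, 32U);

	if (stripe > 1)
		prealloc = roundup(prealloc, stripe);
	return prealloc;
}
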
3444 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3445 if (sbi->s_locality_groups == NULL) {
3449 for_each_possible_cpu(i) {
3450 struct ext4_locality_group *lg;
3451 lg = per_cpu_ptr(sbi->s_locality_groups, i);
3452 mutex_init(&lg->lg_mutex);
3453 for (j = 0; j < PREALLOC_TB_SIZE; j++)
3454 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3455 spin_lock_init(&lg->lg_prealloc_lock);
3458 if (bdev_nonrot(sb->s_bdev))
3459 sbi->s_mb_max_linear_groups = 0;
3461 sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
3462 /* init file for buddy data */
3463 ret = ext4_mb_init_backend(sb);
3465 goto out_free_locality_groups;
3469 out_free_locality_groups:
3470 free_percpu(sbi->s_locality_groups);
3471 sbi->s_locality_groups = NULL;
3473 kfree(sbi->s_mb_avg_fragment_size);
3474 kfree(sbi->s_mb_avg_fragment_size_locks);
3475 kfree(sbi->s_mb_largest_free_orders);
3476 kfree(sbi->s_mb_largest_free_orders_locks);
3477 kfree(sbi->s_mb_offsets);
3478 sbi->s_mb_offsets = NULL;
3479 kfree(sbi->s_mb_maxs);
3480 sbi->s_mb_maxs = NULL;
3484 /* needs to be called with the ext4 group lock held */
3485 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3487 struct ext4_prealloc_space *pa;
3488 struct list_head *cur, *tmp;
3491 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3492 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3493 list_del(&pa->pa_group_list);
3495 kmem_cache_free(ext4_pspace_cachep, pa);
3500 int ext4_mb_release(struct super_block *sb)
3502 ext4_group_t ngroups = ext4_get_groups_count(sb);
3504 int num_meta_group_infos;
3505 struct ext4_group_info *grinfo, ***group_info;
3506 struct ext4_sb_info *sbi = EXT4_SB(sb);
3507 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3510 if (test_opt(sb, DISCARD)) {
3512 * wait for the discard work to drain all of the ext4_free_data
3514 flush_work(&sbi->s_discard_work);
3515 WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
3518 if (sbi->s_group_info) {
3519 for (i = 0; i < ngroups; i++) {
3521 grinfo = ext4_get_group_info(sb, i);
3522 mb_group_bb_bitmap_free(grinfo);
3523 ext4_lock_group(sb, i);
3524 count = ext4_mb_cleanup_pa(grinfo);
3526 mb_debug(sb, "mballoc: %d PAs left\n",
3528 ext4_unlock_group(sb, i);
3529 kmem_cache_free(cachep, grinfo);
3531 num_meta_group_infos = (ngroups +
3532 EXT4_DESC_PER_BLOCK(sb) - 1) >>
3533 EXT4_DESC_PER_BLOCK_BITS(sb);
3535 group_info = rcu_dereference(sbi->s_group_info);
3536 for (i = 0; i < num_meta_group_infos; i++)
3537 kfree(group_info[i]);
3541 kfree(sbi->s_mb_avg_fragment_size);
3542 kfree(sbi->s_mb_avg_fragment_size_locks);
3543 kfree(sbi->s_mb_largest_free_orders);
3544 kfree(sbi->s_mb_largest_free_orders_locks);
3545 kfree(sbi->s_mb_offsets);
3546 kfree(sbi->s_mb_maxs);
3547 iput(sbi->s_buddy_cache);
3548 if (sbi->s_mb_stats) {
3549 ext4_msg(sb, KERN_INFO,
3550 "mballoc: %u blocks %u reqs (%u success)",
3551 atomic_read(&sbi->s_bal_allocated),
3552 atomic_read(&sbi->s_bal_reqs),
3553 atomic_read(&sbi->s_bal_success));
3554 ext4_msg(sb, KERN_INFO,
3555 "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3556 "%u 2^N hits, %u breaks, %u lost",
3557 atomic_read(&sbi->s_bal_ex_scanned),
3558 atomic_read(&sbi->s_bal_groups_scanned),
3559 atomic_read(&sbi->s_bal_goals),
3560 atomic_read(&sbi->s_bal_2orders),
3561 atomic_read(&sbi->s_bal_breaks),
3562 atomic_read(&sbi->s_mb_lost_chunks));
3563 ext4_msg(sb, KERN_INFO,
3564 "mballoc: %u generated and it took %llu",
3565 atomic_read(&sbi->s_mb_buddies_generated),
3566 atomic64_read(&sbi->s_mb_generation_time));
3567 ext4_msg(sb, KERN_INFO,
3568 "mballoc: %u preallocated, %u discarded",
3569 atomic_read(&sbi->s_mb_preallocated),
3570 atomic_read(&sbi->s_mb_discarded));
3573 free_percpu(sbi->s_locality_groups);
3578 static inline int ext4_issue_discard(struct super_block *sb,
3579 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3582 ext4_fsblk_t discard_block;
3584 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3585 ext4_group_first_block_no(sb, block_group));
3586 count = EXT4_C2B(EXT4_SB(sb), count);
3587 trace_ext4_discard_blocks(sb,
3588 (unsigned long long) discard_block, count);
3590 return __blkdev_issue_discard(sb->s_bdev,
3591 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
3592 (sector_t)count << (sb->s_blocksize_bits - 9),
3595 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
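
/*
 * A minimal sketch of the unit conversion above: bio-level discard works
 * in 512-byte sectors, so a filesystem block number is shifted left by
 * (blocksize_bits - 9).  E.g. with 4k blocks the shift is 3: fs block
 * 1000 becomes sector 8000, and each block covers 8 sectors.  The
 * mb_sketch_* helper is illustrative only, not part of mballoc.
 */
static inline sector_t mb_sketch_block_to_sector(unsigned long long block,
						 unsigned int blocksize_bits)
{
	return (sector_t)block << (blocksize_bits - 9);
}
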
3598 static void ext4_free_data_in_buddy(struct super_block *sb,
3599 struct ext4_free_data *entry)
3601 struct ext4_buddy e4b;
3602 struct ext4_group_info *db;
3603 int err, count = 0, count2 = 0;
3605 mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3606 entry->efd_count, entry->efd_group, entry);
3608 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3609 /* we expect to find an existing buddy because it's pinned */
3612 spin_lock(&EXT4_SB(sb)->s_md_lock);
3613 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3614 spin_unlock(&EXT4_SB(sb)->s_md_lock);
3617 /* there are blocks to put in the buddy to make them really free */
3618 count += entry->efd_count;
3620 ext4_lock_group(sb, entry->efd_group);
3621 /* Take it out of the per-group rb tree */
3622 rb_erase(&entry->efd_node, &(db->bb_free_root));
3623 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3626 * Clear the trimmed flag for the group so that the next
3627 * ext4_trim_fs can trim it.
3628 * If the volume is mounted with -o discard, online discard
3629 * is supported and the free blocks will be trimmed online.
3631 if (!test_opt(sb, DISCARD))
3632 EXT4_MB_GRP_CLEAR_TRIMMED(db);
3634 if (!db->bb_free_root.rb_node) {
3635 /* No more items in the per-group rb tree;
3636 * balance the refcounts from ext4_mb_free_metadata()
3638 put_page(e4b.bd_buddy_page);
3639 put_page(e4b.bd_bitmap_page);
3641 ext4_unlock_group(sb, entry->efd_group);
3642 ext4_mb_unload_buddy(&e4b);
3644 mb_debug(sb, "freed %d blocks in %d structures\n", count,
3649 * This function is called by the jbd2 layer once the commit has finished,
3650 * so we know we can free the blocks that were released with that commit.
3652 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3654 struct ext4_sb_info *sbi = EXT4_SB(sb);
3655 struct ext4_free_data *entry, *tmp;
3656 struct list_head freed_data_list;
3657 struct list_head *cut_pos = NULL;
3660 INIT_LIST_HEAD(&freed_data_list);
3662 spin_lock(&sbi->s_md_lock);
3663 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3664 if (entry->efd_tid != commit_tid)
3666 cut_pos = &entry->efd_list;
3669 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3671 spin_unlock(&sbi->s_md_lock);
3673 list_for_each_entry(entry, &freed_data_list, efd_list)
3674 ext4_free_data_in_buddy(sb, entry);
3676 if (test_opt(sb, DISCARD)) {
3677 spin_lock(&sbi->s_md_lock);
3678 wake = list_empty(&sbi->s_discard_list);
3679 list_splice_tail(&freed_data_list, &sbi->s_discard_list);
3680 spin_unlock(&sbi->s_md_lock);
3682 queue_work(system_unbound_wq, &sbi->s_discard_work);
3684 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3685 kmem_cache_free(ext4_free_data_cachep, entry);
3689 int __init ext4_init_mballoc(void)
3691 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3692 SLAB_RECLAIM_ACCOUNT);
3693 if (ext4_pspace_cachep == NULL)
3696 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3697 SLAB_RECLAIM_ACCOUNT);
3698 if (ext4_ac_cachep == NULL)
3701 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3702 SLAB_RECLAIM_ACCOUNT);
3703 if (ext4_free_data_cachep == NULL)
3709 kmem_cache_destroy(ext4_ac_cachep);
3711 kmem_cache_destroy(ext4_pspace_cachep);
3716 void ext4_exit_mballoc(void)
3719 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3720 * before destroying the slab cache.
3723 kmem_cache_destroy(ext4_pspace_cachep);
3724 kmem_cache_destroy(ext4_ac_cachep);
3725 kmem_cache_destroy(ext4_free_data_cachep);
3726 ext4_groupinfo_destroy_slabs();
3731 * Check quota and mark the chosen space (ac->ac_b_ex) as non-free in bitmaps.
3732 * Returns 0 on success or an error code.
3734 static noinline_for_stack int
3735 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3736 handle_t *handle, unsigned int reserv_clstrs)
3738 struct buffer_head *bitmap_bh = NULL;
3739 struct ext4_group_desc *gdp;
3740 struct buffer_head *gdp_bh;
3741 struct ext4_sb_info *sbi;
3742 struct super_block *sb;
3746 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3747 BUG_ON(ac->ac_b_ex.fe_len <= 0);
3752 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3753 if (IS_ERR(bitmap_bh)) {
3754 err = PTR_ERR(bitmap_bh);
3759 BUFFER_TRACE(bitmap_bh, "getting write access");
3760 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
3766 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3770 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3771 ext4_free_group_clusters(sb, gdp));
3773 BUFFER_TRACE(gdp_bh, "get_write_access");
3774 err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE);
3778 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3780 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3781 if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3782 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3783 "fs metadata", block, block+len);
3784 /* The file system was mounted not to panic on error,
3785 * so fix the bitmap and return EFSCORRUPTED.
3786 * We leak some of the blocks here.
3788 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3789 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3790 ac->ac_b_ex.fe_len);
3791 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3792 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3794 err = -EFSCORRUPTED;
3798 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3799 #ifdef AGGRESSIVE_CHECK
3802 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3803 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3804 bitmap_bh->b_data));
3808 mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3809 ac->ac_b_ex.fe_len);
3810 if (ext4_has_group_desc_csum(sb) &&
3811 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3812 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3813 ext4_free_group_clusters_set(sb, gdp,
3814 ext4_free_clusters_after_init(sb,
3815 ac->ac_b_ex.fe_group, gdp));
3817 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3818 ext4_free_group_clusters_set(sb, gdp, len);
3819 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3820 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3822 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3823 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3825 * Now reduce the dirty block count also. Should not go negative
3827 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3828 /* release all the reserved blocks if non-delalloc */
3829 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3832 if (sbi->s_log_groups_per_flex) {
3833 ext4_group_t flex_group = ext4_flex_group(sbi,
3834 ac->ac_b_ex.fe_group);
3835 atomic64_sub(ac->ac_b_ex.fe_len,
3836 &sbi_array_rcu_deref(sbi, s_flex_groups,
3837 flex_group)->free_clusters);
3840 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3843 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3851 * Idempotent helper for Ext4 fast commit replay path to set the state of
3852 * blocks in bitmaps and update counters.
3854 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3857 struct buffer_head *bitmap_bh = NULL;
3858 struct ext4_group_desc *gdp;
3859 struct buffer_head *gdp_bh;
3860 struct ext4_sb_info *sbi = EXT4_SB(sb);
3862 ext4_grpblk_t blkoff;
3865 unsigned int clen, clen_changed, thisgrp_len;
3868 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3871 * Check to see if we are freeing blocks across a group boundary.
3873 * In the case of flex_bg, it can happen that (block, len)
3874 * spans across more than one group. In that case we need to
3875 * get the corresponding group metadata to work with.
3876 * For this we have the "goto again" loop.
3878 thisgrp_len = min_t(unsigned int, (unsigned int)len,
3879 EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3880 clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3882 if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) {
3883 ext4_error(sb, "Marking blocks in system zone - "
3884 "Block = %llu, len = %u",
3885 block, thisgrp_len);
3890 bitmap_bh = ext4_read_block_bitmap(sb, group);
3891 if (IS_ERR(bitmap_bh)) {
3892 err = PTR_ERR(bitmap_bh);
3898 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3902 ext4_lock_group(sb, group);
3904 for (i = 0; i < clen; i++)
3905 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3909 clen_changed = clen - already;
3911 mb_set_bits(bitmap_bh->b_data, blkoff, clen);
3913 mb_clear_bits(bitmap_bh->b_data, blkoff, clen);
3914 if (ext4_has_group_desc_csum(sb) &&
3915 (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3916 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3917 ext4_free_group_clusters_set(sb, gdp,
3918 ext4_free_clusters_after_init(sb, group, gdp));
3921 clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3923 clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3925 ext4_free_group_clusters_set(sb, gdp, clen);
3926 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3927 ext4_group_desc_csum_set(sb, group, gdp);
3929 ext4_unlock_group(sb, group);
3931 if (sbi->s_log_groups_per_flex) {
3932 ext4_group_t flex_group = ext4_flex_group(sbi, group);
3933 struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3934 s_flex_groups, flex_group);
3937 atomic64_sub(clen_changed, &fg->free_clusters);
3939 atomic64_add(clen_changed, &fg->free_clusters);
3943 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3946 sync_dirty_buffer(bitmap_bh);
3947 err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3948 sync_dirty_buffer(gdp_bh);
3952 block += thisgrp_len;
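
/*
 * A minimal sketch of the per-group splitting done above: each pass
 * marks at most the blocks remaining in the current group, then the
 * "again" loop moves on to the next group.  E.g. with 32768 blocks per
 * group, a range starting 32000 blocks into a group with len = 2000 is
 * split into 768 blocks in this group and 1232 in the next.  The
 * mb_sketch_* helper is illustrative only (cluster conversion for
 * bigalloc is omitted).
 */
static inline unsigned int mb_sketch_blocks_this_group(unsigned int len,
						       unsigned int blkoff,
						       unsigned int blocks_per_group)
{
	return min(len, blocks_per_group - blkoff);
}
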
3963 * here we normalize the request for the locality group.
3964 * Group requests are normalized to s_mb_group_prealloc, which goes to
3965 * s_stripe if we set the same via the mount option.
3966 * s_mb_group_prealloc can be configured via
3967 * /sys/fs/ext4/<partition>/mb_group_prealloc
3969 * XXX: should we try to preallocate more than the group has now?
3971 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3973 struct super_block *sb = ac->ac_sb;
3974 struct ext4_locality_group *lg = ac->ac_lg;
3977 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3978 mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3982 * Normalization means making the request better in terms of
3983 * size and alignment
3985 static noinline_for_stack void
3986 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3987 struct ext4_allocation_request *ar)
3989 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3992 loff_t size, start_off;
3993 loff_t orig_size __maybe_unused;
3995 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3996 struct ext4_prealloc_space *pa;
3998 /* only normalize data requests; metadata requests
3999 * do not need preallocation */
4000 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4003 /* sometimes the caller may want exact blocks */
4004 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4007 /* caller may indicate that preallocation isn't
4008 * required (it's a tail, for example) */
4009 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
4012 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
4013 ext4_mb_normalize_group_request(ac);
4017 bsbits = ac->ac_sb->s_blocksize_bits;
4019 /* first, let's learn the actual file size
4020 * assuming the current request is allocated */
4021 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
4022 size = size << bsbits;
4023 if (size < i_size_read(ac->ac_inode))
4024 size = i_size_read(ac->ac_inode);
4027 /* max size of free chunks */
4030 #define NRL_CHECK_SIZE(req, size, max, chunk_size) \
4031 (req <= (size) || max <= (chunk_size))
4033 /* first, try to predict filesize */
4034 /* XXX: should this table be tunable? */
4036 if (size <= 16 * 1024) {
4038 } else if (size <= 32 * 1024) {
4040 } else if (size <= 64 * 1024) {
4042 } else if (size <= 128 * 1024) {
4044 } else if (size <= 256 * 1024) {
4046 } else if (size <= 512 * 1024) {
4048 } else if (size <= 1024 * 1024) {
4050 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
4051 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4052 (21 - bsbits)) << 21;
4053 size = 2 * 1024 * 1024;
4054 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
4055 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4056 (22 - bsbits)) << 22;
4057 size = 4 * 1024 * 1024;
4058 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
4059 (8<<20)>>bsbits, max, 8 * 1024)) {
4060 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
4061 (23 - bsbits)) << 23;
4062 size = 8 * 1024 * 1024;
4064 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
4065 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
4066 ac->ac_o_ex.fe_len) << bsbits;
4068 size = size >> bsbits;
4069 start = start_off >> bsbits;
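
/*
 * A worked example of the prediction above, assuming 4k blocks
 * (bsbits = 12): a request whose resulting file size is about 3 MiB
 * falls into the NRL_CHECK_SIZE(size, 4M, ...) branch, so the goal
 * window is a 2 MiB chunk aligned to a 2 MiB boundary.  For
 * fe_logical = 600:
 *
 *	start_off = (600 >> (21 - 12)) << 21 = 1 << 21 = 2 MiB
 *	size      = 2 MiB
 *
 * which after the ">> bsbits" conversion above gives start = 512 and
 * size = 512 blocks.  The mb_sketch_* helper is illustrative only, not
 * part of mballoc.
 */
static inline unsigned long long mb_sketch_align_logical(unsigned long long fe_logical,
							 unsigned int bsbits,
							 unsigned int align_shift)
{
	/* byte offset of the containing 2^align_shift-sized window */
	return (fe_logical >> (align_shift - bsbits)) << align_shift;
}
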
4072 * For tiny groups (smaller than 8MB) the chosen allocation
4073 * alignment may be larger than the group size. Make sure the
4074 * alignment does not move the allocation to a different group, which
4075 * would make mballoc fail assertions later.
4077 start = max(start, rounddown(ac->ac_o_ex.fe_logical,
4078 (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
4080 /* don't cover already allocated blocks in selected range */
4081 if (ar->pleft && start <= ar->lleft) {
4082 size -= ar->lleft + 1 - start;
4083 start = ar->lleft + 1;
4085 if (ar->pright && start + size - 1 >= ar->lright)
4086 size -= start + size - ar->lright;
4089 * Trim the allocation request for filesystems with artificially small groups.
4092 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4093 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4097 /* check we don't cross already preallocated blocks */
4099 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4104 spin_lock(&pa->pa_lock);
4105 if (pa->pa_deleted) {
4106 spin_unlock(&pa->pa_lock);
4110 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4113 /* PA must not overlap original request */
4114 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
4115 ac->ac_o_ex.fe_logical < pa->pa_lstart));
4117 /* skip PAs this normalized request doesn't overlap with */
4118 if (pa->pa_lstart >= end || pa_end <= start) {
4119 spin_unlock(&pa->pa_lock);
4122 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
4124 /* adjust start or end to be adjacent to this pa */
4125 if (pa_end <= ac->ac_o_ex.fe_logical) {
4126 BUG_ON(pa_end < start);
4128 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
4129 BUG_ON(pa->pa_lstart > end);
4130 end = pa->pa_lstart;
4132 spin_unlock(&pa->pa_lock);
4137 /* XXX: extra loop to check we really don't overlap preallocations */
4139 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4142 spin_lock(&pa->pa_lock);
4143 if (pa->pa_deleted == 0) {
4144 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
4146 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
4148 spin_unlock(&pa->pa_lock);
4153 * In this function "start" and "size" are normalized for better
4154 * alignment and length such that we could preallocate more blocks.
4155 * This normalization is done such that the original request of
4156 * ac->ac_o_ex.fe_logical & fe_len should always lie within the "start" and
4157 * "size" boundaries.
4158 * (Note fe_len can be relaxed since the FS block allocation API does not
4159 * guarantee the number of contiguous blocks allocated, since that
4160 * depends upon the free space left, etc.)
4161 * In case of inode pa, later we use the allocated blocks
4162 * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated
4163 * range of goal/best blocks [start, size] to put it at the
4164 * ac_o_ex.fe_logical extent of this inode.
4165 * (See ext4_mb_use_inode_pa() for more details)
4167 if (start + size <= ac->ac_o_ex.fe_logical ||
4168 start > ac->ac_o_ex.fe_logical) {
4169 ext4_msg(ac->ac_sb, KERN_ERR,
4170 "start %lu, size %lu, fe_logical %lu",
4171 (unsigned long) start, (unsigned long) size,
4172 (unsigned long) ac->ac_o_ex.fe_logical);
4175 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
4177 /* now prepare goal request */
4179 /* XXX: is it better to align blocks with respect to logical
4180 * placement or to satisfy a big request as is */
4181 ac->ac_g_ex.fe_logical = start;
4182 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
4184 /* define goal start in order to merge */
4185 if (ar->pright && (ar->lright == (start + size))) {
4186 /* merge to the right */
4187 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
4188 &ac->ac_f_ex.fe_group,
4189 &ac->ac_f_ex.fe_start);
4190 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4192 if (ar->pleft && (ar->lleft + 1 == start)) {
4193 /* merge to the left */
4194 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
4195 &ac->ac_f_ex.fe_group,
4196 &ac->ac_f_ex.fe_start);
4197 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
4200 mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
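/*
 * The trimming above preserves one invariant: however the window is
 * shrunk or shifted, the original extent must still lie inside it.  A
 * minimal userspace sketch of that invariant, assuming the original
 * extent fits in both the window and the limit; the struct and helper
 * name are illustrative only, not part of ext4:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	struct window { uint64_t start; uint64_t size; };
 *
 *	// Shrink "w" to at most "limit" blocks while keeping the original
 *	// extent [o_start, o_start + o_len) covered.
 *	static void clamp_window(struct window *w, uint64_t limit,
 *				 uint64_t o_start, uint64_t o_len)
 *	{
 *		if (w->size > limit)
 *			w->size = limit;
 *		if (w->start + w->size < o_start + o_len)
 *			w->start = o_start + o_len - w->size;
 *		if (w->start > o_start)
 *			w->start = o_start;
 *		assert(w->start <= o_start &&
 *		       o_start + o_len <= w->start + w->size);
 *	}
 */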
4204 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
4206 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4208 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
4209 atomic_inc(&sbi->s_bal_reqs);
4210 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
4211 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
4212 atomic_inc(&sbi->s_bal_success);
4213 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
4214 atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
4215 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
4216 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
4217 atomic_inc(&sbi->s_bal_goals);
4218 if (ac->ac_found > sbi->s_mb_max_to_scan)
4219 atomic_inc(&sbi->s_bal_breaks);
4222 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
4223 trace_ext4_mballoc_alloc(ac);
4225 trace_ext4_mballoc_prealloc(ac);
4229 * Called on failure; free up any blocks from the inode PA for this
4230 * context. We don't need this for MB_GROUP_PA because we only change
4231 * pa_free in ext4_mb_release_context(), but on failure, we've already
4232 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
4234 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
4236 struct ext4_prealloc_space *pa = ac->ac_pa;
4237 struct ext4_buddy e4b;
4241 if (ac->ac_f_ex.fe_len == 0)
4243 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
4246 * This should never happen since we pin the
4247 * pages in the ext4_allocation_context so
4248 * ext4_mb_load_buddy() should never fail.
4250 WARN(1, "mb_load_buddy failed (%d)", err);
4253 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4254 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
4255 ac->ac_f_ex.fe_len);
4256 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
4257 ext4_mb_unload_buddy(&e4b);
4260 if (pa->pa_type == MB_INODE_PA)
4261 pa->pa_free += ac->ac_b_ex.fe_len;
4265 * use blocks preallocated to inode
4267 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
4268 struct ext4_prealloc_space *pa)
4270 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4275 /* found preallocated blocks, use them */
4276 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
4277 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
4278 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
4279 len = EXT4_NUM_B2C(sbi, end - start);
4280 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
4281 &ac->ac_b_ex.fe_start);
4282 ac->ac_b_ex.fe_len = len;
4283 ac->ac_status = AC_STATUS_FOUND;
4286 BUG_ON(start < pa->pa_pstart);
4287 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
4288 BUG_ON(pa->pa_free < len);
4291 mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
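/*
 * The arithmetic above is plain interval mapping: the returned physical
 * start sits at the same offset inside the PA's physical range as the
 * requested logical block sits inside its logical range, and the length
 * is capped by what remains of the PA.  A small self-contained sketch,
 * assuming the logical block falls inside the PA; names are illustrative
 * only:
 *
 *	#include <stdint.h>
 *
 *	struct pa_range { uint64_t lstart, pstart, len; };	// in blocks
 *
 *	// Physical start for "want" blocks at logical block "logical";
 *	// *out_len receives how many blocks the PA can actually supply.
 *	static uint64_t carve_from_pa(const struct pa_range *pa, uint64_t logical,
 *				      uint64_t want, uint64_t *out_len)
 *	{
 *		uint64_t start = pa->pstart + (logical - pa->lstart);
 *		uint64_t end = pa->pstart + pa->len;
 *
 *		if (start + want < end)
 *			end = start + want;
 *		*out_len = end - start;
 *		return start;
 *	}
 */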
4295 * use blocks preallocated to locality group
4297 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
4298 struct ext4_prealloc_space *pa)
4300 unsigned int len = ac->ac_o_ex.fe_len;
4302 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
4303 &ac->ac_b_ex.fe_group,
4304 &ac->ac_b_ex.fe_start);
4305 ac->ac_b_ex.fe_len = len;
4306 ac->ac_status = AC_STATUS_FOUND;
4309 /* we don't correct pa_pstart or pa_plen here to avoid
4310 * a possible race when the group is being loaded concurrently;
4311 * instead we correct the pa later, after blocks are marked
4312 * in on-disk bitmap -- see ext4_mb_release_context()
4313 * Other CPUs are prevented from allocating from this pa by lg_mutex
4315 mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
4316 pa->pa_lstart-len, len, pa);
4320 * Return the prealloc space that has the minimal distance
4321 * from the goal block. @cpa is the prealloc
4322 * space that currently has the minimal known distance
4323 * from the goal block.
4325 static struct ext4_prealloc_space *
4326 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
4327 struct ext4_prealloc_space *pa,
4328 struct ext4_prealloc_space *cpa)
4330 ext4_fsblk_t cur_distance, new_distance;
4333 atomic_inc(&pa->pa_count);
4336 cur_distance = abs(goal_block - cpa->pa_pstart);
4337 new_distance = abs(goal_block - pa->pa_pstart);
4339 if (cur_distance <= new_distance)
4342 /* drop the previous reference */
4343 atomic_dec(&cpa->pa_count);
4344 atomic_inc(&pa->pa_count);
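/*
 * The policy above is simply "keep whichever candidate PA starts closer
 * to the goal block"; ties keep the current one.  The comparison in
 * isolation, without the reference counting (helper names are
 * illustrative, not ext4 API):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t distance(uint64_t a, uint64_t b)
 *	{
 *		return a > b ? a - b : b - a;
 *	}
 *
 *	// Returns the pa_pstart closer to "goal"; on a tie keep "cur".
 *	static uint64_t closer_pstart(uint64_t goal, uint64_t cur, uint64_t cand)
 *	{
 *		return distance(goal, cand) < distance(goal, cur) ? cand : cur;
 *	}
 */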
4349 * search goal blocks in preallocated space
4351 static noinline_for_stack bool
4352 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
4354 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4356 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
4357 struct ext4_locality_group *lg;
4358 struct ext4_prealloc_space *pa, *cpa = NULL;
4359 ext4_fsblk_t goal_block;
4361 /* only data can be preallocated */
4362 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4365 /* first, try per-file preallocation */
4367 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
4369 /* all fields in this condition don't change,
4370 * so we can skip locking for them */
4371 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
4372 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
4373 EXT4_C2B(sbi, pa->pa_len)))
4376 /* non-extent files can't have physical blocks past 2^32 */
4377 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
4378 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
4379 EXT4_MAX_BLOCK_FILE_PHYS))
4382 /* found preallocated blocks, use them */
4383 spin_lock(&pa->pa_lock);
4384 if (pa->pa_deleted == 0 && pa->pa_free) {
4385 atomic_inc(&pa->pa_count);
4386 ext4_mb_use_inode_pa(ac, pa);
4387 spin_unlock(&pa->pa_lock);
4388 ac->ac_criteria = 10;
4392 spin_unlock(&pa->pa_lock);
4396 /* can we use group allocation? */
4397 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
4400 /* inode may have no locality group for some reason */
4404 order = fls(ac->ac_o_ex.fe_len) - 1;
4405 if (order > PREALLOC_TB_SIZE - 1)
4406 /* The max size of hash table is PREALLOC_TB_SIZE */
4407 order = PREALLOC_TB_SIZE - 1;
4409 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
4411 * search for the prealloc space that has the
4412 * minimal distance from the goal block.
4414 for (i = order; i < PREALLOC_TB_SIZE; i++) {
4416 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
4418 spin_lock(&pa->pa_lock);
4419 if (pa->pa_deleted == 0 &&
4420 pa->pa_free >= ac->ac_o_ex.fe_len) {
4422 cpa = ext4_mb_check_group_pa(goal_block,
4425 spin_unlock(&pa->pa_lock);
4430 ext4_mb_use_group_pa(ac, cpa);
4431 ac->ac_criteria = 20;
4438 * the function goes through all blocks freed in the group
4439 * but not yet committed and marks them used in the in-core bitmap.
4440 * buddy must be generated from this bitmap
4441 * Need to be called with the ext4 group lock held
4443 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
4447 struct ext4_group_info *grp;
4448 struct ext4_free_data *entry;
4450 grp = ext4_get_group_info(sb, group);
4451 n = rb_first(&(grp->bb_free_root));
4454 entry = rb_entry(n, struct ext4_free_data, efd_node);
4455 mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
4462 * the function goes through all preallocations in this group and marks them
4463 * used in the in-core bitmap. buddy must be generated from this bitmap
4464 * Need to be called with ext4 group lock held
4466 static noinline_for_stack
4467 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4470 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4471 struct ext4_prealloc_space *pa;
4472 struct list_head *cur;
4473 ext4_group_t groupnr;
4474 ext4_grpblk_t start;
4475 int preallocated = 0;
4478 /* all forms of preallocation discard first load the group,
4479 * so the only competing code is preallocation use.
4480 * we don't need any locking here.
4481 * notice we do NOT skip preallocations with pa_deleted set;
4482 * otherwise we could leave used blocks available for
4483 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4484 * is dropping the preallocation
4486 list_for_each(cur, &grp->bb_prealloc_list) {
4487 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4488 spin_lock(&pa->pa_lock);
4489 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4492 spin_unlock(&pa->pa_lock);
4493 if (unlikely(len == 0))
4495 BUG_ON(groupnr != group);
4496 mb_set_bits(bitmap, start, len);
4497 preallocated += len;
4499 mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4502 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4503 struct ext4_prealloc_space *pa)
4505 struct ext4_inode_info *ei;
4507 if (pa->pa_deleted) {
4508 ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4509 pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4516 if (pa->pa_type == MB_INODE_PA) {
4517 ei = EXT4_I(pa->pa_inode);
4518 atomic_dec(&ei->i_prealloc_active);
4522 static void ext4_mb_pa_callback(struct rcu_head *head)
4524 struct ext4_prealloc_space *pa;
4525 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4527 BUG_ON(atomic_read(&pa->pa_count));
4528 BUG_ON(pa->pa_deleted == 0);
4529 kmem_cache_free(ext4_pspace_cachep, pa);
4533 * drops a reference to preallocated space descriptor
4534 * if this was the last reference and the space is consumed
4536 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4537 struct super_block *sb, struct ext4_prealloc_space *pa)
4540 ext4_fsblk_t grp_blk;
4542 /* in this short window concurrent discard can set pa_deleted */
4543 spin_lock(&pa->pa_lock);
4544 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4545 spin_unlock(&pa->pa_lock);
4549 if (pa->pa_deleted == 1) {
4550 spin_unlock(&pa->pa_lock);
4554 ext4_mb_mark_pa_deleted(sb, pa);
4555 spin_unlock(&pa->pa_lock);
4557 grp_blk = pa->pa_pstart;
4559 * If doing group-based preallocation, pa_pstart may be in the
4560 * next group when pa is used up
4562 if (pa->pa_type == MB_GROUP_PA)
4565 grp = ext4_get_group_number(sb, grp_blk);
4570 * P1 (buddy init)                      P2 (regular allocation)
4571 *                                      find block B in PA
4572 * copy on-disk bitmap to buddy
4573 *                                      mark B in on-disk bitmap
4574 * drop PA from group
4575 * mark all PAs in buddy
4577 * thus, P1 initializes buddy with B available. To prevent this
4578 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4581 ext4_lock_group(sb, grp);
4582 list_del(&pa->pa_group_list);
4583 ext4_unlock_group(sb, grp);
4585 spin_lock(pa->pa_obj_lock);
4586 list_del_rcu(&pa->pa_inode_list);
4587 spin_unlock(pa->pa_obj_lock);
4589 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4593 * creates new preallocated space for given inode
4595 static noinline_for_stack void
4596 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4598 struct super_block *sb = ac->ac_sb;
4599 struct ext4_sb_info *sbi = EXT4_SB(sb);
4600 struct ext4_prealloc_space *pa;
4601 struct ext4_group_info *grp;
4602 struct ext4_inode_info *ei;
4604 /* preallocate only when found space is larger than requested */
4605 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4606 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4607 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4608 BUG_ON(ac->ac_pa == NULL);
4612 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4618 /* we can't allocate as much as the normalizer wants,
4619 * so the found space must get a proper lstart
4620 * to cover the original request */
4621 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4622 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4624 /* we're limited by the original request in that
4625 * the logical block must be covered anyway;
4626 * winl is the window we can move our chunk within */
4627 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
4629 /* also, we should cover whole original request */
4630 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
4632 /* the smallest one defines real window */
4633 win = min(winl, wins);
4635 offs = ac->ac_o_ex.fe_logical %
4636 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4637 if (offs && offs < win)
4640 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
4641 EXT4_NUM_B2C(sbi, win);
4642 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4643 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4646 /* preallocation can change ac_b_ex, thus we store actually
4647 * allocated blocks for history */
4648 ac->ac_f_ex = ac->ac_b_ex;
4650 pa->pa_lstart = ac->ac_b_ex.fe_logical;
4651 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4652 pa->pa_len = ac->ac_b_ex.fe_len;
4653 pa->pa_free = pa->pa_len;
4654 spin_lock_init(&pa->pa_lock);
4655 INIT_LIST_HEAD(&pa->pa_inode_list);
4656 INIT_LIST_HEAD(&pa->pa_group_list);
4658 pa->pa_type = MB_INODE_PA;
4660 mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4661 pa->pa_len, pa->pa_lstart);
4662 trace_ext4_mb_new_inode_pa(ac, pa);
4664 ext4_mb_use_inode_pa(ac, pa);
4665 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4667 ei = EXT4_I(ac->ac_inode);
4668 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4670 pa->pa_obj_lock = &ei->i_prealloc_lock;
4671 pa->pa_inode = ac->ac_inode;
4673 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4675 spin_lock(pa->pa_obj_lock);
4676 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4677 spin_unlock(pa->pa_obj_lock);
4678 atomic_inc(&ei->i_prealloc_active);
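/*
 * The window computation above can be read on its own: when the found
 * extent is shorter than the normalized goal, slide the PA's logical
 * start left by at most "win" blocks so the original logical block is
 * still covered and some alignment is kept.  A userspace sketch, all
 * quantities in blocks, assuming g_logical <= o_logical and
 * o_len <= b_len as the BUG_ONs above require (helper is illustrative):
 *
 *	#include <stdint.h>
 *
 *	static uint64_t pa_lstart_for_short_extent(uint64_t o_logical,
 *						   uint64_t o_len,
 *						   uint64_t g_logical,
 *						   uint64_t b_len)
 *	{
 *		uint64_t winl = o_logical - g_logical;	// room on the left
 *		uint64_t wins = b_len - o_len;		// slack in the extent
 *		uint64_t win = winl < wins ? winl : wins;
 *		uint64_t offs = o_logical % b_len;	// alignment hint
 *
 *		if (offs && offs < win)
 *			win = offs;
 *		return o_logical - win;			// new pa_lstart
 *	}
 */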
4682 * creates new preallocated space for the locality group the inode belongs to
4684 static noinline_for_stack void
4685 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4687 struct super_block *sb = ac->ac_sb;
4688 struct ext4_locality_group *lg;
4689 struct ext4_prealloc_space *pa;
4690 struct ext4_group_info *grp;
4692 /* preallocate only when found space is larger than requested */
4693 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4694 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4695 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4696 BUG_ON(ac->ac_pa == NULL);
4700 /* preallocation can change ac_b_ex, thus we store actually
4701 * allocated blocks for history */
4702 ac->ac_f_ex = ac->ac_b_ex;
4704 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4705 pa->pa_lstart = pa->pa_pstart;
4706 pa->pa_len = ac->ac_b_ex.fe_len;
4707 pa->pa_free = pa->pa_len;
4708 spin_lock_init(&pa->pa_lock);
4709 INIT_LIST_HEAD(&pa->pa_inode_list);
4710 INIT_LIST_HEAD(&pa->pa_group_list);
4712 pa->pa_type = MB_GROUP_PA;
4714 mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4715 pa->pa_len, pa->pa_lstart);
4716 trace_ext4_mb_new_group_pa(ac, pa);
4718 ext4_mb_use_group_pa(ac, pa);
4719 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4721 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4725 pa->pa_obj_lock = &lg->lg_prealloc_lock;
4726 pa->pa_inode = NULL;
4728 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4731 * We will later add the new pa to the right bucket
4732 * after updating the pa_free in ext4_mb_release_context
4736 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4738 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4739 ext4_mb_new_group_pa(ac);
4741 ext4_mb_new_inode_pa(ac);
4745 * finds all unused blocks in on-disk bitmap, frees them in
4746 * in-core bitmap and buddy.
4747 * @pa must be unlinked from inode and group lists, so that
4748 * nobody else can find/use it.
4749 * the caller MUST hold group/inode locks.
4750 * TODO: optimize the case when there are no in-core structures yet
4752 static noinline_for_stack int
4753 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4754 struct ext4_prealloc_space *pa)
4756 struct super_block *sb = e4b->bd_sb;
4757 struct ext4_sb_info *sbi = EXT4_SB(sb);
4762 unsigned long long grp_blk_start;
4765 BUG_ON(pa->pa_deleted == 0);
4766 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4767 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4768 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4769 end = bit + pa->pa_len;
4772 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4775 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4776 mb_debug(sb, "free preallocated %u/%u in group %u\n",
4777 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4778 (unsigned) next - bit, (unsigned) group);
4781 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4782 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4783 EXT4_C2B(sbi, bit)),
4785 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4788 if (free != pa->pa_free) {
4789 ext4_msg(e4b->bd_sb, KERN_CRIT,
4790 "pa %p: logic %lu, phys. %lu, len %d",
4791 pa, (unsigned long) pa->pa_lstart,
4792 (unsigned long) pa->pa_pstart,
4794 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4797 * pa is already deleted so we use the value obtained
4798 * from the bitmap and continue.
4801 atomic_add(free, &sbi->s_mb_discarded);
4806 static noinline_for_stack int
4807 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4808 struct ext4_prealloc_space *pa)
4810 struct super_block *sb = e4b->bd_sb;
4814 trace_ext4_mb_release_group_pa(sb, pa);
4815 BUG_ON(pa->pa_deleted == 0);
4816 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4817 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4818 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4819 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4820 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4826 * releases all preallocations in given group
4828 * first, we need to decide discard policy:
4829 * - when do we discard
4831 * - how many do we discard
4832 * 1) how many requested
4834 static noinline_for_stack int
4835 ext4_mb_discard_group_preallocations(struct super_block *sb,
4836 ext4_group_t group, int *busy)
4838 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4839 struct buffer_head *bitmap_bh = NULL;
4840 struct ext4_prealloc_space *pa, *tmp;
4841 struct list_head list;
4842 struct ext4_buddy e4b;
4846 mb_debug(sb, "discard preallocation for group %u\n", group);
4847 if (list_empty(&grp->bb_prealloc_list))
4850 bitmap_bh = ext4_read_block_bitmap(sb, group);
4851 if (IS_ERR(bitmap_bh)) {
4852 err = PTR_ERR(bitmap_bh);
4853 ext4_error_err(sb, -err,
4854 "Error %d reading block bitmap for %u",
4859 err = ext4_mb_load_buddy(sb, group, &e4b);
4861 ext4_warning(sb, "Error %d loading buddy information for %u",
4867 INIT_LIST_HEAD(&list);
4868 ext4_lock_group(sb, group);
4869 list_for_each_entry_safe(pa, tmp,
4870 &grp->bb_prealloc_list, pa_group_list) {
4871 spin_lock(&pa->pa_lock);
4872 if (atomic_read(&pa->pa_count)) {
4873 spin_unlock(&pa->pa_lock);
4877 if (pa->pa_deleted) {
4878 spin_unlock(&pa->pa_lock);
4882 /* seems this one can be freed ... */
4883 ext4_mb_mark_pa_deleted(sb, pa);
4886 this_cpu_inc(discard_pa_seq);
4888 /* we can trust pa_free ... */
4889 free += pa->pa_free;
4891 spin_unlock(&pa->pa_lock);
4893 list_del(&pa->pa_group_list);
4894 list_add(&pa->u.pa_tmp_list, &list);
4897 /* now free all selected PAs */
4898 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4900 /* remove from object (inode or locality group) */
4901 spin_lock(pa->pa_obj_lock);
4902 list_del_rcu(&pa->pa_inode_list);
4903 spin_unlock(pa->pa_obj_lock);
4905 if (pa->pa_type == MB_GROUP_PA)
4906 ext4_mb_release_group_pa(&e4b, pa);
4908 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4910 list_del(&pa->u.pa_tmp_list);
4911 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4914 ext4_unlock_group(sb, group);
4915 ext4_mb_unload_buddy(&e4b);
4918 mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4919 free, group, grp->bb_free);
4924 * releases all unused preallocated blocks for the given inode
4926 * It's important to discard preallocations under i_data_sem
4927 * We don't want another block to be served from the prealloc
4928 * space when we are discarding the inode prealloc space.
4930 * FIXME!! Make sure it is valid at all the call sites
4932 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4934 struct ext4_inode_info *ei = EXT4_I(inode);
4935 struct super_block *sb = inode->i_sb;
4936 struct buffer_head *bitmap_bh = NULL;
4937 struct ext4_prealloc_space *pa, *tmp;
4938 ext4_group_t group = 0;
4939 struct list_head list;
4940 struct ext4_buddy e4b;
4943 if (!S_ISREG(inode->i_mode)) {
4944 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4948 if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4951 mb_debug(sb, "discard preallocation for inode %lu\n",
4953 trace_ext4_discard_preallocations(inode,
4954 atomic_read(&ei->i_prealloc_active), needed);
4956 INIT_LIST_HEAD(&list);
4962 /* first, collect all pa's in the inode */
4963 spin_lock(&ei->i_prealloc_lock);
4964 while (!list_empty(&ei->i_prealloc_list) && needed) {
4965 pa = list_entry(ei->i_prealloc_list.prev,
4966 struct ext4_prealloc_space, pa_inode_list);
4967 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4968 spin_lock(&pa->pa_lock);
4969 if (atomic_read(&pa->pa_count)) {
4970 /* this shouldn't happen often - nobody should
4971 * use preallocation while we're discarding it */
4972 spin_unlock(&pa->pa_lock);
4973 spin_unlock(&ei->i_prealloc_lock);
4974 ext4_msg(sb, KERN_ERR,
4975 "uh-oh! used pa while discarding");
4977 schedule_timeout_uninterruptible(HZ);
4981 if (pa->pa_deleted == 0) {
4982 ext4_mb_mark_pa_deleted(sb, pa);
4983 spin_unlock(&pa->pa_lock);
4984 list_del_rcu(&pa->pa_inode_list);
4985 list_add(&pa->u.pa_tmp_list, &list);
4990 /* someone is deleting pa right now */
4991 spin_unlock(&pa->pa_lock);
4992 spin_unlock(&ei->i_prealloc_lock);
4994 /* we have to wait here because pa_deleted
4995 * doesn't mean pa is already unlinked from
4996 * the list. As we might be called from
4997 * ->clear_inode(), the inode will get freed
4998 * and a concurrent thread which is unlinking
4999 * pa from the inode's list may access already
5000 * freed memory; bad-bad-bad */
5002 /* XXX: if this happens too often, we can
5003 * add a flag to force wait only in case
5004 * of ->clear_inode(), but not in case of
5005 * regular truncate */
5006 schedule_timeout_uninterruptible(HZ);
5009 spin_unlock(&ei->i_prealloc_lock);
5011 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
5012 BUG_ON(pa->pa_type != MB_INODE_PA);
5013 group = ext4_get_group_number(sb, pa->pa_pstart);
5015 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5016 GFP_NOFS|__GFP_NOFAIL);
5018 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5023 bitmap_bh = ext4_read_block_bitmap(sb, group);
5024 if (IS_ERR(bitmap_bh)) {
5025 err = PTR_ERR(bitmap_bh);
5026 ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
5028 ext4_mb_unload_buddy(&e4b);
5032 ext4_lock_group(sb, group);
5033 list_del(&pa->pa_group_list);
5034 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
5035 ext4_unlock_group(sb, group);
5037 ext4_mb_unload_buddy(&e4b);
5040 list_del(&pa->u.pa_tmp_list);
5041 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5045 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
5047 struct ext4_prealloc_space *pa;
5049 BUG_ON(ext4_pspace_cachep == NULL);
5050 pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
5053 atomic_set(&pa->pa_count, 1);
5058 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
5060 struct ext4_prealloc_space *pa = ac->ac_pa;
5064 WARN_ON(!atomic_dec_and_test(&pa->pa_count));
5065 kmem_cache_free(ext4_pspace_cachep, pa);
5068 #ifdef CONFIG_EXT4_DEBUG
5069 static inline void ext4_mb_show_pa(struct super_block *sb)
5071 ext4_group_t i, ngroups;
5073 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5076 ngroups = ext4_get_groups_count(sb);
5077 mb_debug(sb, "groups: ");
5078 for (i = 0; i < ngroups; i++) {
5079 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
5080 struct ext4_prealloc_space *pa;
5081 ext4_grpblk_t start;
5082 struct list_head *cur;
5083 ext4_lock_group(sb, i);
5084 list_for_each(cur, &grp->bb_prealloc_list) {
5085 pa = list_entry(cur, struct ext4_prealloc_space,
5087 spin_lock(&pa->pa_lock);
5088 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
5090 spin_unlock(&pa->pa_lock);
5091 mb_debug(sb, "PA:%u:%d:%d\n", i, start,
5094 ext4_unlock_group(sb, i);
5095 mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
5100 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5102 struct super_block *sb = ac->ac_sb;
5104 if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
5107 mb_debug(sb, "Can't allocate:"
5108 " Allocation context details:");
5109 mb_debug(sb, "status %u flags 0x%x",
5110 ac->ac_status, ac->ac_flags);
5111 mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
5112 "goal %lu/%lu/%lu@%lu, "
5113 "best %lu/%lu/%lu@%lu cr %d",
5114 (unsigned long)ac->ac_o_ex.fe_group,
5115 (unsigned long)ac->ac_o_ex.fe_start,
5116 (unsigned long)ac->ac_o_ex.fe_len,
5117 (unsigned long)ac->ac_o_ex.fe_logical,
5118 (unsigned long)ac->ac_g_ex.fe_group,
5119 (unsigned long)ac->ac_g_ex.fe_start,
5120 (unsigned long)ac->ac_g_ex.fe_len,
5121 (unsigned long)ac->ac_g_ex.fe_logical,
5122 (unsigned long)ac->ac_b_ex.fe_group,
5123 (unsigned long)ac->ac_b_ex.fe_start,
5124 (unsigned long)ac->ac_b_ex.fe_len,
5125 (unsigned long)ac->ac_b_ex.fe_logical,
5126 (int)ac->ac_criteria);
5127 mb_debug(sb, "%u found", ac->ac_found);
5128 ext4_mb_show_pa(sb);
5131 static inline void ext4_mb_show_pa(struct super_block *sb)
5135 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
5137 ext4_mb_show_pa(ac->ac_sb);
5143 * We use locality group preallocation for small files. The size of the
5144 * file is determined by the current size or the resulting size after
5145 * allocation, whichever is larger
5147 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
5149 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
5151 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5152 int bsbits = ac->ac_sb->s_blocksize_bits;
5154 bool inode_pa_eligible, group_pa_eligible;
5156 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
5159 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
5162 group_pa_eligible = sbi->s_mb_group_prealloc > 0;
5163 inode_pa_eligible = true;
5164 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
5165 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
5168 /* No point in using inode preallocation for closed files */
5169 if ((size == isize) && !ext4_fs_is_busy(sbi) &&
5170 !inode_is_open_for_write(ac->ac_inode))
5171 inode_pa_eligible = false;
5173 size = max(size, isize);
5174 /* Don't use group allocation for large files */
5175 if (size > sbi->s_mb_stream_request)
5176 group_pa_eligible = false;
5178 if (!group_pa_eligible) {
5179 if (inode_pa_eligible)
5180 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
5182 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
5186 BUG_ON(ac->ac_lg != NULL);
5188 * locality group prealloc space is per-CPU. The reason for having
5189 * a per-CPU locality group is to reduce the contention between block
5190 * requests from multiple CPUs.
5192 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
5194 /* we're going to use group allocation */
5195 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
5197 /* serialize all allocations in the group */
5198 mutex_lock(&ac->ac_lg->lg_mutex);
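/*
 * Stripped to its decision table, the policy above compares the larger
 * of the current and resulting file size against s_mb_stream_request:
 * big files (or filesystems with group preallocation disabled) take the
 * per-inode path, small files take the per-CPU locality group, and a
 * closed, idle file gets no preallocation at all.  A rough sketch of
 * just that choice (enum and helper are illustrative, not ext4 API):
 *
 *	enum prealloc_kind { PREALLOC_NONE, PREALLOC_INODE, PREALLOC_GROUP };
 *
 *	static enum prealloc_kind pick_prealloc(unsigned long long size,
 *						unsigned long long stream_req,
 *						int group_enabled,
 *						int inode_eligible)
 *	{
 *		if (size > stream_req || !group_enabled)
 *			return inode_eligible ? PREALLOC_INODE : PREALLOC_NONE;
 *		return PREALLOC_GROUP;
 *	}
 */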
5201 static noinline_for_stack int
5202 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
5203 struct ext4_allocation_request *ar)
5205 struct super_block *sb = ar->inode->i_sb;
5206 struct ext4_sb_info *sbi = EXT4_SB(sb);
5207 struct ext4_super_block *es = sbi->s_es;
5211 ext4_grpblk_t block;
5213 /* we can't allocate > group size */
5216 /* just a dirty hack to filter too big requests */
5217 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
5218 len = EXT4_CLUSTERS_PER_GROUP(sb);
5220 /* start searching from the goal */
5222 if (goal < le32_to_cpu(es->s_first_data_block) ||
5223 goal >= ext4_blocks_count(es))
5224 goal = le32_to_cpu(es->s_first_data_block);
5225 ext4_get_group_no_and_offset(sb, goal, &group, &block);
5227 /* set up allocation goals */
5228 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
5229 ac->ac_status = AC_STATUS_CONTINUE;
5231 ac->ac_inode = ar->inode;
5232 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
5233 ac->ac_o_ex.fe_group = group;
5234 ac->ac_o_ex.fe_start = block;
5235 ac->ac_o_ex.fe_len = len;
5236 ac->ac_g_ex = ac->ac_o_ex;
5237 ac->ac_flags = ar->flags;
5239 /* we have to define context: we'll work with a file or
5240 * locality group. this is a policy, actually */
5241 ext4_mb_group_or_file(ac);
5243 mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
5244 "left: %u/%u, right %u/%u to %swritable\n",
5245 (unsigned) ar->len, (unsigned) ar->logical,
5246 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
5247 (unsigned) ar->lleft, (unsigned) ar->pleft,
5248 (unsigned) ar->lright, (unsigned) ar->pright,
5249 inode_is_open_for_write(ar->inode) ? "" : "non-");
5254 static noinline_for_stack void
5255 ext4_mb_discard_lg_preallocations(struct super_block *sb,
5256 struct ext4_locality_group *lg,
5257 int order, int total_entries)
5259 ext4_group_t group = 0;
5260 struct ext4_buddy e4b;
5261 struct list_head discard_list;
5262 struct ext4_prealloc_space *pa, *tmp;
5264 mb_debug(sb, "discard locality group preallocation\n");
5266 INIT_LIST_HEAD(&discard_list);
5268 spin_lock(&lg->lg_prealloc_lock);
5269 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
5271 lockdep_is_held(&lg->lg_prealloc_lock)) {
5272 spin_lock(&pa->pa_lock);
5273 if (atomic_read(&pa->pa_count)) {
5275 * This is the pa that we just used
5276 * for block allocation. So don't
5279 spin_unlock(&pa->pa_lock);
5282 if (pa->pa_deleted) {
5283 spin_unlock(&pa->pa_lock);
5286 /* only lg prealloc space */
5287 BUG_ON(pa->pa_type != MB_GROUP_PA);
5289 /* seems this one can be freed ... */
5290 ext4_mb_mark_pa_deleted(sb, pa);
5291 spin_unlock(&pa->pa_lock);
5293 list_del_rcu(&pa->pa_inode_list);
5294 list_add(&pa->u.pa_tmp_list, &discard_list);
5297 if (total_entries <= 5) {
5299 * we want to keep only 5 entries
5300 * allowing it to grow to 8. This
5301 * makes sure we don't call discard
5302 * soon for this list.
5307 spin_unlock(&lg->lg_prealloc_lock);
5309 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
5312 group = ext4_get_group_number(sb, pa->pa_pstart);
5313 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
5314 GFP_NOFS|__GFP_NOFAIL);
5316 ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
5320 ext4_lock_group(sb, group);
5321 list_del(&pa->pa_group_list);
5322 ext4_mb_release_group_pa(&e4b, pa);
5323 ext4_unlock_group(sb, group);
5325 ext4_mb_unload_buddy(&e4b);
5326 list_del(&pa->u.pa_tmp_list);
5327 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
5332 * We have incremented pa_count. So it cannot be freed at this
5333 * point. Also we hold lg_mutex. So no parallel allocation is
5334 * possible from this lg. That means pa_free cannot be updated.
5336 * A parallel ext4_mb_discard_group_preallocations is possible,
5337 * which can cause the lg_prealloc_list to be updated.
5340 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
5342 int order, added = 0, lg_prealloc_count = 1;
5343 struct super_block *sb = ac->ac_sb;
5344 struct ext4_locality_group *lg = ac->ac_lg;
5345 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
5347 order = fls(pa->pa_free) - 1;
5348 if (order > PREALLOC_TB_SIZE - 1)
5349 /* The max size of hash table is PREALLOC_TB_SIZE */
5350 order = PREALLOC_TB_SIZE - 1;
5351 /* Add the prealloc space to lg */
5352 spin_lock(&lg->lg_prealloc_lock);
5353 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
5355 lockdep_is_held(&lg->lg_prealloc_lock)) {
5356 spin_lock(&tmp_pa->pa_lock);
5357 if (tmp_pa->pa_deleted) {
5358 spin_unlock(&tmp_pa->pa_lock);
5361 if (!added && pa->pa_free < tmp_pa->pa_free) {
5362 /* Add to the tail of the previous entry */
5363 list_add_tail_rcu(&pa->pa_inode_list,
5364 &tmp_pa->pa_inode_list);
5367 * we want to count the total
5368 * number of entries in the list
5371 spin_unlock(&tmp_pa->pa_lock);
5372 lg_prealloc_count++;
5375 list_add_tail_rcu(&pa->pa_inode_list,
5376 &lg->lg_prealloc_list[order]);
5377 spin_unlock(&lg->lg_prealloc_lock);
5379 /* Now trim the list to be not more than 8 elements */
5380 if (lg_prealloc_count > 8) {
5381 ext4_mb_discard_lg_preallocations(sb, lg,
5382 order, lg_prealloc_count);
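/*
 * The list handling above keeps each per-order bucket ordered by
 * pa_free and trims it back once it grows past 8 entries.  A heavily
 * simplified sketch of an ordered insert with a hard cap, assuming
 * ascending order and ignoring that the real code discards the trimmed
 * PAs rather than dropping them (names are illustrative only):
 *
 *	#define LG_KEEP 8
 *
 *	// Insert "free" keeping vals[] sorted ascending; returns the new
 *	// count, never exceeding LG_KEEP.
 *	static int lg_insert_sorted(unsigned int *vals, int count, unsigned int free)
 *	{
 *		int i = 0, j;
 *
 *		while (i < count && vals[i] <= free)
 *			i++;
 *		if (count < LG_KEEP)
 *			count++;
 *		for (j = count - 1; j > i; j--)
 *			vals[j] = vals[j - 1];
 *		if (i < count)
 *			vals[i] = free;
 *		return count;
 *	}
 */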
5389 * if per-inode prealloc list is too long, trim some PA
5391 static void ext4_mb_trim_inode_pa(struct inode *inode)
5393 struct ext4_inode_info *ei = EXT4_I(inode);
5394 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5397 count = atomic_read(&ei->i_prealloc_active);
5398 delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
5399 if (count > sbi->s_mb_max_inode_prealloc + delta) {
5400 count -= sbi->s_mb_max_inode_prealloc;
5401 ext4_discard_preallocations(inode, count);
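/*
 * The trim above is deliberately lazy: nothing is discarded until the
 * active count exceeds the configured maximum by a 25% slack, and then
 * only the excess over the maximum is released.  The arithmetic in
 * isolation (helper name is illustrative):
 *
 *	// How many PAs to discard for "count" active PAs and a configured
 *	// maximum "max"; 0 means leave the list alone.
 *	static unsigned int inode_pa_excess(unsigned int count, unsigned int max)
 *	{
 *		unsigned int delta = (max >> 2) + 1;
 *
 *		return count > max + delta ? count - max : 0;
 *	}
 */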
5406 * release all resources we used in the allocation
5408 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
5410 struct inode *inode = ac->ac_inode;
5411 struct ext4_inode_info *ei = EXT4_I(inode);
5412 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
5413 struct ext4_prealloc_space *pa = ac->ac_pa;
5415 if (pa->pa_type == MB_GROUP_PA) {
5416 /* see comment in ext4_mb_use_group_pa() */
5417 spin_lock(&pa->pa_lock);
5418 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5419 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
5420 pa->pa_free -= ac->ac_b_ex.fe_len;
5421 pa->pa_len -= ac->ac_b_ex.fe_len;
5422 spin_unlock(&pa->pa_lock);
5425 * We want to add the pa to the right bucket.
5426 * Remove it from the list and while adding
5427 * make sure the list to which we are adding
5430 if (likely(pa->pa_free)) {
5431 spin_lock(pa->pa_obj_lock);
5432 list_del_rcu(&pa->pa_inode_list);
5433 spin_unlock(pa->pa_obj_lock);
5434 ext4_mb_add_n_trim(ac);
5438 if (pa->pa_type == MB_INODE_PA) {
5440 * treat the per-inode prealloc list as an LRU list, then try
5441 * to trim the least recently used PA.
5443 spin_lock(pa->pa_obj_lock);
5444 list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5445 spin_unlock(pa->pa_obj_lock);
5448 ext4_mb_put_pa(ac, ac->ac_sb, pa);
5450 if (ac->ac_bitmap_page)
5451 put_page(ac->ac_bitmap_page);
5452 if (ac->ac_buddy_page)
5453 put_page(ac->ac_buddy_page);
5454 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5455 mutex_unlock(&ac->ac_lg->lg_mutex);
5456 ext4_mb_collect_stats(ac);
5457 ext4_mb_trim_inode_pa(inode);
5461 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5463 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5465 int freed = 0, busy = 0;
5468 trace_ext4_mb_discard_preallocations(sb, needed);
5471 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5473 for (i = 0; i < ngroups && needed > 0; i++) {
5474 ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5480 if (needed > 0 && busy && ++retry < 3) {
5488 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5489 struct ext4_allocation_context *ac, u64 *seq)
5495 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5500 seq_retry = ext4_get_discard_pa_seq_sum();
5501 if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5502 ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5508 mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5512 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5513 struct ext4_allocation_request *ar, int *errp);
5516 * Main entry point into mballoc to allocate blocks
5517 * it tries to use preallocation first, then falls back
5518 * to usual allocation
5520 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5521 struct ext4_allocation_request *ar, int *errp)
5523 struct ext4_allocation_context *ac = NULL;
5524 struct ext4_sb_info *sbi;
5525 struct super_block *sb;
5526 ext4_fsblk_t block = 0;
5527 unsigned int inquota = 0;
5528 unsigned int reserv_clstrs = 0;
5533 sb = ar->inode->i_sb;
5536 trace_ext4_request_blocks(ar);
5537 if (sbi->s_mount_state & EXT4_FC_REPLAY)
5538 return ext4_mb_new_blocks_simple(handle, ar, errp);
5540 /* Allow to use superuser reservation for quota file */
5541 if (ext4_is_quota_file(ar->inode))
5542 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5544 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5545 /* Without delayed allocation we need to verify
5546 * there are enough free blocks to do block allocation
5547 * and verify allocation doesn't exceed the quota limits.
5550 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5552 /* let others to free the space */
5554 ar->len = ar->len >> 1;
5557 ext4_mb_show_pa(sb);
5561 reserv_clstrs = ar->len;
5562 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5563 dquot_alloc_block_nofail(ar->inode,
5564 EXT4_C2B(sbi, ar->len));
5567 dquot_alloc_block(ar->inode,
5568 EXT4_C2B(sbi, ar->len))) {
5570 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5581 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5588 *errp = ext4_mb_initialize_context(ac, ar);
5594 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5595 seq = this_cpu_read(discard_pa_seq);
5596 if (!ext4_mb_use_preallocated(ac)) {
5597 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5598 ext4_mb_normalize_request(ac, ar);
5600 *errp = ext4_mb_pa_alloc(ac);
5604 /* allocate space in core */
5605 *errp = ext4_mb_regular_allocator(ac);
5607 * pa allocated above is added to grp->bb_prealloc_list only
5608 * when we were able to allocate some block i.e. when
5609 * ac->ac_status == AC_STATUS_FOUND.
5610 * And error from above mean ac->ac_status != AC_STATUS_FOUND
5611 * So we have to free this pa here itself.
5614 ext4_mb_pa_free(ac);
5615 ext4_discard_allocated_blocks(ac);
5618 if (ac->ac_status == AC_STATUS_FOUND &&
5619 ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5620 ext4_mb_pa_free(ac);
5622 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5623 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5625 ext4_discard_allocated_blocks(ac);
5628 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5629 ar->len = ac->ac_b_ex.fe_len;
5632 if (++retries < 3 &&
5633 ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5636 * If block allocation fails then the pa allocated above
5637 * needs to be freed here itself.
5639 ext4_mb_pa_free(ac);
5645 ac->ac_b_ex.fe_len = 0;
5647 ext4_mb_show_ac(ac);
5649 ext4_mb_release_context(ac);
5652 kmem_cache_free(ext4_ac_cachep, ac);
5653 if (inquota && ar->len < inquota)
5654 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5656 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5657 /* release all the reserved blocks if non delalloc */
5658 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5662 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5668 * We can merge two free data extents only if the physical blocks
5669 * are contiguous, AND the extents were freed by the same transaction,
5670 * AND the blocks are associated with the same group.
5672 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5673 struct ext4_free_data *entry,
5674 struct ext4_free_data *new_entry,
5675 struct rb_root *entry_rb_root)
5677 if ((entry->efd_tid != new_entry->efd_tid) ||
5678 (entry->efd_group != new_entry->efd_group))
5680 if (entry->efd_start_cluster + entry->efd_count ==
5681 new_entry->efd_start_cluster) {
5682 new_entry->efd_start_cluster = entry->efd_start_cluster;
5683 new_entry->efd_count += entry->efd_count;
5684 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5685 entry->efd_start_cluster) {
5686 new_entry->efd_count += entry->efd_count;
5689 spin_lock(&sbi->s_md_lock);
5690 list_del(&entry->efd_list);
5691 spin_unlock(&sbi->s_md_lock);
5692 rb_erase(&entry->efd_node, entry_rb_root);
5693 kmem_cache_free(ext4_free_data_cachep, entry);
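/*
 * Without the rb-tree and journal bookkeeping, the merge test above
 * reduces to: same committing transaction, same group, and physically
 * adjacent on one side or the other.  A self-contained sketch of that
 * predicate (struct and helper are illustrative, not ext4 types):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct freed_extent { uint64_t tid, group, start, count; };
 *
 *	// Try to fold "ent" into "newe"; returns true if "ent" can be dropped.
 *	static bool try_merge(const struct freed_extent *ent,
 *			      struct freed_extent *newe)
 *	{
 *		if (ent->tid != newe->tid || ent->group != newe->group)
 *			return false;
 *		if (ent->start + ent->count == newe->start) {
 *			newe->start = ent->start;
 *			newe->count += ent->count;
 *		} else if (newe->start + newe->count == ent->start) {
 *			newe->count += ent->count;
 *		} else {
 *			return false;
 *		}
 *		return true;
 *	}
 */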
5696 static noinline_for_stack int
5697 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5698 struct ext4_free_data *new_entry)
5700 ext4_group_t group = e4b->bd_group;
5701 ext4_grpblk_t cluster;
5702 ext4_grpblk_t clusters = new_entry->efd_count;
5703 struct ext4_free_data *entry;
5704 struct ext4_group_info *db = e4b->bd_info;
5705 struct super_block *sb = e4b->bd_sb;
5706 struct ext4_sb_info *sbi = EXT4_SB(sb);
5707 struct rb_node **n = &db->bb_free_root.rb_node, *node;
5708 struct rb_node *parent = NULL, *new_node;
5710 BUG_ON(!ext4_handle_valid(handle));
5711 BUG_ON(e4b->bd_bitmap_page == NULL);
5712 BUG_ON(e4b->bd_buddy_page == NULL);
5714 new_node = &new_entry->efd_node;
5715 cluster = new_entry->efd_start_cluster;
5718 /* first free block extent. We need to
5719 * protect the buddy cache from being freed,
5720 * otherwise we'll refresh it from the
5721 * on-disk bitmap and lose not-yet-available
5723 get_page(e4b->bd_buddy_page);
5724 get_page(e4b->bd_bitmap_page);
5728 entry = rb_entry(parent, struct ext4_free_data, efd_node);
5729 if (cluster < entry->efd_start_cluster)
5731 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5732 n = &(*n)->rb_right;
5734 ext4_grp_locked_error(sb, group, 0,
5735 ext4_group_first_block_no(sb, group) +
5736 EXT4_C2B(sbi, cluster),
5737 "Block already on to-be-freed list");
5738 kmem_cache_free(ext4_free_data_cachep, new_entry);
5743 rb_link_node(new_node, parent, n);
5744 rb_insert_color(new_node, &db->bb_free_root);
5746 /* Now try to see if the extent can be merged to the left and right */
5747 node = rb_prev(new_node);
5749 entry = rb_entry(node, struct ext4_free_data, efd_node);
5750 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5751 &(db->bb_free_root));
5754 node = rb_next(new_node);
5756 entry = rb_entry(node, struct ext4_free_data, efd_node);
5757 ext4_try_merge_freed_extent(sbi, entry, new_entry,
5758 &(db->bb_free_root));
5761 spin_lock(&sbi->s_md_lock);
5762 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5763 sbi->s_mb_free_pending += clusters;
5764 spin_unlock(&sbi->s_md_lock);
5769 * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5770 * linearly starting at the goal block and also excludes the blocks which
5771 * are going to be in use after fast commit replay.
5773 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5774 struct ext4_allocation_request *ar, int *errp)
5776 struct buffer_head *bitmap_bh;
5777 struct super_block *sb = ar->inode->i_sb;
5779 ext4_grpblk_t blkoff;
5780 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5781 ext4_grpblk_t i = 0;
5782 ext4_fsblk_t goal, block;
5783 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5786 if (goal < le32_to_cpu(es->s_first_data_block) ||
5787 goal >= ext4_blocks_count(es))
5788 goal = le32_to_cpu(es->s_first_data_block);
5791 ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5792 for (; group < ext4_get_groups_count(sb); group++) {
5793 bitmap_bh = ext4_read_block_bitmap(sb, group);
5794 if (IS_ERR(bitmap_bh)) {
5795 *errp = PTR_ERR(bitmap_bh);
5796 pr_warn("Failed to read block bitmap\n");
5800 ext4_get_group_no_and_offset(sb,
5801 max(ext4_group_first_block_no(sb, group), goal),
5804 i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5808 if (ext4_fc_replay_check_excluded(sb,
5809 ext4_group_first_block_no(sb, group) + i)) {
5819 if (group >= ext4_get_groups_count(sb) || i >= max) {
5824 block = ext4_group_first_block_no(sb, group) + i;
5825 ext4_mb_mark_bb(sb, block, 1, 1);
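/*
 * The replay-time allocator above is intentionally simple: walk the
 * groups from the goal onward and take the first zero bit in each
 * bitmap that is not excluded by fast-commit replay.  The inner scan,
 * reduced to a byte-array bitmap (illustrative only, not the mballoc
 * bitmap helpers):
 *
 *	// Returns the first clear bit in [from, max), or max if none.
 *	static unsigned long find_first_zero(const unsigned char *bitmap,
 *					     unsigned long from, unsigned long max)
 *	{
 *		unsigned long i;
 *
 *		for (i = from; i < max; i++)
 *			if (!(bitmap[i >> 3] & (1u << (i & 7))))
 *				break;
 *		return i;
 *	}
 */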
5831 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5832 unsigned long count)
5834 struct buffer_head *bitmap_bh;
5835 struct super_block *sb = inode->i_sb;
5836 struct ext4_group_desc *gdp;
5837 struct buffer_head *gdp_bh;
5839 ext4_grpblk_t blkoff;
5840 int already_freed = 0, err, i;
5842 ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5843 bitmap_bh = ext4_read_block_bitmap(sb, group);
5844 if (IS_ERR(bitmap_bh)) {
5845 err = PTR_ERR(bitmap_bh);
5846 pr_warn("Failed to read block bitmap\n");
5849 gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5853 for (i = 0; i < count; i++) {
5854 if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5857 mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5858 err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5861 ext4_free_group_clusters_set(
5862 sb, gdp, ext4_free_group_clusters(sb, gdp) +
5863 count - already_freed);
5864 ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5865 ext4_group_desc_csum_set(sb, group, gdp);
5866 ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5867 sync_dirty_buffer(bitmap_bh);
5868 sync_dirty_buffer(gdp_bh);
5873 * ext4_mb_clear_bb() -- helper function for freeing blocks.
5874 * Used by ext4_free_blocks()
5875 * @handle: handle for this transaction
5877 * @block: starting physical block to be freed
5878 * @count: number of blocks to be freed
5879 * @flags: flags used by ext4_free_blocks
5881 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5882 ext4_fsblk_t block, unsigned long count,
5885 struct buffer_head *bitmap_bh = NULL;
5886 struct super_block *sb = inode->i_sb;
5887 struct ext4_group_desc *gdp;
5888 unsigned int overflow;
5890 struct buffer_head *gd_bh;
5891 ext4_group_t block_group;
5892 struct ext4_sb_info *sbi;
5893 struct ext4_buddy e4b;
5894 unsigned int count_clusters;
5900 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5901 !ext4_inode_block_valid(inode, block, count)) {
5902 ext4_error(sb, "Freeing blocks in system zone - "
5903 "Block = %llu, count = %lu", block, count);
5904 /* err = 0. ext4_std_error should be a no op */
5907 flags |= EXT4_FREE_BLOCKS_VALIDATED;
5911 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5913 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
5914 ext4_get_group_info(sb, block_group))))
5918 * Check to see if we are freeing blocks across a group
5921 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5922 overflow = EXT4_C2B(sbi, bit) + count -
5923 EXT4_BLOCKS_PER_GROUP(sb);
5925 /* The range changed so it's no longer validated */
5926 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5928 count_clusters = EXT4_NUM_B2C(sbi, count);
5929 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5930 if (IS_ERR(bitmap_bh)) {
5931 err = PTR_ERR(bitmap_bh);
5935 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5941 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5942 !ext4_inode_block_valid(inode, block, count)) {
5943 ext4_error(sb, "Freeing blocks in system zone - "
5944 "Block = %llu, count = %lu", block, count);
5945 /* err = 0. ext4_std_error should be a no op */
5949 BUFFER_TRACE(bitmap_bh, "getting write access");
5950 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
5956 * We are about to modify some metadata. Call the journal APIs
5957 * to unshare ->b_data if a currently-committing transaction is
5960 BUFFER_TRACE(gd_bh, "get_write_access");
5961 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
5964 #ifdef AGGRESSIVE_CHECK
5967 for (i = 0; i < count_clusters; i++)
5968 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5971 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5973 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5974 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5975 GFP_NOFS|__GFP_NOFAIL);
5980 * We need to make sure we don't reuse the freed block until after the
5981 * transaction is committed. We make an exception if the inode is to be
5982 * written in writeback mode since writeback mode has weak data
5983 * consistency guarantees.
5985 if (ext4_handle_valid(handle) &&
5986 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5987 !ext4_should_writeback_data(inode))) {
5988 struct ext4_free_data *new_entry;
5990 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5993 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5994 GFP_NOFS|__GFP_NOFAIL);
5995 new_entry->efd_start_cluster = bit;
5996 new_entry->efd_group = block_group;
5997 new_entry->efd_count = count_clusters;
5998 new_entry->efd_tid = handle->h_transaction->t_tid;
6000 ext4_lock_group(sb, block_group);
6001 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6002 ext4_mb_free_metadata(handle, &e4b, new_entry);
6004 /* need to update group_info->bb_free and bitmap
6005 * with the group lock held. generate_buddy looks at
6006 * them with the group lock held
6008 if (test_opt(sb, DISCARD)) {
6009 err = ext4_issue_discard(sb, block_group, bit, count,
6011 if (err && err != -EOPNOTSUPP)
6012 ext4_msg(sb, KERN_WARNING, "discard request in"
6013 " group:%u block:%d count:%lu failed"
6014 " with %d", block_group, bit, count,
6017 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
6019 ext4_lock_group(sb, block_group);
6020 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
6021 mb_free_blocks(inode, &e4b, bit, count_clusters);
6024 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
6025 ext4_free_group_clusters_set(sb, gdp, ret);
6026 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
6027 ext4_group_desc_csum_set(sb, block_group, gdp);
6028 ext4_unlock_group(sb, block_group);
6030 if (sbi->s_log_groups_per_flex) {
6031 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6032 atomic64_add(count_clusters,
6033 &sbi_array_rcu_deref(sbi, s_flex_groups,
6034 flex_group)->free_clusters);
6038 * on a bigalloc file system, defer the s_freeclusters_counter
6039 * update to the caller (ext4_remove_space and friends) so they
6040 * can determine if a cluster freed here should be rereserved
6042 if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
6043 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
6044 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
6045 percpu_counter_add(&sbi->s_freeclusters_counter,
6049 ext4_mb_unload_buddy(&e4b);
6051 /* We dirtied the bitmap block */
6052 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6053 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6055 /* And the group descriptor block */
6056 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6057 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6061 if (overflow && !err) {
6065 /* The range changed so it's no longer validated */
6066 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6071 ext4_std_error(sb, err);
6076 * ext4_free_blocks() -- Free given blocks and update quota
6077 * @handle: handle for this transaction
6079 * @bh: optional buffer of the block to be freed
6080 * @block: starting physical block to be freed
6081 * @count: number of blocks to be freed
6082 * @flags: flags used by ext4_free_blocks
6084 void ext4_free_blocks(handle_t *handle, struct inode *inode,
6085 struct buffer_head *bh, ext4_fsblk_t block,
6086 unsigned long count, int flags)
6088 struct super_block *sb = inode->i_sb;
6089 unsigned int overflow;
6090 struct ext4_sb_info *sbi;
6094 if (sbi->s_mount_state & EXT4_FC_REPLAY) {
6095 ext4_free_blocks_simple(inode, block, count);
6102 BUG_ON(block != bh->b_blocknr);
6104 block = bh->b_blocknr;
6107 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
6108 !ext4_inode_block_valid(inode, block, count)) {
6109 ext4_error(sb, "Freeing blocks not in datazone - "
6110 "block = %llu, count = %lu", block, count);
6113 flags |= EXT4_FREE_BLOCKS_VALIDATED;
6115 ext4_debug("freeing block %llu\n", block);
6116 trace_ext4_free_blocks(inode, block, count, flags);
6118 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6121 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
6126 * If the extent to be freed does not begin on a cluster
6127 * boundary, we need to deal with partial clusters at the
6128 * beginning and end of the extent. Normally we will free
6129 * blocks at the beginning or the end unless we are explicitly
6130 * requested to avoid doing so.
6132 overflow = EXT4_PBLK_COFF(sbi, block);
6134 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
6135 overflow = sbi->s_cluster_ratio - overflow;
6137 if (count > overflow)
6145 /* The range changed so it's no longer validated */
6146 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6148 overflow = EXT4_LBLK_COFF(sbi, count);
6150 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
6151 if (count > overflow)
6156 count += sbi->s_cluster_ratio - overflow;
6157 /* The range changed so it's no longer validated */
6158 flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
6161 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
6163 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
6165 for (i = 0; i < count; i++) {
6168 bh = sb_find_get_block(inode->i_sb, block + i);
6169 ext4_forget(handle, is_metadata, inode, bh, block + i);
6173 ext4_mb_clear_bb(handle, inode, block, count, flags);
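/*
 * On a bigalloc file system the range passed in may start or end in the
 * middle of a cluster; the code above either widens the range to full
 * cluster boundaries or shrinks it past the partial clusters, depending
 * on the NOFREE_*_CLUSTER flags.  The widening case in isolation, with
 * cluster_ratio meaning blocks per cluster (helper name illustrative):
 *
 *	#include <stdint.h>
 *
 *	static void widen_to_clusters(uint64_t *block, uint64_t *count,
 *				      unsigned int cluster_ratio)
 *	{
 *		uint64_t head = *block % cluster_ratio;
 *		uint64_t tail;
 *
 *		*block -= head;				// round start down
 *		*count += head;
 *		tail = *count % cluster_ratio;
 *		if (tail)
 *			*count += cluster_ratio - tail;	// round length up
 *	}
 */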
6178 * ext4_group_add_blocks() -- Add given blocks to an existing group
6179 * @handle: handle to this transaction
6181 * @block: start physical block to add to the block group
6182 * @count: number of blocks to free
6184 * This marks the blocks as free in the bitmap and buddy.
6186 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
6187 ext4_fsblk_t block, unsigned long count)
6189 struct buffer_head *bitmap_bh = NULL;
6190 struct buffer_head *gd_bh;
6191 ext4_group_t block_group;
6194 struct ext4_group_desc *desc;
6195 struct ext4_sb_info *sbi = EXT4_SB(sb);
6196 struct ext4_buddy e4b;
6197 int err = 0, ret, free_clusters_count;
6198 ext4_grpblk_t clusters_freed;
6199 ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
6200 ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
6201 unsigned long cluster_count = last_cluster - first_cluster + 1;
6203 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
6208 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
6210 * Check to see if we are freeing blocks across a group
6213 if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
6214 ext4_warning(sb, "too many blocks added to group %u",
6220 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
6221 if (IS_ERR(bitmap_bh)) {
6222 err = PTR_ERR(bitmap_bh);
6227 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
6233 if (!ext4_sb_block_valid(sb, NULL, block, count)) {
6234 ext4_error(sb, "Adding blocks in system zones - "
6235 "Block = %llu, count = %lu",
6241 BUFFER_TRACE(bitmap_bh, "getting write access");
6242 err = ext4_journal_get_write_access(handle, sb, bitmap_bh,
6248 * We are about to modify some metadata. Call the journal APIs
6249 * to unshare ->b_data if a currently-committing transaction is
6252 BUFFER_TRACE(gd_bh, "get_write_access");
6253 err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE);
6257 for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
6258 BUFFER_TRACE(bitmap_bh, "clear bit");
6259 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
6260 ext4_error(sb, "bit already cleared for block %llu",
6261 (ext4_fsblk_t)(block + i));
6262 BUFFER_TRACE(bitmap_bh, "bit already cleared");
6268 err = ext4_mb_load_buddy(sb, block_group, &e4b);
6273 * need to update group_info->bb_free and bitmap
6274 * with the group lock held. generate_buddy looks at
6275 * them with the group lock held
6277 ext4_lock_group(sb, block_group);
6278 mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
6279 mb_free_blocks(NULL, &e4b, bit, cluster_count);
6280 free_clusters_count = clusters_freed +
6281 ext4_free_group_clusters(sb, desc);
6282 ext4_free_group_clusters_set(sb, desc, free_clusters_count);
6283 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
6284 ext4_group_desc_csum_set(sb, block_group, desc);
6285 ext4_unlock_group(sb, block_group);
6286 percpu_counter_add(&sbi->s_freeclusters_counter,
6289 if (sbi->s_log_groups_per_flex) {
6290 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
6291 atomic64_add(clusters_freed,
6292 &sbi_array_rcu_deref(sbi, s_flex_groups,
6293 flex_group)->free_clusters);
6296 ext4_mb_unload_buddy(&e4b);
6298 /* We dirtied the bitmap block */
6299 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
6300 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
6302 /* And the group descriptor block */
6303 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
6304 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
6310 ext4_std_error(sb, err);
6315 * ext4_trim_extent -- function to TRIM one single free extent in the group
6316 * @sb: super block for the file system
6317 * @start: starting block of the free extent in the alloc. group
6318 * @count: number of blocks to TRIM
6319 * @e4b: ext4 buddy for the group
6321 * Trim "count" blocks starting at "start" in the "group". To ensure that no
6322 * one will allocate those blocks, mark them as used in the buddy bitmap. This must
6323 * be called under the group lock.
6325 static int ext4_trim_extent(struct super_block *sb,
6326 int start, int count, struct ext4_buddy *e4b)
6330 struct ext4_free_extent ex;
6331 ext4_group_t group = e4b->bd_group;
6334 trace_ext4_trim_extent(sb, group, start, count);
6336 assert_spin_locked(ext4_group_lock_ptr(sb, group));
6338 ex.fe_start = start;
6339 ex.fe_group = group;
6343 * Mark blocks used, so no one can reuse them while
6346 mb_mark_used(e4b, &ex);
6347 ext4_unlock_group(sb, group);
6348 ret = ext4_issue_discard(sb, group, start, count, NULL);
6349 ext4_lock_group(sb, group);
6350 mb_free_blocks(NULL, e4b, start, ex.fe_len);
static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks)
__acquires(ext4_group_lock_ptr(sb, e4b->bd_group))
__releases(ext4_group_lock_ptr(sb, e4b->bd_group))
{
	ext4_grpblk_t next, count, free_count;
	void *bitmap;

	bitmap = e4b->bd_bitmap;
	start = (e4b->bd_info->bb_first_free > start) ?
		e4b->bd_info->bb_first_free : start;
	count = 0;
	free_count = 0;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			int ret = ext4_trim_extent(sb, start, next - start, e4b);

			if (ret && ret != -EOPNOTSUPP)
				break;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, e4b->bd_group);
			cond_resched();
			ext4_lock_group(sb, e4b->bd_group);
		}

		if ((e4b->bd_info->bb_free - free_count) < minblocks)
			break;
	}

	return count;
}

/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:			super block for file system
 * @group:		group to be trimmed
 * @start:		first group block to examine
 * @max:		last group block to examine
 * @minblocks:		minimum extent block count
 * @set_trimmed:	set the trimmed flag if at least one block is trimmed
 *
 * ext4_trim_all_free walks through the group's block bitmap searching for
 * free extents. When a free extent is found, mark it as used in the group
 * buddy bitmap, issue a TRIM command on this extent, and then free the
 * extent in the group buddy bitmap.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks, bool set_trimmed)
{
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}

	ext4_lock_group(sb, group);

	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
	    minblocks < EXT4_SB(sb)->s_last_trim_minblks) {
		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
		if (ret >= 0 && set_trimmed)
			EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	} else {
		ret = 0;
	}

	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   ret, group);

	return ret;
}

/**
 * ext4_trim_fs() -- trim ioctl handle function
 * @sb:			superblock for filesystem
 * @range:		fstrim_range structure
 *
 * start:	First Byte to trim
 * len:		number of Bytes to trim from start
 * minlen:	minimum extent length in Bytes
 * ext4_trim_fs goes through all allocation groups containing Bytes from
 * start to start+len. For each such group the ext4_trim_all_free function
 * is invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev);
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	bool whole_group, eof = false;
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	/* No point to try to trim less than discard granularity */
	if (range->minlen < discard_granularity) {
		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
				discard_granularity >> sb->s_blocksize_bits);
		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
			goto out;
	}
	if (end >= max_blks - 1) {
		end = max_blks - 1;
		eof = true;
	}
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
	whole_group = true;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all the groups except the last one, last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group, note that last_cluster is
		 * already computed earlier by ext4_get_group_no_and_offset()
		 */
		if (group == last_group) {
			end = last_cluster;
			whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1;
		}
		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen, whole_group);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}
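
/*
 * A minimal userspace sketch of how ext4_trim_fs() is normally reached: the
 * FITRIM ioctl fills a struct fstrim_range (all values in bytes) and, on
 * return, range.len reports how many bytes were actually trimmed.  This
 * example is illustrative only and is not part of this file; it would be
 * built as a separate program against <linux/fs.h>.
 */
#if 0
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2)
		return 1;

	fd = open(argv[1], O_RDONLY);	/* any file or dir on the filesystem */
	if (fd < 0)
		return 1;

	memset(&range, 0, sizeof(range));
	range.start = 0;		/* first byte to trim */
	range.len = ULLONG_MAX;		/* trim to the end of the filesystem */
	range.minlen = 0;		/* raised to the discard granularity */

	if (ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}

	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}
#endif
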
/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
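
/*
 * Illustrative sketch (not part of this file's build): a hypothetical
 * ext4_mballoc_query_range_fn callback and caller.  "demo_free_stats",
 * "demo_count_free" and "demo_query_group" are made-up names for this
 * example; the real in-tree user of this interface is the GETFSMAP code
 * in fs/ext4/fsmap.c.
 */
#if 0
struct demo_free_stats {
	unsigned int	extents;
	ext4_grpblk_t	clusters;
};

/* Called once per free extent; returning non-zero stops the walk. */
static int demo_count_free(struct super_block *sb, ext4_group_t group,
			   ext4_grpblk_t start, ext4_grpblk_t len, void *priv)
{
	struct demo_free_stats *stats = priv;

	stats->extents++;
	stats->clusters += len;
	return 0;
}

/* Walk every cluster of one group and log a summary. */
static int demo_query_group(struct super_block *sb, ext4_group_t group)
{
	struct demo_free_stats stats = { 0, 0 };
	int err;

	err = ext4_mballoc_query_range(sb, group, 0,
				       EXT4_CLUSTERS_PER_GROUP(sb) - 1,
				       demo_count_free, &stats);
	if (!err)
		ext4_msg(sb, KERN_INFO,
			 "group %u: %u free extents, %u free clusters",
			 group, stats.extents, (unsigned int)stats.clusters);
	return err;
}
#endif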