fs/btrfs/check-integrity.c  +2 −2

@@ -667,7 +667,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
 	selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
 	if (NULL == selected_super) {
 		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
-		return -1;
+		return -ENOMEM;
 	}

 	list_for_each_entry(device, dev_head, dev_list) {

@@ -1660,7 +1660,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
 					sizeof(*block_ctx->pagev)) *
 					num_pages, GFP_NOFS);
 		if (!block_ctx->mem_to_free)
-			return -1;
+			return -ENOMEM;
 		block_ctx->datav = block_ctx->mem_to_free;
 		block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
 		for (i = 0; i < num_pages; i++) {
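Note on the change above: returning -ENOMEM instead of a bare -1 gives callers a real errno (a literal -1 reads as -EPERM to anything that treats negative returns as errno values). A minimal sketch of the propagation this enables; the caller name is hypothetical and not part of this patch:

/*
 * Hypothetical caller sketch (not from this patch): the allocation
 * failure now propagates as a meaningful errno.
 */
static int example_check_mount(struct btrfsic_state *state,
			       struct btrfs_fs_devices *fs_devices)
{
	int ret;

	ret = btrfsic_process_superblock(state, fs_devices);
	if (ret < 0)
		return ret;	/* e.g. -ENOMEM on allocation failure */
	return 0;
}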
fs/btrfs/compression.c  +48 −46

@@ -745,11 +745,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	return ret;
 }

-static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
-static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES];
-static int comp_num_workspace[BTRFS_COMPRESS_TYPES];
-static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
-static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
+static struct {
+	struct list_head idle_ws;
+	spinlock_t ws_lock;
+	int num_ws;
+	atomic_t alloc_ws;
+	wait_queue_head_t ws_wait;
+} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
 	&btrfs_zlib_compress,

@@ -761,10 +763,10 @@ void __init btrfs_init_compress(void)
 	int i;

 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-		INIT_LIST_HEAD(&comp_idle_workspace[i]);
-		spin_lock_init(&comp_workspace_lock[i]);
-		atomic_set(&comp_alloc_workspace[i], 0);
-		init_waitqueue_head(&comp_workspace_wait[i]);
+		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
+		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
+		atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
+		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 	}
 }

@@ -778,38 +780,38 @@ static struct list_head *find_workspace(int type)
 	int cpus = num_online_cpus();
 	int idx = type - 1;

-	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
-	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
-	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
-	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
-	int *num_workspace			= &comp_num_workspace[idx];
+	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
+	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
+	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
+	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
 again:
-	spin_lock(workspace_lock);
-	if (!list_empty(idle_workspace)) {
-		workspace = idle_workspace->next;
+	spin_lock(ws_lock);
+	if (!list_empty(idle_ws)) {
+		workspace = idle_ws->next;
 		list_del(workspace);
-		(*num_workspace)--;
-		spin_unlock(workspace_lock);
+		(*num_ws)--;
+		spin_unlock(ws_lock);
 		return workspace;
 	}
-	if (atomic_read(alloc_workspace) > cpus) {
+	if (atomic_read(alloc_ws) > cpus) {
 		DEFINE_WAIT(wait);

-		spin_unlock(workspace_lock);
-		prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(alloc_workspace) > cpus && !*num_workspace)
+		spin_unlock(ws_lock);
+		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(alloc_ws) > cpus && !*num_ws)
 			schedule();
-		finish_wait(workspace_wait, &wait);
+		finish_wait(ws_wait, &wait);
 		goto again;
 	}
-	atomic_inc(alloc_workspace);
-	spin_unlock(workspace_lock);
+	atomic_inc(alloc_ws);
+	spin_unlock(ws_lock);

 	workspace = btrfs_compress_op[idx]->alloc_workspace();
 	if (IS_ERR(workspace)) {
-		atomic_dec(alloc_workspace);
-		wake_up(workspace_wait);
+		atomic_dec(alloc_ws);
+		wake_up(ws_wait);
 	}
 	return workspace;
 }

@@ -821,30 +823,30 @@ static struct list_head *find_workspace(int type)
 static void free_workspace(int type, struct list_head *workspace)
 {
 	int idx = type - 1;
-	struct list_head *idle_workspace	= &comp_idle_workspace[idx];
-	spinlock_t *workspace_lock		= &comp_workspace_lock[idx];
-	atomic_t *alloc_workspace		= &comp_alloc_workspace[idx];
-	wait_queue_head_t *workspace_wait	= &comp_workspace_wait[idx];
-	int *num_workspace			= &comp_num_workspace[idx];
-
-	spin_lock(workspace_lock);
-	if (*num_workspace < num_online_cpus()) {
-		list_add(workspace, idle_workspace);
-		(*num_workspace)++;
-		spin_unlock(workspace_lock);
+	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
+	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
+	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
+	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
+
+	spin_lock(ws_lock);
+	if (*num_ws < num_online_cpus()) {
+		list_add(workspace, idle_ws);
+		(*num_ws)++;
+		spin_unlock(ws_lock);
 		goto wake;
 	}
-	spin_unlock(workspace_lock);
+	spin_unlock(ws_lock);

 	btrfs_compress_op[idx]->free_workspace(workspace);
-	atomic_dec(alloc_workspace);
+	atomic_dec(alloc_ws);
 wake:
 	/*
 	 * Make sure counter is updated before we wake up waiters.
 	 */
 	smp_mb();
-	if (waitqueue_active(workspace_wait))
-		wake_up(workspace_wait);
+	if (waitqueue_active(ws_wait))
+		wake_up(ws_wait);
 }

@@ -856,11 +858,11 @@ static void free_workspaces(void)
 	int i;

 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
-		while (!list_empty(&comp_idle_workspace[i])) {
-			workspace = comp_idle_workspace[i].next;
+		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
+			workspace = btrfs_comp_ws[i].idle_ws.next;
 			list_del(workspace);
 			btrfs_compress_op[i]->free_workspace(workspace);
-			atomic_dec(&comp_alloc_workspace[i]);
+			atomic_dec(&btrfs_comp_ws[i].alloc_ws);
 		}
 	}
 }
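Note on the compression.c change: the five parallel per-type arrays collapse into a single array of structs, so all bookkeeping for one compression type lives in one btrfs_comp_ws[] slot. A minimal sketch of how a slot is picked, assuming the existing numbering where compression types start at 1 (e.g. BTRFS_COMPRESS_ZLIB == 1), which is why find_workspace()/free_workspace() index with "type - 1"; the helper name below is illustrative, not part of the patch:

/*
 * Illustrative helper (not in the patch): select the bookkeeping slot
 * for a compression type.  Assumes types are numbered from 1, so
 * zlib maps to slot 0, lzo to slot 1.
 */
static inline struct list_head *comp_idle_list(int type)
{
	int idx = type - 1;

	return &btrfs_comp_ws[idx].idle_ws;
}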
fs/btrfs/ctree.c  +2 −2

@@ -4940,8 +4940,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
 	struct extent_buffer *leaf;
 	struct btrfs_item *item;
-	int last_off;
-	int dsize = 0;
+	u32 last_off;
+	u32 dsize = 0;
 	int ret = 0;
 	int wret;
 	int i;
fs/btrfs/disk-io.c  +22 −13

@@ -3476,22 +3476,31 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
 {
-	if ((flags & (BTRFS_BLOCK_GROUP_DUP |
-		      BTRFS_BLOCK_GROUP_RAID0 |
-		      BTRFS_AVAIL_ALLOC_BIT_SINGLE)) ||
-	    ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0))
-		return 0;
+	int raid_type;
+	int min_tolerated = INT_MAX;

-	if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-		     BTRFS_BLOCK_GROUP_RAID5 |
-		     BTRFS_BLOCK_GROUP_RAID10))
-		return 1;
+	if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
+	    (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
+		min_tolerated = min(min_tolerated,
+				    btrfs_raid_array[BTRFS_RAID_SINGLE].
+				    tolerated_failures);
+
+	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+		if (raid_type == BTRFS_RAID_SINGLE)
+			continue;
+		if (!(flags & btrfs_raid_group[raid_type]))
+			continue;
+		min_tolerated = min(min_tolerated,
+				    btrfs_raid_array[raid_type].
+				    tolerated_failures);
+	}

-	if (flags & BTRFS_BLOCK_GROUP_RAID6)
-		return 2;
+	if (min_tolerated == INT_MAX) {
+		pr_warn("BTRFS: unknown raid flag: %llu\n", flags);
+		min_tolerated = 0;
+	}

-	pr_warn("BTRFS: unknown raid type: %llu\n", flags);
-	return 0;
+	return min_tolerated;
 }

 int btrfs_calc_num_tolerated_disk_barrier_failures(
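Note on the disk-io.c change: the function now derives the answer from a table instead of hard-coded profile checks, taking the minimum tolerated_failures over every profile set in the flags. The btrfs_raid_array and btrfs_raid_group definitions it consults live in volumes.c and are not part of this hunk; the sketch below only shows the shape such an entry could take, with example names and values, not the kernel's actual definition:

/*
 * Sketch only: shape of the table the new code relies on.  The real
 * btrfs_raid_array lives in volumes.c and is not shown in this diff.
 */
struct example_raid_attr {
	int devs_min;			/* fewest devices the profile needs */
	int tolerated_failures;		/* devices that may fail w/o data loss */
};

static const struct example_raid_attr example_raid_array[] = {
	{ .devs_min = 2, .tolerated_failures = 1 },	/* e.g. a RAID1-like profile */
	{ .devs_min = 3, .tolerated_failures = 2 },	/* e.g. a RAID6-like profile */
	{ .devs_min = 1, .tolerated_failures = 0 },	/* e.g. single */
};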
fs/btrfs/extent-tree.c  +25 −33

@@ -3822,7 +3822,8 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
 	u64 num_devices = root->fs_info->fs_devices->rw_devices;
 	u64 target;
-	u64 tmp;
+	u64 raid_type;
+	u64 allowed = 0;

 	/*
 	 * see if restripe for this chunk_type is in progress, if so

@@ -3840,31 +3841,26 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 	spin_unlock(&root->fs_info->balance_lock);

 	/* First, mask out the RAID levels which aren't possible */
-	if (num_devices == 1)
-		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
-			   BTRFS_BLOCK_GROUP_RAID5);
-	if (num_devices < 3)
-		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
-	if (num_devices < 4)
-		flags &= ~BTRFS_BLOCK_GROUP_RAID10;
-
-	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
-		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
-		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
-	flags &= ~tmp;
-
-	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
-		tmp = BTRFS_BLOCK_GROUP_RAID6;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
-		tmp = BTRFS_BLOCK_GROUP_RAID5;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
-		tmp = BTRFS_BLOCK_GROUP_RAID10;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
-		tmp = BTRFS_BLOCK_GROUP_RAID1;
-	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
-		tmp = BTRFS_BLOCK_GROUP_RAID0;
-
-	return extended_to_chunk(flags | tmp);
+	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
+		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
+			allowed |= btrfs_raid_group[raid_type];
+	}
+	allowed &= flags;
+
+	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
+		allowed = BTRFS_BLOCK_GROUP_RAID6;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
+		allowed = BTRFS_BLOCK_GROUP_RAID5;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
+		allowed = BTRFS_BLOCK_GROUP_RAID10;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
+		allowed = BTRFS_BLOCK_GROUP_RAID1;
+	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
+		allowed = BTRFS_BLOCK_GROUP_RAID0;
+
+	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+	return extended_to_chunk(flags | allowed);
 }

 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)

@@ -4891,13 +4887,9 @@ static struct btrfs_block_rsv *get_block_rsv(
 {
 	struct btrfs_block_rsv *block_rsv = NULL;

-	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
-		block_rsv = trans->block_rsv;
-
-	if (root == root->fs_info->csum_root && trans->adding_csums)
-		block_rsv = trans->block_rsv;
-
-	if (root == root->fs_info->uuid_root)
+	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+	    (root == root->fs_info->csum_root && trans->adding_csums) ||
+	    (root == root->fs_info->uuid_root))
 		block_rsv = trans->block_rsv;

 	if (!block_rsv)
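Note on the extent-tree.c change: btrfs_reduce_alloc_profile() now first masks out every profile the current device count cannot satisfy (via devs_min in the volumes.c table) and then keeps only the highest-priority remaining profile. A self-contained model of that flow, with stand-in flag constants and illustrative devs_min values of 2 for RAID0/RAID1 and 4 for RAID10; none of these names are the kernel's:

/*
 * Self-contained model of the reduction (stand-in flags, not the real
 * BTRFS_BLOCK_GROUP_* bits; devs_min values are illustrative).
 */
enum { EX_RAID0 = 1 << 0, EX_RAID1 = 1 << 1, EX_RAID10 = 1 << 2 };

static unsigned long long example_reduce(unsigned long long flags,
					 unsigned long long num_devices)
{
	unsigned long long allowed = 0;

	/* mask out profiles the device count cannot satisfy */
	if (num_devices >= 2)
		allowed |= EX_RAID0 | EX_RAID1;		/* devs_min == 2 */
	if (num_devices >= 4)
		allowed |= EX_RAID10;			/* devs_min == 4 */
	allowed &= flags;

	/* keep only the highest-priority profile that survived */
	if (allowed & EX_RAID10)
		allowed = EX_RAID10;
	else if (allowed & EX_RAID1)
		allowed = EX_RAID1;
	else if (allowed & EX_RAID0)
		allowed = EX_RAID0;

	/* e.g. flags = RAID10|RAID1|RAID0 on 2 devices -> RAID1 */
	return allowed;
}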