// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-cache.c - pblk's write cache
 */

#include "pblk.h"

void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
				unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
			      &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		bio_io_error(bio);
		goto out;
	}

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH) {
		w_ctx.flags |= PBLK_FLUSH_ENTRY;
		pblk_write_kick(pblk);
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);

	if (ret == NVM_IO_DONE)
		bio_endio(bio);
}

/*
 * On GC the incoming lbas are not necessarily sequential. Also, some of the
 * lbas might not be valid entries, which are marked as empty by the GC thread
 */
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct pblk_w_ctx w_ctx;
	unsigned int bpos, pos;
	void *data = gc_rq->data;
	int i, valid_entries;

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
		io_schedule();
		goto retry;
	}

	w_ctx.flags = PBLK_IOTYPE_GC;
	pblk_ppa_set_empty(&w_ctx.ppa);

	for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
		if (gc_rq->lba_list[i] == ADDR_EMPTY)
			continue;

		w_ctx.lba = gc_rq->lba_list[i];

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
		pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
						gc_rq->paddr_list[i], pos);

		data += PBLK_EXPOSED_PAGE_SIZE;
		valid_entries++;
	}

	WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
					"pblk: inconsistent GC write\n");

	atomic64_add(valid_entries, &pblk->gc_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_entries, &pblk->inflight_writes);
	atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif

	pblk_write_should_kick(pblk);

	return NVM_IO_OK;
}
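
/*
 * Illustrative only, compiled out: a minimal sketch of how a target's
 * make_request entry point could route incoming write bios into the
 * cache above. pblk's real entry point lives elsewhere in the driver;
 * the function name example_make_rq and the read-path stub here are
 * assumptions for the sketch, not part of this file's interface.
 */
#if 0
static blk_qc_t example_make_rq(struct request_queue *q, struct bio *bio)
{
	/* lightnvm targets stash their instance in queuedata. */
	struct pblk *pblk = q->queuedata;

	if (op_is_write(bio_op(bio))) {
		/* Writes are only buffered here; pblk_write_to_cache()
		 * ends the bio itself once all entries (and any
		 * REQ_PREFLUSH marker) sit in the ring buffer, and the
		 * write thread persists them to the device later.
		 */
		pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
		return BLK_QC_T_NONE;
	}

	/* Reads would be looked up in the cache and, on a miss,
	 * submitted to the device; omitted in this sketch.
	 */
	return BLK_QC_T_NONE;
}
#endif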