for-5.7/drivers-2020-03-29
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl6BJDYQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgplhMD/95jd4nlVetHAo54z+Zk2ExE13+yDamRKyh
vc7t2tz1reqFOimtVr5aVuTXCTgOx4CpiIox5qcn6qAExN4JtCChOBRGize/0u8S
ckxnhHbN2C0rfnGldvrYYeNRonFI+7QKimnurWUSYYGN0xqbo21BxJ7dFaohMseo
q4K8sIW0ctE6AOlw28Jerkg614s2NDGZ7q1laheXnYHn5c9f1m0NaKN/jyTGgr0X
TLBiLbX2yRrAuvpctBj6Fna6YN7Vdd9jsf2Bt6ipUI1XgHQoVUGMxQNhWPyjsbSv
GzRQUNAfVcasLzCP/Mj/47144OkUtDDpn2mjeXDaFljLDGFULD+jp/SsOmLCxkPC
gI7G2yfBvF96/SOyT0JXrLyMcBd1R2vRoASbc5tPu82mZhx7YJZH5WYtOB9h2gra
RTYo3xcm0EoN6yeMaH+xOuXxTWWInIrgKPONW4H8s7hxEiMt5oFNVBI7vqPr4LVp
tpfxiKZDavKOofKXogNV4W7mSMP/Ir5Q9Ha4g5SXHBGp0z/PHmnQ0xDGNq0KDnU4
eNO0UYCFNCNa+0AOhpNxaVuVm9LjrgvyXRjePgOZQ4akhohwHO6DLrHK1f8Hb1vD
8Ih6uR+F5zZlKsouWro8HLGYm5w40Wq9tbCI8QbPYH6nkGoDmzpPv9jbAeWgJU5c
KqP/5TBSLA==
=Bs4E
-----END PGP SIGNATURE-----

Merge tag 'for-5.7/drivers-2020-03-29' of git://git.kernel.dk/linux-block

Pull block driver updates from Jens Axboe:
 - floppy driver cleanup series from Willy
 - NVMe updates and fixes (Various)
 - null_blk trace improvements (Chaitanya)
 - bcache fixes (Coly)
 - md fixes (via Song)
 - loop block size change optimizations (Martijn)
 - scnprintf() use (Takashi)

* tag 'for-5.7/drivers-2020-03-29' of git://git.kernel.dk/linux-block: (81 commits)
  null_blk: add trace in null_blk_zoned.c
  null_blk: add tracepoint helpers for zoned mode
  block: add a zone condition debug helper
  nvme: cleanup namespace identifier reporting in nvme_init_ns_head
  nvme: rename __nvme_find_ns_head to nvme_find_ns_head
  nvme: refactor nvme_identify_ns_descs error handling
  nvme-tcp: Add warning on state change failure at nvme_tcp_setup_ctrl
  nvme-rdma: Add warning on state change failure at nvme_rdma_setup_ctrl
  nvme: Fix controller creation races with teardown flow
  nvme: Make nvme_uninit_ctrl symmetric to nvme_init_ctrl
  nvme: Fix ctrl use-after-free during sysfs deletion
  nvme-pci: Re-order nvme_pci_free_ctrl
  nvme: Remove unused return code from nvme_delete_ctrl_sync
  nvme: Use nvme_state_terminal helper
  nvme: release ida resources
  nvme: Add compat_ioctl handler for NVME_IOCTL_SUBMIT_IO
  nvmet-tcp: optimize tcp stack TX when data digest is used
  nvme-fabrics: Use scnprintf() for avoiding potential buffer overflow
  nvme-multipath: do not reset on unknown status
  nvmet-rdma: allocate RW ctxs according to mdts
  ...
|
@ -8,16 +8,18 @@
|
|||
*/
|
||||
#ifndef __ASM_ARM_FLOPPY_H
|
||||
#define __ASM_ARM_FLOPPY_H
|
||||
#if 0
|
||||
#include <mach/floppy.h>
|
||||
#endif
|
||||
|
||||
#define fd_outb(val,port) \
|
||||
do { \
|
||||
if ((port) == (u32)FD_DOR) \
|
||||
fd_setdor((val)); \
|
||||
else \
|
||||
outb((val),(port)); \
|
||||
#define fd_outb(val,port) \
|
||||
do { \
|
||||
int new_val = (val); \
|
||||
if (((port) & 7) == FD_DOR) { \
|
||||
if (new_val & 0xf0) \
|
||||
new_val = (new_val & 0x0c) | \
|
||||
floppy_selects[new_val & 3]; \
|
||||
else \
|
||||
new_val &= 0x0c; \
|
||||
} \
|
||||
outb(new_val, (port)); \
|
||||
} while(0)
|
||||
|
||||
#define fd_inb(port) inb((port))
|
||||
|
@ -53,69 +55,7 @@ static inline int fd_dma_setup(void *data, unsigned int length,
|
|||
* to a non-zero track, and then restoring it to track 0. If an error occurs,
|
||||
* then there is no floppy drive present. [to be put back in again]
|
||||
*/
|
||||
static unsigned char floppy_selects[2][4] =
|
||||
{
|
||||
{ 0x10, 0x21, 0x23, 0x33 },
|
||||
{ 0x10, 0x21, 0x23, 0x33 }
|
||||
};
|
||||
|
||||
#define fd_setdor(dor) \
|
||||
do { \
|
||||
int new_dor = (dor); \
|
||||
if (new_dor & 0xf0) \
|
||||
new_dor = (new_dor & 0x0c) | floppy_selects[fdc][new_dor & 3]; \
|
||||
else \
|
||||
new_dor &= 0x0c; \
|
||||
outb(new_dor, FD_DOR); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* Someday, we'll automatically detect which drives are present...
|
||||
*/
|
||||
static inline void fd_scandrives (void)
|
||||
{
|
||||
#if 0
|
||||
int floppy, drive_count;
|
||||
|
||||
fd_disable_irq();
|
||||
raw_cmd = &default_raw_cmd;
|
||||
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_SEEK;
|
||||
raw_cmd->track = 0;
|
||||
raw_cmd->rate = ?;
|
||||
drive_count = 0;
|
||||
for (floppy = 0; floppy < 4; floppy ++) {
|
||||
current_drive = drive_count;
|
||||
/*
|
||||
* Turn on floppy motor
|
||||
*/
|
||||
if (start_motor(redo_fd_request))
|
||||
continue;
|
||||
/*
|
||||
* Set up FDC
|
||||
*/
|
||||
fdc_specify();
|
||||
/*
|
||||
* Tell FDC to recalibrate
|
||||
*/
|
||||
output_byte(FD_RECALIBRATE);
|
||||
LAST_OUT(UNIT(floppy));
|
||||
/* wait for command to complete */
|
||||
if (!successful) {
|
||||
int i;
|
||||
for (i = drive_count; i < 3; i--)
|
||||
floppy_selects[fdc][i] = floppy_selects[fdc][i + 1];
|
||||
floppy_selects[fdc][3] = 0;
|
||||
floppy -= 1;
|
||||
} else
|
||||
drive_count++;
|
||||
}
|
||||
#else
|
||||
floppy_selects[0][0] = 0x10;
|
||||
floppy_selects[0][1] = 0x21;
|
||||
floppy_selects[0][2] = 0x23;
|
||||
floppy_selects[0][3] = 0x33;
|
||||
#endif
|
||||
}
|
||||
static unsigned char floppy_selects[4] = { 0x10, 0x21, 0x23, 0x33 };
|
||||
|
||||
#define FDC1 (0x3f0)
|
||||
|
||||
|
@ -135,9 +75,7 @@ static inline void fd_scandrives (void)
|
|||
*/
|
||||
static void driveswap(int *ints, int dummy, int dummy2)
|
||||
{
|
||||
floppy_selects[0][0] ^= floppy_selects[0][1];
|
||||
floppy_selects[0][1] ^= floppy_selects[0][0];
|
||||
floppy_selects[0][0] ^= floppy_selects[0][1];
|
||||
swap(floppy_selects[0], floppy_selects[1]);
|
||||
}
|
||||
|
||||
#define EXTRA_FLOPPY_PARAMS ,{ "driveswap", &driveswap, NULL, 0, 0 }
|
||||
|
|
|
@@ -628,6 +628,9 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}

t->backing_dev_info->io_pages =
t->limits.max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(disk_stack_limits);

@@ -20,6 +20,38 @@

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
ZONE_COND_NAME(NOT_WP),
ZONE_COND_NAME(EMPTY),
ZONE_COND_NAME(IMP_OPEN),
ZONE_COND_NAME(EXP_OPEN),
ZONE_COND_NAME(CLOSED),
ZONE_COND_NAME(READONLY),
ZONE_COND_NAME(FULL),
ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME

/**
* blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
* @zone_cond: BLK_ZONE_COND_XXX.
*
* Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
* into string format. Useful in the debugging and tracing zone conditions. For
* invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
*/
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
static const char *zone_cond_str = "UNKNOWN";

if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
zone_cond_str = zone_cond_name[zone_cond];

return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);

static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
{

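For context, a minimal sketch of how a caller could use the new blk_zone_cond_str() helper when logging zone state. This is illustrative only and not part of the patch; example_log_zone() is a hypothetical function, and it assumes the helper is exposed through the block headers as added by this series.

#include <linux/blkdev.h>	/* struct blk_zone, blk_zone_cond_str() */
#include <linux/printk.h>

/* Hypothetical helper: report a zone's condition in human-readable form. */
static void example_log_zone(const struct blk_zone *zone)
{
	pr_info("zone at sector %llu: cond=%s\n",
		(unsigned long long)zone->start,
		blk_zone_cond_str(zone->cond));
}
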
@@ -6,6 +6,9 @@
# Rewritten to use lists instead of if-statements.
#

# needed for trace events
ccflags-y += -I$(src)

obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o

@@ -39,6 +42,9 @@ obj-$(CONFIG_ZRAM) += zram/

obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o
null_blk-objs := null_blk_main.o
ifeq ($(CONFIG_BLK_DEV_ZONED), y)
null_blk-$(CONFIG_TRACING) += null_blk_trace.o
endif
null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o

skd-y := skd_main.o

@@ -87,9 +87,9 @@ static ssize_t aoedisk_show_netif(struct device *dev,
if (*nd == NULL)
return snprintf(page, PAGE_SIZE, "none\n");
for (p = page; nd < ne; nd++)
p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
p += snprintf(p, PAGE_SIZE - (p-page), "\n");
p += scnprintf(p, PAGE_SIZE - (p-page), "\n");
return p-page;
}
/* firmware version */

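As background for the snprintf() -> scnprintf() conversions throughout this pull: snprintf() returns the length that would have been written had the buffer been big enough, so accumulating its return value into an offset can walk past the end of the buffer, while scnprintf() returns the number of bytes actually written (excluding the trailing NUL) and therefore keeps the offset in bounds. Below is a minimal sketch of the safe accumulation pattern; the function name, buffer size and contents are illustrative, not from the patch.

#include <linux/kernel.h>	/* scnprintf() */

static size_t example_fill(char *buf, size_t len)
{
	size_t off = 0;
	int i;

	/* off can never exceed len, even when the items do not all fit. */
	for (i = 0; i < 16; i++)
		off += scnprintf(buf + off, len - off, "item%d ", i);
	off += scnprintf(buf + off, len - off, "\n");

	return off;
}
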
@ -3413,22 +3413,11 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
|
|||
* the meta-data super block. This function sets MD_DIRTY, and starts a
|
||||
* timer that ensures that within five seconds you have to call drbd_md_sync().
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
|
||||
{
|
||||
if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
|
||||
mod_timer(&device->md_sync_timer, jiffies + HZ);
|
||||
device->last_md_mark_dirty.line = line;
|
||||
device->last_md_mark_dirty.func = func;
|
||||
}
|
||||
}
|
||||
#else
|
||||
void drbd_md_mark_dirty(struct drbd_device *device)
|
||||
{
|
||||
if (!test_and_set_bit(MD_DIRTY, &device->flags))
|
||||
mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
|
||||
}
|
||||
#endif
|
||||
|
||||
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
|
||||
{
|
||||
|
|
File diff suppressed because it is too large
|
@@ -214,7 +214,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
* LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
* will get updated by ioctl(LOOP_GET_STATUS)
*/
blk_mq_freeze_queue(lo->lo_queue);
if (lo->lo_state == Lo_bound)
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
if (use_dio) {
blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);

@@ -223,7 +224,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
}
blk_mq_unfreeze_queue(lo->lo_queue);
if (lo->lo_state == Lo_bound)
blk_mq_unfreeze_queue(lo->lo_queue);
}

static int

@@ -1539,16 +1541,16 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
return -EINVAL;

if (lo->lo_queue->limits.logical_block_size != arg) {
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
}
if (lo->lo_queue->limits.logical_block_size == arg)
return 0;

sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);

blk_mq_freeze_queue(lo->lo_queue);

/* kill_bdev should have truncated all the pages */
if (lo->lo_queue->limits.logical_block_size != arg &&
lo->lo_device->bd_inode->i_mapping->nrpages) {
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,

@ -395,16 +395,19 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
|
|||
}
|
||||
config = nbd->config;
|
||||
|
||||
if (config->num_connections > 1) {
|
||||
if (config->num_connections > 1 ||
|
||||
(config->num_connections == 1 && nbd->tag_set.timeout)) {
|
||||
dev_err_ratelimited(nbd_to_dev(nbd),
|
||||
"Connection timed out, retrying (%d/%d alive)\n",
|
||||
atomic_read(&config->live_connections),
|
||||
config->num_connections);
|
||||
/*
|
||||
* Hooray we have more connections, requeue this IO, the submit
|
||||
* path will put it on a real connection.
|
||||
* path will put it on a real connection. Or if only one
|
||||
* connection is configured, the submit path will wait util
|
||||
* a new connection is reconfigured or util dead timeout.
|
||||
*/
|
||||
if (config->socks && config->num_connections > 1) {
|
||||
if (config->socks) {
|
||||
if (cmd->index < config->num_connections) {
|
||||
struct nbd_sock *nsock =
|
||||
config->socks[cmd->index];
|
||||
|
@ -431,12 +434,22 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
|
|||
* Userspace sets timeout=0 to disable socket disconnection,
|
||||
* so just warn and reset the timer.
|
||||
*/
|
||||
struct nbd_sock *nsock = config->socks[cmd->index];
|
||||
cmd->retries++;
|
||||
dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
|
||||
req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
|
||||
(unsigned long long)blk_rq_pos(req) << 9,
|
||||
blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
|
||||
|
||||
mutex_lock(&nsock->tx_lock);
|
||||
if (cmd->cookie != nsock->cookie) {
|
||||
nbd_requeue_cmd(cmd);
|
||||
mutex_unlock(&nsock->tx_lock);
|
||||
mutex_unlock(&cmd->lock);
|
||||
nbd_config_put(nbd);
|
||||
return BLK_EH_DONE;
|
||||
}
|
||||
mutex_unlock(&nsock->tx_lock);
|
||||
mutex_unlock(&cmd->lock);
|
||||
nbd_config_put(nbd);
|
||||
return BLK_EH_RESET_TIMER;
|
||||
|
@ -741,14 +754,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
|
|||
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
|
||||
result);
|
||||
/*
|
||||
* If we've disconnected or we only have 1
|
||||
* connection then we need to make sure we
|
||||
* If we've disconnected, we need to make sure we
|
||||
* complete this request, otherwise error out
|
||||
* and let the timeout stuff handle resubmitting
|
||||
* this request onto another connection.
|
||||
*/
|
||||
if (nbd_disconnected(config) ||
|
||||
config->num_connections <= 1) {
|
||||
if (nbd_disconnected(config)) {
|
||||
cmd->status = BLK_STS_IOERR;
|
||||
goto out;
|
||||
}
|
||||
|
@ -825,7 +836,7 @@ static int find_fallback(struct nbd_device *nbd, int index)
|
|||
|
||||
if (config->num_connections <= 1) {
|
||||
dev_err_ratelimited(disk_to_dev(nbd->disk),
|
||||
"Attempted send on invalid socket\n");
|
||||
"Dead connection, failed to find a fallback\n");
|
||||
return new_index;
|
||||
}
|
||||
|
||||
|
|
|
@ -97,14 +97,21 @@ module_param_named(home_node, g_home_node, int, 0444);
|
|||
MODULE_PARM_DESC(home_node, "Home node for the device");
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
|
||||
/*
|
||||
* For more details about fault injection, please refer to
|
||||
* Documentation/fault-injection/fault-injection.rst.
|
||||
*/
|
||||
static char g_timeout_str[80];
|
||||
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
|
||||
MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
|
||||
|
||||
static char g_requeue_str[80];
|
||||
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
|
||||
MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
|
||||
|
||||
static char g_init_hctx_str[80];
|
||||
module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
|
||||
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
|
||||
#endif
|
||||
|
||||
static int g_queue_mode = NULL_Q_MQ;
|
||||
|
@ -615,6 +622,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
|
|||
if (tag != -1U) {
|
||||
cmd = &nq->cmds[tag];
|
||||
cmd->tag = tag;
|
||||
cmd->error = BLK_STS_OK;
|
||||
cmd->nq = nq;
|
||||
if (nq->dev->irqmode == NULL_IRQ_TIMER) {
|
||||
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
|
||||
|
@ -1395,6 +1403,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
cmd->timer.function = null_cmd_timer_expired;
|
||||
}
|
||||
cmd->rq = bd->rq;
|
||||
cmd->error = BLK_STS_OK;
|
||||
cmd->nq = nq;
|
||||
|
||||
blk_mq_start_request(bd->rq);
|
||||
|
|
drivers/block/null_blk_trace.c (new file, 21 lines)
|
@@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
/*
* null_blk trace related helpers.
*
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
#include "null_blk_trace.h"

/*
* Helper to use for all null_blk traces to extract disk name.
*/
const char *nullb_trace_disk_name(struct trace_seq *p, char *name)
{
const char *ret = trace_seq_buffer_ptr(p);

if (name && *name)
trace_seq_printf(p, "disk=%s, ", name);
trace_seq_putc(p, 0);

return ret;
}
drivers/block/null_blk_trace.h (new file, 79 lines)
|
@ -0,0 +1,79 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* null_blk device driver tracepoints.
|
||||
*
|
||||
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
|
||||
*/
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM nullb
|
||||
|
||||
#if !defined(_TRACE_NULLB_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_NULLB_H
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
#include <linux/trace_seq.h>
|
||||
|
||||
#include "null_blk.h"
|
||||
|
||||
const char *nullb_trace_disk_name(struct trace_seq *p, char *name);
|
||||
|
||||
#define __print_disk_name(name) nullb_trace_disk_name(p, name)
|
||||
|
||||
#ifndef TRACE_HEADER_MULTI_READ
|
||||
static inline void __assign_disk_name(char *name, struct gendisk *disk)
|
||||
{
|
||||
if (disk)
|
||||
memcpy(name, disk->disk_name, DISK_NAME_LEN);
|
||||
else
|
||||
memset(name, 0, DISK_NAME_LEN);
|
||||
}
|
||||
#endif
|
||||
|
||||
TRACE_EVENT(nullb_zone_op,
|
||||
TP_PROTO(struct nullb_cmd *cmd, unsigned int zone_no,
|
||||
unsigned int zone_cond),
|
||||
TP_ARGS(cmd, zone_no, zone_cond),
|
||||
TP_STRUCT__entry(
|
||||
__array(char, disk, DISK_NAME_LEN)
|
||||
__field(enum req_opf, op)
|
||||
__field(unsigned int, zone_no)
|
||||
__field(unsigned int, zone_cond)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->op = req_op(cmd->rq);
|
||||
__entry->zone_no = zone_no;
|
||||
__entry->zone_cond = zone_cond;
|
||||
__assign_disk_name(__entry->disk, cmd->rq->rq_disk);
|
||||
),
|
||||
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
|
||||
__print_disk_name(__entry->disk),
|
||||
blk_op_str(__entry->op),
|
||||
__entry->zone_no,
|
||||
blk_zone_cond_str(__entry->zone_cond))
|
||||
);
|
||||
|
||||
TRACE_EVENT(nullb_report_zones,
|
||||
TP_PROTO(struct nullb *nullb, unsigned int nr_zones),
|
||||
TP_ARGS(nullb, nr_zones),
|
||||
TP_STRUCT__entry(
|
||||
__array(char, disk, DISK_NAME_LEN)
|
||||
__field(unsigned int, nr_zones)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->nr_zones = nr_zones;
|
||||
__assign_disk_name(__entry->disk, nullb->disk);
|
||||
),
|
||||
TP_printk("%s nr_zones=%u",
|
||||
__print_disk_name(__entry->disk), __entry->nr_zones)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_NULLB_H */
|
||||
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE null_blk_trace
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
|
|
@ -2,6 +2,9 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include "null_blk.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "null_blk_trace.h"
|
||||
|
||||
/* zone_size in MBs to sectors. */
|
||||
#define ZONE_SIZE_SHIFT 11
|
||||
|
||||
|
@ -80,6 +83,8 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
|
|||
return 0;
|
||||
|
||||
nr_zones = min(nr_zones, dev->nr_zones - first_zone);
|
||||
trace_nullb_report_zones(nullb, nr_zones);
|
||||
|
||||
for (i = 0; i < nr_zones; i++) {
|
||||
/*
|
||||
* Stacked DM target drivers will remap the zone information by
|
||||
|
@ -148,6 +153,8 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
|
|||
/* Invalid zone condition */
|
||||
return BLK_STS_IOERR;
|
||||
}
|
||||
|
||||
trace_nullb_zone_op(cmd, zno, zone->cond);
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
|
@ -155,7 +162,8 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
|
|||
sector_t sector)
|
||||
{
|
||||
struct nullb_device *dev = cmd->nq->dev;
|
||||
struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
|
||||
unsigned int zone_no = null_zone_no(dev, sector);
|
||||
struct blk_zone *zone = &dev->zones[zone_no];
|
||||
size_t i;
|
||||
|
||||
switch (op) {
|
||||
|
@ -203,6 +211,8 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
|
|||
default:
|
||||
return BLK_STS_NOTSUPP;
|
||||
}
|
||||
|
||||
trace_nullb_zone_op(cmd, zone_no, zone->cond);
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
|
|
|
@@ -80,7 +80,7 @@ struct dma_tracker {
struct dma_tracker_list {
spinlock_t lock;
int head;
struct dma_tracker list[0];
struct dma_tracker list[];
};

@ -37,7 +37,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
|
|||
active = 0;
|
||||
up(&rlun->wr_sem);
|
||||
}
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"pblk: pos:%d, ch:%d, lun:%d - %d\n",
|
||||
i,
|
||||
rlun->bppa.a.ch,
|
||||
|
@ -120,7 +120,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
|
|||
struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&pblk->addrf;
|
||||
struct nvm_addrf_12 *gppaf = (struct nvm_addrf_12 *)&geo->addrf;
|
||||
|
||||
sz = snprintf(page, PAGE_SIZE,
|
||||
sz = scnprintf(page, PAGE_SIZE,
|
||||
"g:(b:%d)blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
|
||||
pblk->addrf_len,
|
||||
ppaf->blk_offset, ppaf->blk_len,
|
||||
|
@ -130,7 +130,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
|
|||
ppaf->pln_offset, ppaf->pln_len,
|
||||
ppaf->sec_offset, ppaf->sec_len);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"d:blk:%d/%d,pg:%d/%d,lun:%d/%d,ch:%d/%d,pl:%d/%d,sec:%d/%d\n",
|
||||
gppaf->blk_offset, gppaf->blk_len,
|
||||
gppaf->pg_offset, gppaf->pg_len,
|
||||
|
@ -142,7 +142,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
|
|||
struct nvm_addrf *ppaf = &pblk->addrf;
|
||||
struct nvm_addrf *gppaf = &geo->addrf;
|
||||
|
||||
sz = snprintf(page, PAGE_SIZE,
|
||||
sz = scnprintf(page, PAGE_SIZE,
|
||||
"pblk:(s:%d)ch:%d/%d,lun:%d/%d,chk:%d/%d/sec:%d/%d\n",
|
||||
pblk->addrf_len,
|
||||
ppaf->ch_offset, ppaf->ch_len,
|
||||
|
@ -150,7 +150,7 @@ static ssize_t pblk_sysfs_ppaf(struct pblk *pblk, char *page)
|
|||
ppaf->chk_offset, ppaf->chk_len,
|
||||
ppaf->sec_offset, ppaf->sec_len);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"device:ch:%d/%d,lun:%d/%d,chk:%d/%d,sec:%d/%d\n",
|
||||
gppaf->ch_offset, gppaf->ch_len,
|
||||
gppaf->lun_offset, gppaf->lun_len,
|
||||
|
@ -278,11 +278,11 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
|
|||
pblk_err(pblk, "corrupted free line list:%d/%d\n",
|
||||
nr_free_lines, free_line_cnt);
|
||||
|
||||
sz = snprintf(page, PAGE_SIZE - sz,
|
||||
sz = scnprintf(page, PAGE_SIZE - sz,
|
||||
"line: nluns:%d, nblks:%d, nsecs:%d\n",
|
||||
geo->all_luns, lm->blk_per_line, lm->sec_per_line);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
|
||||
cur_data, cur_log,
|
||||
nr_free_lines,
|
||||
|
@ -292,12 +292,12 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
|
|||
d_line_cnt, l_line_cnt,
|
||||
l_mg->nr_lines);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"GC: full:%d, high:%d, mid:%d, low:%d, empty:%d, werr: %d, queue:%d\n",
|
||||
gc_full, gc_high, gc_mid, gc_low, gc_empty, gc_werr,
|
||||
atomic_read(&pblk->gc.read_inflight_gc));
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"data (%d) cur:%d, left:%d, vsc:%d, s:%d, map:%d/%d (%d)\n",
|
||||
cur_data, cur_sec, msecs, vsc, sec_in_line,
|
||||
map_weight, lm->sec_per_line,
|
||||
|
@ -313,19 +313,19 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
|
|||
struct pblk_line_meta *lm = &pblk->lm;
|
||||
ssize_t sz = 0;
|
||||
|
||||
sz = snprintf(page, PAGE_SIZE - sz,
|
||||
sz = scnprintf(page, PAGE_SIZE - sz,
|
||||
"smeta - len:%d, secs:%d\n",
|
||||
lm->smeta_len, lm->smeta_sec);
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"emeta - len:%d, sec:%d, bb_start:%d\n",
|
||||
lm->emeta_len[0], lm->emeta_sec[0],
|
||||
lm->emeta_bb);
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"bitmap lengths: sec:%d, blk:%d, lun:%d\n",
|
||||
lm->sec_bitmap_len,
|
||||
lm->blk_bitmap_len,
|
||||
lm->lun_bitmap_len);
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
|
||||
lm->blk_per_line,
|
||||
lm->sec_per_line,
|
||||
|
@ -344,12 +344,12 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
|
|||
{
|
||||
int sz;
|
||||
|
||||
sz = snprintf(page, PAGE_SIZE,
|
||||
sz = scnprintf(page, PAGE_SIZE,
|
||||
"user:%lld gc:%lld pad:%lld WA:",
|
||||
user, gc, pad);
|
||||
|
||||
if (!user) {
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "NaN\n");
|
||||
} else {
|
||||
u64 wa_int;
|
||||
u32 wa_frac;
|
||||
|
@ -358,7 +358,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
|
|||
wa_int = div64_u64(wa_int, user);
|
||||
wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
|
||||
wa_int, wa_frac);
|
||||
}
|
||||
|
||||
|
@ -401,9 +401,9 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
|
|||
total = atomic64_read(&pblk->nr_flush) - pblk->nr_flush_rst;
|
||||
if (!total) {
|
||||
for (i = 0; i < (buckets + 1); i++)
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz,
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz,
|
||||
"%d:0 ", i);
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
|
||||
|
||||
return sz;
|
||||
}
|
||||
|
@ -411,7 +411,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
|
|||
for (i = 0; i < buckets; i++)
|
||||
total_buckets += atomic64_read(&pblk->pad_dist[i]);
|
||||
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "0:%lld%% ",
|
||||
bucket_percentage(total - total_buckets, total));
|
||||
|
||||
for (i = 0; i < buckets; i++) {
|
||||
|
@ -419,10 +419,10 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page)
|
|||
|
||||
p = bucket_percentage(atomic64_read(&pblk->pad_dist[i]),
|
||||
total);
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "%d:%lld%% ",
|
||||
i + 1, p);
|
||||
}
|
||||
sz += snprintf(page + sz, PAGE_SIZE - sz, "\n");
|
||||
sz += scnprintf(page + sz, PAGE_SIZE - sz, "\n");
|
||||
|
||||
return sz;
|
||||
}
|
||||
|
|
|
@ -101,64 +101,6 @@
|
|||
|
||||
#define insert_lock(s, b) ((b)->level <= (s)->lock)
|
||||
|
||||
/*
|
||||
* These macros are for recursing down the btree - they handle the details of
|
||||
* locking and looking up nodes in the cache for you. They're best treated as
|
||||
* mere syntax when reading code that uses them.
|
||||
*
|
||||
* op->lock determines whether we take a read or a write lock at a given depth.
|
||||
* If you've got a read lock and find that you need a write lock (i.e. you're
|
||||
* going to have to split), set op->lock and return -EINTR; btree_root() will
|
||||
* call you again and you'll have the correct lock.
|
||||
*/
|
||||
|
||||
/**
|
||||
* btree - recurse down the btree on a specified key
|
||||
* @fn: function to call, which will be passed the child node
|
||||
* @key: key to recurse on
|
||||
* @b: parent btree node
|
||||
* @op: pointer to struct btree_op
|
||||
*/
|
||||
#define btree(fn, key, b, op, ...) \
|
||||
({ \
|
||||
int _r, l = (b)->level - 1; \
|
||||
bool _w = l <= (op)->lock; \
|
||||
struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
|
||||
_w, b); \
|
||||
if (!IS_ERR(_child)) { \
|
||||
_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
|
||||
rw_unlock(_w, _child); \
|
||||
} else \
|
||||
_r = PTR_ERR(_child); \
|
||||
_r; \
|
||||
})
|
||||
|
||||
/**
|
||||
* btree_root - call a function on the root of the btree
|
||||
* @fn: function to call, which will be passed the child node
|
||||
* @c: cache set
|
||||
* @op: pointer to struct btree_op
|
||||
*/
|
||||
#define btree_root(fn, c, op, ...) \
|
||||
({ \
|
||||
int _r = -EINTR; \
|
||||
do { \
|
||||
struct btree *_b = (c)->root; \
|
||||
bool _w = insert_lock(op, _b); \
|
||||
rw_lock(_w, _b, _b->level); \
|
||||
if (_b == (c)->root && \
|
||||
_w == insert_lock(op, _b)) { \
|
||||
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
|
||||
} \
|
||||
rw_unlock(_w, _b); \
|
||||
bch_cannibalize_unlock(c); \
|
||||
if (_r == -EINTR) \
|
||||
schedule(); \
|
||||
} while (_r == -EINTR); \
|
||||
\
|
||||
finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
|
||||
_r; \
|
||||
})
|
||||
|
||||
static inline struct bset *write_block(struct btree *b)
|
||||
{
|
||||
|
@ -1848,7 +1790,7 @@ static void bch_btree_gc(struct cache_set *c)
|
|||
|
||||
/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
|
||||
do {
|
||||
ret = btree_root(gc_root, c, &op, &writes, &stats);
|
||||
ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
|
||||
closure_sync(&writes);
|
||||
cond_resched();
|
||||
|
||||
|
@ -1946,7 +1888,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
|
|||
}
|
||||
|
||||
if (p)
|
||||
ret = btree(check_recurse, p, b, op);
|
||||
ret = bcache_btree(check_recurse, p, b, op);
|
||||
|
||||
p = k;
|
||||
} while (p && !ret);
|
||||
|
@ -1955,13 +1897,176 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int bch_btree_check_thread(void *arg)
|
||||
{
|
||||
int ret;
|
||||
struct btree_check_info *info = arg;
|
||||
struct btree_check_state *check_state = info->state;
|
||||
struct cache_set *c = check_state->c;
|
||||
struct btree_iter iter;
|
||||
struct bkey *k, *p;
|
||||
int cur_idx, prev_idx, skip_nr;
|
||||
int i, n;
|
||||
|
||||
k = p = NULL;
|
||||
i = n = 0;
|
||||
cur_idx = prev_idx = 0;
|
||||
ret = 0;
|
||||
|
||||
/* root node keys are checked before thread created */
|
||||
bch_btree_iter_init(&c->root->keys, &iter, NULL);
|
||||
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
|
||||
BUG_ON(!k);
|
||||
|
||||
p = k;
|
||||
while (k) {
|
||||
/*
|
||||
* Fetch a root node key index, skip the keys which
|
||||
* should be fetched by other threads, then check the
|
||||
* sub-tree indexed by the fetched key.
|
||||
*/
|
||||
spin_lock(&check_state->idx_lock);
|
||||
cur_idx = check_state->key_idx;
|
||||
check_state->key_idx++;
|
||||
spin_unlock(&check_state->idx_lock);
|
||||
|
||||
skip_nr = cur_idx - prev_idx;
|
||||
|
||||
while (skip_nr) {
|
||||
k = bch_btree_iter_next_filter(&iter,
|
||||
&c->root->keys,
|
||||
bch_ptr_bad);
|
||||
if (k)
|
||||
p = k;
|
||||
else {
|
||||
/*
|
||||
* No more keys to check in root node,
|
||||
* current checking threads are enough,
|
||||
* stop creating more.
|
||||
*/
|
||||
atomic_set(&check_state->enough, 1);
|
||||
/* Update check_state->enough earlier */
|
||||
smp_mb__after_atomic();
|
||||
goto out;
|
||||
}
|
||||
skip_nr--;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
if (p) {
|
||||
struct btree_op op;
|
||||
|
||||
btree_node_prefetch(c->root, p);
|
||||
c->gc_stats.nodes++;
|
||||
bch_btree_op_init(&op, 0);
|
||||
ret = bcache_btree(check_recurse, p, c->root, &op);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
p = NULL;
|
||||
prev_idx = cur_idx;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
out:
|
||||
info->result = ret;
|
||||
/* update check_state->started among all CPUs */
|
||||
smp_mb__before_atomic();
|
||||
if (atomic_dec_and_test(&check_state->started))
|
||||
wake_up(&check_state->wait);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
|
||||
static int bch_btree_chkthread_nr(void)
|
||||
{
|
||||
int n = num_online_cpus()/2;
|
||||
|
||||
if (n == 0)
|
||||
n = 1;
|
||||
else if (n > BCH_BTR_CHKTHREAD_MAX)
|
||||
n = BCH_BTR_CHKTHREAD_MAX;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
int bch_btree_check(struct cache_set *c)
|
||||
{
|
||||
struct btree_op op;
|
||||
int ret = 0;
|
||||
int i;
|
||||
struct bkey *k = NULL;
|
||||
struct btree_iter iter;
|
||||
struct btree_check_state *check_state;
|
||||
char name[32];
|
||||
|
||||
bch_btree_op_init(&op, SHRT_MAX);
|
||||
/* check and mark root node keys */
|
||||
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
|
||||
bch_initial_mark_key(c, c->root->level, k);
|
||||
|
||||
return btree_root(check_recurse, c, &op);
|
||||
bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
|
||||
|
||||
if (c->root->level == 0)
|
||||
return 0;
|
||||
|
||||
check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
|
||||
if (!check_state)
|
||||
return -ENOMEM;
|
||||
|
||||
check_state->c = c;
|
||||
check_state->total_threads = bch_btree_chkthread_nr();
|
||||
check_state->key_idx = 0;
|
||||
spin_lock_init(&check_state->idx_lock);
|
||||
atomic_set(&check_state->started, 0);
|
||||
atomic_set(&check_state->enough, 0);
|
||||
init_waitqueue_head(&check_state->wait);
|
||||
|
||||
/*
|
||||
* Run multiple threads to check btree nodes in parallel,
|
||||
* if check_state->enough is non-zero, it means current
|
||||
* running check threads are enough, unncessary to create
|
||||
* more.
|
||||
*/
|
||||
for (i = 0; i < check_state->total_threads; i++) {
|
||||
/* fetch latest check_state->enough earlier */
|
||||
smp_mb__before_atomic();
|
||||
if (atomic_read(&check_state->enough))
|
||||
break;
|
||||
|
||||
check_state->infos[i].result = 0;
|
||||
check_state->infos[i].state = check_state;
|
||||
snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
|
||||
atomic_inc(&check_state->started);
|
||||
|
||||
check_state->infos[i].thread =
|
||||
kthread_run(bch_btree_check_thread,
|
||||
&check_state->infos[i],
|
||||
name);
|
||||
if (IS_ERR(check_state->infos[i].thread)) {
|
||||
pr_err("fails to run thread bch_btrchk[%d]", i);
|
||||
for (--i; i >= 0; i--)
|
||||
kthread_stop(check_state->infos[i].thread);
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
wait_event_interruptible(check_state->wait,
|
||||
atomic_read(&check_state->started) == 0 ||
|
||||
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
|
||||
|
||||
for (i = 0; i < check_state->total_threads; i++) {
|
||||
if (check_state->infos[i].result) {
|
||||
ret = check_state->infos[i].result;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(check_state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bch_initial_gc_finish(struct cache_set *c)
|
||||
|
@ -2401,7 +2506,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
|
|||
|
||||
while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
|
||||
bch_ptr_bad))) {
|
||||
ret = btree(map_nodes_recurse, k, b,
|
||||
ret = bcache_btree(map_nodes_recurse, k, b,
|
||||
op, from, fn, flags);
|
||||
from = NULL;
|
||||
|
||||
|
@ -2419,10 +2524,10 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
|
|||
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
|
||||
struct bkey *from, btree_map_nodes_fn *fn, int flags)
|
||||
{
|
||||
return btree_root(map_nodes_recurse, c, op, from, fn, flags);
|
||||
return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
|
||||
}
|
||||
|
||||
static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
|
||||
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
|
||||
struct bkey *from, btree_map_keys_fn *fn,
|
||||
int flags)
|
||||
{
|
||||
|
@ -2435,7 +2540,8 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
|
|||
while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
|
||||
ret = !b->level
|
||||
? fn(op, b, k)
|
||||
: btree(map_keys_recurse, k, b, op, from, fn, flags);
|
||||
: bcache_btree(map_keys_recurse, k,
|
||||
b, op, from, fn, flags);
|
||||
from = NULL;
|
||||
|
||||
if (ret != MAP_CONTINUE)
|
||||
|
@ -2452,7 +2558,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
|
|||
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
|
||||
struct bkey *from, btree_map_keys_fn *fn, int flags)
|
||||
{
|
||||
return btree_root(map_keys_recurse, c, op, from, fn, flags);
|
||||
return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
|
||||
}
|
||||
|
||||
/* Keybuf code */
|
||||
|
|
|
@ -145,6 +145,9 @@ struct btree {
|
|||
struct bio *bio;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
#define BTREE_FLAG(flag) \
|
||||
static inline bool btree_node_ ## flag(struct btree *b) \
|
||||
{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
|
||||
|
@ -216,6 +219,25 @@ struct btree_op {
|
|||
unsigned int insert_collision:1;
|
||||
};
|
||||
|
||||
struct btree_check_state;
|
||||
struct btree_check_info {
|
||||
struct btree_check_state *state;
|
||||
struct task_struct *thread;
|
||||
int result;
|
||||
};
|
||||
|
||||
#define BCH_BTR_CHKTHREAD_MAX 64
|
||||
struct btree_check_state {
|
||||
struct cache_set *c;
|
||||
int total_threads;
|
||||
int key_idx;
|
||||
spinlock_t idx_lock;
|
||||
atomic_t started;
|
||||
atomic_t enough;
|
||||
wait_queue_head_t wait;
|
||||
struct btree_check_info infos[BCH_BTR_CHKTHREAD_MAX];
|
||||
};
|
||||
|
||||
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
|
||||
{
|
||||
memset(op, 0, sizeof(struct btree_op));
|
||||
|
@ -284,6 +306,65 @@ static inline void force_wake_up_gc(struct cache_set *c)
|
|||
wake_up_gc(c);
|
||||
}
|
||||
|
||||
/*
|
||||
* These macros are for recursing down the btree - they handle the details of
|
||||
* locking and looking up nodes in the cache for you. They're best treated as
|
||||
* mere syntax when reading code that uses them.
|
||||
*
|
||||
* op->lock determines whether we take a read or a write lock at a given depth.
|
||||
* If you've got a read lock and find that you need a write lock (i.e. you're
|
||||
* going to have to split), set op->lock and return -EINTR; btree_root() will
|
||||
* call you again and you'll have the correct lock.
|
||||
*/
|
||||
|
||||
/**
|
||||
* btree - recurse down the btree on a specified key
|
||||
* @fn: function to call, which will be passed the child node
|
||||
* @key: key to recurse on
|
||||
* @b: parent btree node
|
||||
* @op: pointer to struct btree_op
|
||||
*/
|
||||
#define bcache_btree(fn, key, b, op, ...) \
|
||||
({ \
|
||||
int _r, l = (b)->level - 1; \
|
||||
bool _w = l <= (op)->lock; \
|
||||
struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
|
||||
_w, b); \
|
||||
if (!IS_ERR(_child)) { \
|
||||
_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
|
||||
rw_unlock(_w, _child); \
|
||||
} else \
|
||||
_r = PTR_ERR(_child); \
|
||||
_r; \
|
||||
})
|
||||
|
||||
/**
|
||||
* btree_root - call a function on the root of the btree
|
||||
* @fn: function to call, which will be passed the child node
|
||||
* @c: cache set
|
||||
* @op: pointer to struct btree_op
|
||||
*/
|
||||
#define bcache_btree_root(fn, c, op, ...) \
|
||||
({ \
|
||||
int _r = -EINTR; \
|
||||
do { \
|
||||
struct btree *_b = (c)->root; \
|
||||
bool _w = insert_lock(op, _b); \
|
||||
rw_lock(_w, _b, _b->level); \
|
||||
if (_b == (c)->root && \
|
||||
_w == insert_lock(op, _b)) { \
|
||||
_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
|
||||
} \
|
||||
rw_unlock(_w, _b); \
|
||||
bch_cannibalize_unlock(c); \
|
||||
if (_r == -EINTR) \
|
||||
schedule(); \
|
||||
} while (_r == -EINTR); \
|
||||
\
|
||||
finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
|
||||
_r; \
|
||||
})
|
||||
|
||||
#define MAP_DONE 0
|
||||
#define MAP_CONTINUE 1
|
||||
|
||||
|
@ -314,6 +395,9 @@ typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
|
|||
struct bkey *k);
|
||||
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
|
||||
struct bkey *from, btree_map_keys_fn *fn, int flags);
|
||||
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
|
||||
struct bkey *from, btree_map_keys_fn *fn,
|
||||
int flags);
|
||||
|
||||
typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
|
||||
|
||||
|
|
|
@ -154,7 +154,7 @@ static ssize_t bch_snprint_string_list(char *buf,
|
|||
size_t i;
|
||||
|
||||
for (i = 0; list[i]; i++)
|
||||
out += snprintf(out, buf + size - out,
|
||||
out += scnprintf(out, buf + size - out,
|
||||
i == selected ? "[%s] " : "%s ", list[i]);
|
||||
|
||||
out[-1] = '\n';
|
||||
|
|
|
@ -183,7 +183,7 @@ static void update_writeback_rate(struct work_struct *work)
|
|||
*/
|
||||
set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
|
||||
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
|
||||
smp_mb();
|
||||
smp_mb__after_atomic();
|
||||
|
||||
/*
|
||||
* CACHE_SET_IO_DISABLE might be set via sysfs interface,
|
||||
|
@ -193,7 +193,7 @@ static void update_writeback_rate(struct work_struct *work)
|
|||
test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
|
||||
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
|
||||
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
|
||||
smp_mb();
|
||||
smp_mb__after_atomic();
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -229,7 +229,7 @@ static void update_writeback_rate(struct work_struct *work)
|
|||
*/
|
||||
clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
|
||||
/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
|
||||
smp_mb();
|
||||
smp_mb__after_atomic();
|
||||
}
|
||||
|
||||
static unsigned int writeback_delay(struct cached_dev *dc,
|
||||
|
@ -785,7 +785,9 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
|
|||
return MAP_CONTINUE;
|
||||
}
|
||||
|
||||
void bch_sectors_dirty_init(struct bcache_device *d)
|
||||
static int bch_root_node_dirty_init(struct cache_set *c,
|
||||
struct bcache_device *d,
|
||||
struct bkey *k)
|
||||
{
|
||||
struct sectors_dirty_init op;
|
||||
int ret;
|
||||
|
@ -796,8 +798,13 @@ void bch_sectors_dirty_init(struct bcache_device *d)
|
|||
op.start = KEY(op.inode, 0, 0);
|
||||
|
||||
do {
|
||||
ret = bch_btree_map_keys(&op.op, d->c, &op.start,
|
||||
sectors_dirty_init_fn, 0);
|
||||
ret = bcache_btree(map_keys_recurse,
|
||||
k,
|
||||
c->root,
|
||||
&op.op,
|
||||
&op.start,
|
||||
sectors_dirty_init_fn,
|
||||
0);
|
||||
if (ret == -EAGAIN)
|
||||
schedule_timeout_interruptible(
|
||||
msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
|
||||
|
@ -806,6 +813,151 @@ void bch_sectors_dirty_init(struct bcache_device *d)
|
|||
break;
|
||||
}
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch_dirty_init_thread(void *arg)
|
||||
{
|
||||
struct dirty_init_thrd_info *info = arg;
|
||||
struct bch_dirty_init_state *state = info->state;
|
||||
struct cache_set *c = state->c;
|
||||
struct btree_iter iter;
|
||||
struct bkey *k, *p;
|
||||
int cur_idx, prev_idx, skip_nr;
|
||||
int i;
|
||||
|
||||
k = p = NULL;
|
||||
i = 0;
|
||||
cur_idx = prev_idx = 0;
|
||||
|
||||
bch_btree_iter_init(&c->root->keys, &iter, NULL);
|
||||
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
|
||||
BUG_ON(!k);
|
||||
|
||||
p = k;
|
||||
|
||||
while (k) {
|
||||
spin_lock(&state->idx_lock);
|
||||
cur_idx = state->key_idx;
|
||||
state->key_idx++;
|
||||
spin_unlock(&state->idx_lock);
|
||||
|
||||
skip_nr = cur_idx - prev_idx;
|
||||
|
||||
while (skip_nr) {
|
||||
k = bch_btree_iter_next_filter(&iter,
|
||||
&c->root->keys,
|
||||
bch_ptr_bad);
|
||||
if (k)
|
||||
p = k;
|
||||
else {
|
||||
atomic_set(&state->enough, 1);
|
||||
/* Update state->enough earlier */
|
||||
smp_mb__after_atomic();
|
||||
goto out;
|
||||
}
|
||||
skip_nr--;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
if (p) {
|
||||
if (bch_root_node_dirty_init(c, state->d, p) < 0)
|
||||
goto out;
|
||||
}
|
||||
|
||||
p = NULL;
|
||||
prev_idx = cur_idx;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
out:
|
||||
/* In order to wake up state->wait in time */
|
||||
smp_mb__before_atomic();
|
||||
if (atomic_dec_and_test(&state->started))
|
||||
wake_up(&state->wait);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bch_btre_dirty_init_thread_nr(void)
|
||||
{
|
||||
int n = num_online_cpus()/2;
|
||||
|
||||
if (n == 0)
|
||||
n = 1;
|
||||
else if (n > BCH_DIRTY_INIT_THRD_MAX)
|
||||
n = BCH_DIRTY_INIT_THRD_MAX;
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
void bch_sectors_dirty_init(struct bcache_device *d)
|
||||
{
|
||||
int i;
|
||||
struct bkey *k = NULL;
|
||||
struct btree_iter iter;
|
||||
struct sectors_dirty_init op;
|
||||
struct cache_set *c = d->c;
|
||||
struct bch_dirty_init_state *state;
|
||||
char name[32];
|
||||
|
||||
/* Just count root keys if no leaf node */
|
||||
if (c->root->level == 0) {
|
||||
bch_btree_op_init(&op.op, -1);
|
||||
op.inode = d->id;
|
||||
op.count = 0;
|
||||
op.start = KEY(op.inode, 0, 0);
|
||||
|
||||
for_each_key_filter(&c->root->keys,
|
||||
k, &iter, bch_ptr_invalid)
|
||||
sectors_dirty_init_fn(&op.op, c->root, k);
|
||||
return;
|
||||
}
|
||||
|
||||
state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
|
||||
if (!state) {
|
||||
pr_warn("sectors dirty init failed: cannot allocate memory");
|
||||
return;
|
||||
}
|
||||
|
||||
state->c = c;
|
||||
state->d = d;
|
||||
state->total_threads = bch_btre_dirty_init_thread_nr();
|
||||
state->key_idx = 0;
|
||||
spin_lock_init(&state->idx_lock);
|
||||
atomic_set(&state->started, 0);
|
||||
atomic_set(&state->enough, 0);
|
||||
init_waitqueue_head(&state->wait);
|
||||
|
||||
for (i = 0; i < state->total_threads; i++) {
|
||||
/* Fetch latest state->enough earlier */
|
||||
smp_mb__before_atomic();
|
||||
if (atomic_read(&state->enough))
|
||||
break;
|
||||
|
||||
state->infos[i].state = state;
|
||||
atomic_inc(&state->started);
|
||||
snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
|
||||
|
||||
state->infos[i].thread =
|
||||
kthread_run(bch_dirty_init_thread,
|
||||
&state->infos[i],
|
||||
name);
|
||||
if (IS_ERR(state->infos[i].thread)) {
|
||||
pr_err("fails to run thread bch_dirty_init[%d]", i);
|
||||
for (--i; i >= 0; i--)
|
||||
kthread_stop(state->infos[i].thread);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
wait_event_interruptible(state->wait,
|
||||
atomic_read(&state->started) == 0 ||
|
||||
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
|
||||
|
||||
out:
|
||||
kfree(state);
|
||||
}
|
||||
|
||||
void bch_cached_dev_writeback_init(struct cached_dev *dc)
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
|
||||
#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
|
||||
|
||||
#define BCH_DIRTY_INIT_THRD_MAX 64
|
||||
/*
|
||||
* 14 (16384ths) is chosen here as something that each backing device
|
||||
* should be a reasonable fraction of the share, and not to blow up
|
||||
|
@ -23,6 +24,24 @@
|
|||
*/
|
||||
#define WRITEBACK_SHARE_SHIFT 14
|
||||
|
||||
struct bch_dirty_init_state;
|
||||
struct dirty_init_thrd_info {
|
||||
struct bch_dirty_init_state *state;
|
||||
struct task_struct *thread;
|
||||
};
|
||||
|
||||
struct bch_dirty_init_state {
|
||||
struct cache_set *c;
|
||||
struct bcache_device *d;
|
||||
int total_threads;
|
||||
int key_idx;
|
||||
spinlock_t idx_lock;
|
||||
atomic_t started;
|
||||
atomic_t enough;
|
||||
wait_queue_head_t wait;
|
||||
struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX];
|
||||
};
|
||||
|
||||
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
|
||||
{
|
||||
uint64_t i, ret = 0;
|
||||
|
|
|
@@ -6185,7 +6185,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
md_bitmap_wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce) {
if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}

@@ -32,8 +32,6 @@ config NVME_HWMON
a hardware monitoring device will be created for each NVMe drive
in the system.

If unsure, say N.

config NVME_FABRICS
tristate

@ -171,7 +171,6 @@ static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
|
|||
nvme_remove_namespaces(ctrl);
|
||||
ctrl->ops->delete_ctrl(ctrl);
|
||||
nvme_uninit_ctrl(ctrl);
|
||||
nvme_put_ctrl(ctrl);
|
||||
}
|
||||
|
||||
static void nvme_delete_ctrl_work(struct work_struct *work)
|
||||
|
@ -192,21 +191,16 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);
|
||||
|
||||
static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
|
||||
static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* Keep a reference until nvme_do_delete_ctrl() complete,
|
||||
* since ->delete_ctrl can free the controller.
|
||||
*/
|
||||
nvme_get_ctrl(ctrl);
|
||||
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
|
||||
ret = -EBUSY;
|
||||
if (!ret)
|
||||
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
|
||||
nvme_do_delete_ctrl(ctrl);
|
||||
nvme_put_ctrl(ctrl);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
|
||||
|
@ -291,11 +285,8 @@ void nvme_complete_rq(struct request *req)
|
|||
nvme_req(req)->ctrl->comp_seen = true;
|
||||
|
||||
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
|
||||
if ((req->cmd_flags & REQ_NVME_MPATH) &&
|
||||
blk_path_error(status)) {
|
||||
nvme_failover_req(req);
|
||||
if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
|
||||
return;
|
||||
}
|
||||
|
||||
if (!blk_queue_dying(req->q)) {
|
||||
nvme_retry_req(req);
|
||||
|
@ -1055,6 +1046,43 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
|
|||
return error;
|
||||
}
|
||||
|
||||
static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
|
||||
struct nvme_ns_id_desc *cur)
|
||||
{
|
||||
const char *warn_str = "ctrl returned bogus length:";
|
||||
void *data = cur;
|
||||
|
||||
switch (cur->nidt) {
|
||||
case NVME_NIDT_EUI64:
|
||||
if (cur->nidl != NVME_NIDT_EUI64_LEN) {
|
||||
dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
|
||||
warn_str, cur->nidl);
|
||||
return -1;
|
||||
}
|
||||
memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
|
||||
return NVME_NIDT_EUI64_LEN;
|
||||
case NVME_NIDT_NGUID:
|
||||
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
|
||||
dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
|
||||
warn_str, cur->nidl);
|
||||
return -1;
|
||||
}
|
||||
memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
|
||||
return NVME_NIDT_NGUID_LEN;
|
||||
case NVME_NIDT_UUID:
|
||||
if (cur->nidl != NVME_NIDT_UUID_LEN) {
|
||||
dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
|
||||
warn_str, cur->nidl);
|
||||
return -1;
|
||||
}
|
||||
uuid_copy(&ids->uuid, data + sizeof(*cur));
|
||||
return NVME_NIDT_UUID_LEN;
|
||||
default:
|
||||
/* Skip unknown types */
|
||||
return cur->nidl;
|
||||
}
|
||||
}
|
||||
|
||||
static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
||||
struct nvme_ns_ids *ids)
|
||||
{
|
||||
|
@ -1074,8 +1102,17 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
|||
|
||||
status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
|
||||
NVME_IDENTIFY_DATA_SIZE);
|
||||
if (status)
|
||||
if (status) {
|
||||
dev_warn(ctrl->device,
|
||||
"Identify Descriptors failed (%d)\n", status);
|
||||
/*
|
||||
* Don't treat an error as fatal, as we potentially already
|
||||
* have a NGUID or EUI-64.
|
||||
*/
|
||||
if (status > 0)
|
||||
status = 0;
|
||||
goto free_data;
|
||||
}
|
||||
|
||||
for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
|
||||
struct nvme_ns_id_desc *cur = data + pos;
|
||||
|
@ -1083,42 +1120,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
|
|||
if (cur->nidl == 0)
|
||||
break;
|
||||
|
||||
switch (cur->nidt) {
|
||||
case NVME_NIDT_EUI64:
|
||||
if (cur->nidl != NVME_NIDT_EUI64_LEN) {
|
||||
dev_warn(ctrl->device,
|
||||
"ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
|
||||
cur->nidl);
|
||||
goto free_data;
|
||||
}
|
||||
len = NVME_NIDT_EUI64_LEN;
|
||||
memcpy(ids->eui64, data + pos + sizeof(*cur), len);
|
||||
break;
|
||||
case NVME_NIDT_NGUID:
|
||||
if (cur->nidl != NVME_NIDT_NGUID_LEN) {
|
||||
dev_warn(ctrl->device,
|
||||
"ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
|
||||
cur->nidl);
|
||||
goto free_data;
|
||||
}
|
||||
len = NVME_NIDT_NGUID_LEN;
|
||||
memcpy(ids->nguid, data + pos + sizeof(*cur), len);
|
||||
break;
|
||||
case NVME_NIDT_UUID:
|
||||
if (cur->nidl != NVME_NIDT_UUID_LEN) {
|
||||
dev_warn(ctrl->device,
|
||||
"ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
|
||||
cur->nidl);
|
||||
goto free_data;
|
||||
}
|
||||
len = NVME_NIDT_UUID_LEN;
|
||||
uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
|
||||
break;
|
||||
default:
|
||||
/* Skip unknown types */
|
||||
len = cur->nidl;
|
||||
break;
|
||||
}
|
||||
len = nvme_process_ns_desc(ctrl, ids, cur);
|
||||
if (len < 0)
|
||||
goto free_data;
|
||||
|
||||
len += sizeof(*cur);
|
||||
}
|
||||
|
@ -1584,6 +1588,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
struct nvme_user_io32 {
|
||||
__u8 opcode;
|
||||
__u8 flags;
|
||||
__u16 control;
|
||||
__u16 nblocks;
|
||||
__u16 rsvd;
|
||||
__u64 metadata;
|
||||
__u64 addr;
|
||||
__u64 slba;
|
||||
__u32 dsmgmt;
|
||||
__u32 reftag;
|
||||
__u16 apptag;
|
||||
__u16 appmask;
|
||||
} __attribute__((__packed__));
|
||||
|
||||
#define NVME_IOCTL_SUBMIT_IO32 _IOW('N', 0x42, struct nvme_user_io32)
|
||||
|
||||
static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
/*
|
||||
* Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
|
||||
* between 32 bit programs and 64 bit kernel.
|
||||
* The cause is that the results of sizeof(struct nvme_user_io),
|
||||
* which is used to define NVME_IOCTL_SUBMIT_IO,
|
||||
* are not same between 32 bit compiler and 64 bit compiler.
|
||||
* NVME_IOCTL_SUBMIT_IO32 is for 64 bit kernel handling
|
||||
* NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
|
||||
* Other IOCTL numbers are same between 32 bit and 64 bit.
|
||||
* So there is nothing to do regarding to other IOCTL numbers.
|
||||
*/
|
||||
if (cmd == NVME_IOCTL_SUBMIT_IO32)
|
||||
return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);
|
||||
|
||||
return nvme_ioctl(bdev, mode, cmd, arg);
|
||||
}
|
||||
#else
|
||||
#define nvme_compat_ioctl NULL
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
static int nvme_open(struct block_device *bdev, fmode_t mode)
|
||||
{
|
||||
struct nvme_ns *ns = bdev->bd_disk->private_data;
|
||||
|
@@ -1721,26 +1766,15 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
struct nvme_id_ns *id, struct nvme_ns_ids *ids)
{
int ret = 0;

memset(ids, 0, sizeof(*ids));

if (ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ids->eui64, id->eui64, sizeof(id->eui64));
if (ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ids->nguid, id->nguid, sizeof(id->nguid));
if (ctrl->vs >= NVME_VS(1, 3, 0)) {
/* Don't treat error as fatal we potentially
* already have a NGUID or EUI-64
*/
ret = nvme_identify_ns_descs(ctrl, nsid, ids);
if (ret)
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", ret);
if (ret > 0)
ret = 0;
}
return ret;
if (ctrl->vs >= NVME_VS(1, 3, 0))
return nvme_identify_ns_descs(ctrl, nsid, ids);
return 0;
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
@@ -2027,7 +2061,7 @@ EXPORT_SYMBOL_GPL(nvme_sec_submit);
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.open = nvme_open,
.release = nvme_release,
.getgeo = nvme_getgeo,
@@ -2055,7 +2089,7 @@ const struct block_device_operations nvme_ns_head_ops = {
.open = nvme_ns_head_open,
.release = nvme_ns_head_release,
.ioctl = nvme_ioctl,
.compat_ioctl = nvme_ioctl,
.compat_ioctl = nvme_compat_ioctl,
.getgeo = nvme_getgeo,
.pr_ops = &nvme_pr_ops,
};
@@ -2074,13 +2108,13 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
if ((csts & NVME_CSTS_RDY) == bit)
break;

msleep(100);
usleep_range(1000, 2000);
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
dev_err(ctrl->device,
"Device not ready; aborting %s\n", enabled ?
"initialisation" : "reset");
"Device not ready; aborting %s, CSTS=0x%x\n",
enabled ? "initialisation" : "reset", csts);
return -ENODEV;
}
}
@@ -2591,8 +2625,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
lockdep_assert_held(&nvme_subsystems_lock);

list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
if (tmp->state == NVME_CTRL_DELETING ||
tmp->state == NVME_CTRL_DEAD)
if (nvme_state_terminal(tmp))
continue;

if (tmp->cntlid == ctrl->cntlid) {
@@ -3193,6 +3226,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

/* Can't delete non-created controllers */
if (!ctrl->created)
return -EBUSY;

if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -3242,6 +3279,26 @@ static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev,
}
static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL);

static ssize_t nvme_sysfs_show_hostnqn(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn);
}
static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL);

static ssize_t nvme_sysfs_show_hostid(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id);
}
static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL);

static ssize_t nvme_sysfs_show_address(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -3267,6 +3324,8 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_numa_node.attr,
&dev_attr_queue_count.attr,
&dev_attr_sqsize.attr,
&dev_attr_hostnqn.attr,
&dev_attr_hostid.attr,
NULL
};

@@ -3280,6 +3339,10 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_address.attr && !ctrl->ops->get_address)
return 0;
if (a == &dev_attr_hostnqn.attr && !ctrl->opts)
return 0;
if (a == &dev_attr_hostid.attr && !ctrl->opts)
return 0;

return a->mode;
}
@@ -3294,7 +3357,7 @@ static const struct attribute_group *nvme_dev_attr_groups[] = {
NULL,
};

static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys,
static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys,
unsigned nsid)
{
struct nvme_ns_head *h;
@@ -3327,7 +3390,8 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
}

static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_id_ns *id)
unsigned nsid, struct nvme_id_ns *id,
struct nvme_ns_ids *ids)
{
struct nvme_ns_head *head;
size_t size = sizeof(*head);
@@ -3350,12 +3414,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
goto out_ida_remove;
head->subsys = ctrl->subsys;
head->ns_id = nsid;
head->ids = *ids;
kref_init(&head->ref);

ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids);
if (ret)
goto out_cleanup_srcu;

ret = __nvme_check_ids(ctrl->subsys, head);
if (ret) {
dev_err(ctrl->device,
@@ -3390,24 +3451,23 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
struct nvme_ctrl *ctrl = ns->ctrl;
bool is_shared = id->nmic & (1 << 0);
struct nvme_ns_head *head = NULL;
struct nvme_ns_ids ids;
int ret = 0;

ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
if (ret)
goto out;

mutex_lock(&ctrl->subsys->lock);
if (is_shared)
head = __nvme_find_ns_head(ctrl->subsys, nsid);
head = nvme_find_ns_head(ctrl->subsys, nsid);
if (!head) {
head = nvme_alloc_ns_head(ctrl, nsid, id);
head = nvme_alloc_ns_head(ctrl, nsid, id, &ids);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out_unlock;
}
} else {
struct nvme_ns_ids ids;

ret = nvme_report_ns_ids(ctrl, nsid, id, &ids);
if (ret)
goto out_unlock;

if (!nvme_ns_ids_equal(&head->ids, &ids)) {
dev_err(ctrl->device,
"IDs don't match for shared namespace %d\n",
@@ -3422,6 +3482,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,

out_unlock:
mutex_unlock(&ctrl->subsys->lock);
out:
if (ret > 0)
ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
@@ -3480,7 +3541,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
return 0;
}

static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
struct gendisk *disk;
@@ -3490,13 +3551,11 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)

ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return -ENOMEM;
return;

ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue)) {
ret = PTR_ERR(ns->queue);
if (IS_ERR(ns->queue))
goto out_free_ns;
}

if (ctrl->opts && ctrl->opts->data_digest)
ns->queue->backing_dev_info->capabilities
@@ -3519,10 +3578,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (ret)
goto out_free_queue;

if (id->ncap == 0) {
ret = -EINVAL;
if (id->ncap == 0) /* no namespace (legacy quirk) */
goto out_free_id;
}

ret = nvme_init_ns_head(ns, nsid, id);
if (ret)
@@ -3531,10 +3588,8 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_set_disk_name(disk_name, ns, ctrl, &flags);

disk = alloc_disk_node(0, node);
if (!disk) {
ret = -ENOMEM;
if (!disk)
goto out_unlink_ns;
}

disk->fops = &nvme_fops;
disk->private_data = ns;
@@ -3565,7 +3620,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name);
kfree(id);

return 0;
return;
out_put_disk:
put_disk(ns->disk);
out_unlink_ns:
@@ -3579,9 +3634,6 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
blk_cleanup_queue(ns->queue);
out_free_ns:
kfree(ns);
if (ret > 0)
ret = blk_status_to_errno(nvme_error_status(ret));
return ret;
}

static void nvme_ns_remove(struct nvme_ns *ns)
@@ -3987,6 +4039,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);

@@ -3995,6 +4048,7 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
nvme_fault_inject_fini(&ctrl->fault_inject);
dev_pm_qos_hide_latency_tolerance(ctrl->device);
cdev_device_del(&ctrl->cdev, ctrl->device);
nvme_put_ctrl(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

@@ -4077,6 +4131,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
if (ret)
goto out_release_instance;

nvme_get_ctrl(ctrl);
cdev_init(&ctrl->cdev, &nvme_dev_fops);
ctrl->cdev.owner = ops->module;
ret = cdev_device_add(&ctrl->cdev, ctrl->device);
@@ -4095,6 +4150,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,

return 0;
out_free_name:
nvme_put_ctrl(ctrl);
kfree_const(ctrl->device->kobj.name);
out_release_instance:
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
@@ -4299,6 +4355,7 @@ static void __exit nvme_core_exit(void)
destroy_workqueue(nvme_delete_wq);
destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");

@@ -105,14 +105,14 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
int len = 0;

if (ctrl->opts->mask & NVMF_OPT_TRADDR)
len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
len += scnprintf(buf, size, "traddr=%s", ctrl->opts->traddr);
if (ctrl->opts->mask & NVMF_OPT_TRSVCID)
len += snprintf(buf + len, size - len, "%strsvcid=%s",
len += scnprintf(buf + len, size - len, "%strsvcid=%s",
(len) ? "," : "", ctrl->opts->trsvcid);
if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)
len += snprintf(buf + len, size - len, "%shost_traddr=%s",
len += scnprintf(buf + len, size - len, "%shost_traddr=%s",
(len) ? "," : "", ctrl->opts->host_traddr);
len += snprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, "\n");

return len;
}

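Editorial note on the snprintf()/scnprintf() swap above: snprintf() returns the length the output would have had, so a chain of len += snprintf(buf + len, size - len, ...) can push len past size once anything is truncated, after which size - len goes wrong. scnprintf() returns only what was actually stored. The userspace stand-in below is an assumption-labelled sketch (demo_scnprintf is not the kernel helper) that makes the difference visible.

/* Userspace stand-in for the kernel's scnprintf(); demo_scnprintf is an
 * invented name.  It never returns more than size - 1, so accumulating
 * "len +=" stays inside the buffer even when output is truncated.
 */
#include <stdarg.h>
#include <stdio.h>

static int demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;
	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	if (i < 0)
		return 0;
	return i < (int)size ? i : (int)(size - 1);
}

int main(void)
{
	char buf[8];	/* deliberately too small for the formatted address */
	int a = snprintf(buf, sizeof(buf), "traddr=%s", "192.168.0.1");
	int b = demo_scnprintf(buf, sizeof(buf), "traddr=%s", "192.168.0.1");

	printf("snprintf returned %d, scnprintf returned %d\n", a, b);	/* 18 vs 7 */
	return 0;
}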
@@ -3181,10 +3181,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto fail_ctrl;
}

nvme_get_ctrl(&ctrl->ctrl);

if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
nvme_put_ctrl(&ctrl->ctrl);
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to schedule initial connect\n",
ctrl->cnum);

@@ -64,17 +64,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
}
}

void nvme_failover_req(struct request *req)
bool nvme_failover_req(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
u16 status = nvme_req(req)->status;
unsigned long flags;

spin_lock_irqsave(&ns->head->requeue_lock, flags);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
blk_mq_end_request(req, 0);

switch (status & 0x7ff) {
case NVME_SC_ANA_TRANSITION:
case NVME_SC_ANA_INACCESSIBLE:
@@ -103,15 +98,17 @@ void nvme_failover_req(struct request *req)
nvme_mpath_clear_current_path(ns);
break;
default:
/*
* Reset the controller for any non-ANA error as we don't know
* what caused the error.
*/
nvme_reset_ctrl(ns->ctrl);
break;
/* This was a non-ANA error so follow the normal error path. */
return false;
}

spin_lock_irqsave(&ns->head->requeue_lock, flags);
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
blk_mq_end_request(req, 0);

kblockd_schedule_work(&ns->head->requeue_work);
return true;
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)

@@ -259,6 +259,7 @@ struct nvme_ctrl {
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
bool created;

#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
@@ -550,7 +551,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
bool nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
@@ -599,8 +600,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
static inline bool nvme_failover_req(struct request *req)
{
return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{

@@ -971,39 +971,25 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
nvme_end_request(req, cqe->status, cqe->result);
}

static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
{
while (start != end) {
nvme_handle_cqe(nvmeq, start);
if (++start == nvmeq->q_depth)
start = 0;
}
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
if (nvmeq->cq_head == nvmeq->q_depth - 1) {
if (++nvmeq->cq_head == nvmeq->q_depth) {
nvmeq->cq_head = 0;
nvmeq->cq_phase = !nvmeq->cq_phase;
} else {
nvmeq->cq_head++;
nvmeq->cq_phase ^= 1;
}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
u16 *end, unsigned int tag)
static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
int found = 0;

*start = nvmeq->cq_head;
while (nvme_cqe_pending(nvmeq)) {
if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
found++;
found++;
nvme_handle_cqe(nvmeq, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
*end = nvmeq->cq_head;

if (*start != *end)
if (found)
nvme_ring_cq_doorbell(nvmeq);
return found;
}
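Editorial sketch of the convention the new nvme_update_cq_head() relies on: the head index wraps at the queue depth and the expected phase bit flips on every wrap, so a completion entry counts as pending only while its phase tag matches the driver's current phase. The toy program below is self-contained and uses assumed values (toy_cqe, Q_DEPTH, the pre-seeded slots); it is not driver code.

/* Toy completion-queue walk: same shape as the new nvme_update_cq_head(). */
#include <stdio.h>

#define Q_DEPTH 4

struct toy_cqe { unsigned short status; };	/* bit 0 holds the phase tag */

static unsigned int head = 2;	/* the driver stopped here on the previous pass */
static unsigned int phase = 1;

static int cqe_pending(const struct toy_cqe *cq)
{
	return (cq[head].status & 1) == phase;
}

static void update_cq_head(void)
{
	if (++head == Q_DEPTH) {	/* wrap and flip the expected phase */
		head = 0;
		phase = !phase;
	}
}

int main(void)
{
	/* Slots 2 and 3 still carry first-pass (phase 1) completions; the
	 * device has wrapped and written second-pass (phase 0) entries
	 * into slots 0 and 1.  Slot 2 has not been rewritten yet. */
	struct toy_cqe cq[Q_DEPTH] = { { 0 }, { 0 }, { 1 }, { 1 } };
	int found = 0;

	while (cqe_pending(cq)) {
		printf("consume CQE in slot %u (phase %u)\n", head, phase);
		update_cq_head();
		found++;
	}
	printf("processed %d completions, stopped at slot %u\n", found, head);
	return 0;
}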
@@ -1012,21 +998,16 @@ static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
irqreturn_t ret = IRQ_NONE;
u16 start, end;

/*
* The rmb/wmb pair ensures we see all updates from a previous run of
* the irq handler, even if that was on another CPU.
*/
rmb();
nvme_process_cq(nvmeq, &start, &end, -1);
if (nvme_process_cq(nvmeq))
ret = IRQ_HANDLED;
wmb();

if (start != end) {
nvme_complete_cqes(nvmeq, start, end);
return IRQ_HANDLED;
}

return ret;
}

@@ -1039,46 +1020,30 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
}

/*
* Poll for completions any queue, including those not dedicated to polling.
* Poll for completions for any interrupt driven queue
* Can be called from any context.
*/
static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
u16 start, end;
int found;

/*
* For a poll queue we need to protect against the polling thread
* using the CQ lock. For normal interrupt driven threads we have
* to disable the interrupt to avoid racing with it.
*/
if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, tag);
spin_unlock(&nvmeq->cq_poll_lock);
} else {
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
found = nvme_process_cq(nvmeq, &start, &end, tag);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

nvme_complete_cqes(nvmeq, start, end);
return found;
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
nvme_process_cq(nvmeq);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
struct nvme_queue *nvmeq = hctx->driver_data;
u16 start, end;
bool found;

if (!nvme_cqe_pending(nvmeq))
return 0;

spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, -1);
nvme_complete_cqes(nvmeq, start, end);
found = nvme_process_cq(nvmeq);
spin_unlock(&nvmeq->cq_poll_lock);

return found;
@@ -1255,7 +1220,12 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
/*
* Did we miss an interrupt?
*/
if (nvme_poll_irqdisable(nvmeq, req->tag)) {
if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
nvme_poll(req->mq_hctx);
else
nvme_poll_irqdisable(nvmeq);

if (blk_mq_request_completed(req)) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
@@ -1398,7 +1368,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
else
nvme_disable_ctrl(&dev->ctrl);

nvme_poll_irqdisable(nvmeq, -1);
nvme_poll_irqdisable(nvmeq);
}

/*
@@ -1409,13 +1379,10 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
u16 start, end;
int i;

for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
nvme_process_cq(&dev->queues[i], &start, &end, -1);
nvme_complete_cqes(&dev->queues[i], start, end);
}
for (i = dev->ctrl.queue_count - 1; i > 0; i--)
nvme_process_cq(&dev->queues[i]);
}

static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -2503,13 +2470,13 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
struct nvme_dev *dev = to_nvme_dev(ctrl);

nvme_dbbuf_dma_free(dev);
put_device(dev->dev);
nvme_free_tagset(dev);
if (dev->ctrl.admin_q)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
mempool_destroy(dev->iod_mempool);
put_device(dev->dev);
kfree(dev->queues);
kfree(dev);
}

@@ -2689,7 +2656,7 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);

return snprintf(buf, size, "%s", dev_name(&pdev->dev));
return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
}

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -2835,7 +2802,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));

nvme_reset_ctrl(&dev->ctrl);
nvme_get_ctrl(&dev->ctrl);
async_schedule(nvme_async_probe, dev);

return 0;
@@ -2907,10 +2873,9 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
nvme_uninit_ctrl(&dev->ctrl);
nvme_release_prp_pools(dev);
nvme_dev_unmap(dev);
nvme_put_ctrl(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
}

#ifdef CONFIG_PM_SLEEP

@@ -1024,8 +1024,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)

changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
if (!changed) {
/* state change failure is ok if we're in DELETING state */
/*
* state change failure is ok if we're in DELETING state,
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -2045,8 +2050,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

nvme_get_ctrl(&ctrl->ctrl);

mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
mutex_unlock(&nvme_rdma_ctrl_mutex);

@@ -20,6 +20,16 @@

struct nvme_tcp_queue;

/* Define the socket priority to use for connections were it is desirable
* that the NIC consider performing optimized packet processing or filtering.
* A non-zero value being sufficient to indicate general consideration of any
* possible optimization. Making it a module param allows for alternative
* values that may be unique for some NIC implementations.
*/
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

enum nvme_tcp_send_state {
NVME_TCP_SEND_CMD_PDU = 0,
NVME_TCP_SEND_H2C_PDU,
@@ -1017,8 +1027,15 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
if (req->state == NVME_TCP_SEND_DDGST)
ret = nvme_tcp_try_send_ddgst(req);
done:
if (ret == -EAGAIN)
if (ret == -EAGAIN) {
ret = 0;
} else if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", ret);
if (ret != -EPIPE && ret != -ECONNRESET)
nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
}
return ret;
}

@@ -1049,25 +1066,16 @@ static void nvme_tcp_io_work(struct work_struct *w)
int result;

result = nvme_tcp_try_send(queue);
if (result > 0) {
if (result > 0)
pending = true;
} else if (unlikely(result < 0)) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", result);

/*
* Fail the request unless peer closed the connection,
* in which case error recovery flow will complete all.
*/
if ((result != -EPIPE) && (result != -ECONNRESET))
nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
return;
}
else if (unlikely(result < 0))
break;

result = nvme_tcp_try_recv(queue);
if (result > 0)
pending = true;
else if (unlikely(result < 0))
break;

if (!pending)
return;
@@ -1248,13 +1256,67 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
return ret;
}

static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
{
return nvme_tcp_queue_id(queue) == 0;
}

static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
int qid = nvme_tcp_queue_id(queue);

return !nvme_tcp_admin_queue(queue) &&
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
}

static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
int qid = nvme_tcp_queue_id(queue);

return !nvme_tcp_admin_queue(queue) &&
!nvme_tcp_default_queue(queue) &&
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
ctrl->io_queues[HCTX_TYPE_READ];
}

static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
int qid = nvme_tcp_queue_id(queue);

return !nvme_tcp_admin_queue(queue) &&
!nvme_tcp_default_queue(queue) &&
!nvme_tcp_read_queue(queue) &&
qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
ctrl->io_queues[HCTX_TYPE_READ] +
ctrl->io_queues[HCTX_TYPE_POLL];
}

static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
{
struct nvme_tcp_ctrl *ctrl = queue->ctrl;
int qid = nvme_tcp_queue_id(queue);
int n = 0;

if (nvme_tcp_default_queue(queue))
n = qid - 1;
else if (nvme_tcp_read_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}

static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
int qid, size_t queue_size)
{
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
struct linger sol = { .l_onoff = 1, .l_linger = 0 };
int ret, opt, rcv_pdu_size, n;
int ret, opt, rcv_pdu_size;

queue->ctrl = ctrl;
INIT_LIST_HEAD(&queue->send_list);
@@ -1309,6 +1371,17 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
goto err_sock;
}

if (so_priority > 0) {
ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_PRIORITY,
(char *)&so_priority, sizeof(so_priority));
if (ret) {
dev_err(ctrl->ctrl.device,
"failed to set SO_PRIORITY sock opt, ret %d\n",
ret);
goto err_sock;
}
}

/* Set socket type of service */
if (nctrl->opts->tos >= 0) {
opt = nctrl->opts->tos;
@@ -1322,11 +1395,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
}

queue->sock->sk->sk_allocation = GFP_ATOMIC;
if (!qid)
n = 0;
else
n = (qid - 1) % num_online_cpus();
queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
nvme_tcp_set_queue_io_cpu(queue);
queue->request = NULL;
queue->data_remaining = 0;
queue->ddgst_remaining = 0;
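Editorial sketch of the qid-to-CPU mapping introduced above: within each queue class the helper computes a zero-based index n, and cpumask_next_wrap(n - 1, ...) then effectively selects the n-th online CPU, wrapping around the online set. The standalone program below mirrors that arithmetic; NR_DEFAULT, NR_READ, NR_POLL and NR_CPUS are assumed example values, not anything taken from the patch.

/* Mirrors the default/read/poll split of nvme_tcp_set_queue_io_cpu(). */
#include <stdio.h>

#define NR_DEFAULT 4	/* io_queues[HCTX_TYPE_DEFAULT] (assumed) */
#define NR_READ    2	/* io_queues[HCTX_TYPE_READ]    (assumed) */
#define NR_POLL    2	/* io_queues[HCTX_TYPE_POLL]    (assumed) */
#define NR_CPUS    4	/* pretend this many CPUs are online */

int main(void)
{
	int qid;

	for (qid = 1; qid <= NR_DEFAULT + NR_READ + NR_POLL; qid++) {
		const char *class;
		int n;

		if (qid < 1 + NR_DEFAULT) {
			class = "default";
			n = qid - 1;
		} else if (qid < 1 + NR_DEFAULT + NR_READ) {
			class = "read";
			n = qid - NR_DEFAULT - 1;
		} else {
			class = "poll";
			n = qid - NR_DEFAULT - NR_READ - 1;
		}
		/* cpumask_next_wrap(n - 1, ...) picks the n-th online CPU,
		 * wrapping when n exceeds the online count */
		printf("qid %d: %-7s queue, io_cpu %d\n", qid, class, n % NR_CPUS);
	}
	return 0;
}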
@@ -1861,8 +1930,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
}

if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
/* state change failure is ok if we're in DELETING state */
/*
* state change failure is ok if we're in DELETING state,
* unless we're during creation of a new controller to
* avoid races with teardown flow.
*/
WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
WARN_ON_ONCE(new);
ret = -EINVAL;
goto destroy_io;
}
@@ -2359,8 +2433,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

nvme_get_ctrl(&ctrl->ctrl);

mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
mutex_unlock(&nvme_tcp_ctrl_mutex);

@@ -323,12 +323,25 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static void nvmet_id_set_model_number(struct nvme_id_ctrl *id,
struct nvmet_subsys *subsys)
{
const char *model = NVMET_DEFAULT_CTRL_MODEL;
struct nvmet_subsys_model *subsys_model;

rcu_read_lock();
subsys_model = rcu_dereference(subsys->model);
if (subsys_model)
model = subsys_model->number;
memcpy_and_pad(id->mn, sizeof(id->mn), model, strlen(model), ' ');
rcu_read_unlock();
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvme_id_ctrl *id;
u16 status = 0;
const char model[] = "Linux";

id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
@@ -343,7 +356,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
memset(id->sn, ' ', sizeof(id->sn));
bin2hex(id->sn, &ctrl->subsys->serial,
min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
nvmet_id_set_model_number(id, ctrl->subsys);
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');

@@ -357,8 +370,12 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* we support multiple ports, multiples hosts and ANA: */
id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

/* no limit on data transfer sizes for now */
id->mdts = 0;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
id->mdts = ctrl->ops->get_mdts(ctrl);
else
id->mdts = 0;

id->cntlid = cpu_to_le16(ctrl->cntlid);
id->ver = cpu_to_le32(ctrl->subsys->ver);

@@ -721,13 +738,22 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
u16 status = 0;
u16 nsqr;
u16 ncqr;

if (!nvmet_check_data_len(req, 0))
return;

switch (cdw10 & 0xff) {
case NVME_FEAT_NUM_QUEUES:
ncqr = (cdw11 >> 16) & 0xffff;
nsqr = cdw11 & 0xffff;
if (ncqr == 0xffff || nsqr == 0xffff) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
nvmet_set_result(req,
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;

@@ -395,14 +395,12 @@ static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
struct nvmet_subsys *subsys = ns->subsys;
int ret = 0;

mutex_lock(&subsys->lock);
if (ns->enabled) {
ret = -EBUSY;
goto out_unlock;
}

if (uuid_parse(page, &ns->uuid))
ret = -EINVAL;

@@ -815,10 +813,10 @@ static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver),
(int)NVME_TERTIARY(subsys->ver));
else
return snprintf(page, PAGE_SIZE, "%d.%d\n",
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver));

return snprintf(page, PAGE_SIZE, "%d.%d\n",
(int)NVME_MAJOR(subsys->ver),
(int)NVME_MINOR(subsys->ver));
}

static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
@@ -828,7 +826,6 @@ static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
int major, minor, tertiary = 0;
int ret;

ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
if (ret != 2 && ret != 3)
return -EINVAL;
@@ -852,20 +849,151 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
u64 serial;

if (sscanf(page, "%llx\n", &serial) != 1)
return -EINVAL;

down_write(&nvmet_config_sem);
sscanf(page, "%llx\n", &subsys->serial);
to_subsys(item)->serial = serial;
up_write(&nvmet_config_sem);

return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);

static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
}

static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
const char *page, size_t cnt)
{
u16 cntlid_min;

if (sscanf(page, "%hu\n", &cntlid_min) != 1)
return -EINVAL;

if (cntlid_min == 0)
return -EINVAL;

down_write(&nvmet_config_sem);
if (cntlid_min >= to_subsys(item)->cntlid_max)
goto out_unlock;
to_subsys(item)->cntlid_min = cntlid_min;
up_write(&nvmet_config_sem);
return cnt;

out_unlock:
up_write(&nvmet_config_sem);
return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);

static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
char *page)
{
return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
}

static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
const char *page, size_t cnt)
{
u16 cntlid_max;

if (sscanf(page, "%hu\n", &cntlid_max) != 1)
return -EINVAL;

if (cntlid_max == 0)
return -EINVAL;

down_write(&nvmet_config_sem);
if (cntlid_max <= to_subsys(item)->cntlid_min)
goto out_unlock;
to_subsys(item)->cntlid_max = cntlid_max;
up_write(&nvmet_config_sem);
return cnt;

out_unlock:
up_write(&nvmet_config_sem);
return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);

static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
char *page)
{
struct nvmet_subsys *subsys = to_subsys(item);
struct nvmet_subsys_model *subsys_model;
char *model = NVMET_DEFAULT_CTRL_MODEL;
int ret;

rcu_read_lock();
subsys_model = rcu_dereference(subsys->model);
if (subsys_model)
model = subsys_model->number;
ret = snprintf(page, PAGE_SIZE, "%s\n", model);
rcu_read_unlock();

return ret;
}

/* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
return c >= 0x20 && c <= 0x7e;
}

static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_subsys *subsys = to_subsys(item);
struct nvmet_subsys_model *new_model;
char *new_model_number;
int pos = 0, len;

len = strcspn(page, "\n");
if (!len)
return -EINVAL;

for (pos = 0; pos < len; pos++) {
if (!nvmet_is_ascii(page[pos]))
return -EINVAL;
}

new_model_number = kstrndup(page, len, GFP_KERNEL);
if (!new_model_number)
return -ENOMEM;

new_model = kzalloc(sizeof(*new_model) + len + 1, GFP_KERNEL);
if (!new_model) {
kfree(new_model_number);
return -ENOMEM;
}
memcpy(new_model->number, new_model_number, len);

down_write(&nvmet_config_sem);
mutex_lock(&subsys->lock);
new_model = rcu_replace_pointer(subsys->model, new_model,
mutex_is_locked(&subsys->lock));
mutex_unlock(&subsys->lock);
up_write(&nvmet_config_sem);

kfree_rcu(new_model, rcuhead);

return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);

static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_allow_any_host,
&nvmet_subsys_attr_attr_version,
&nvmet_subsys_attr_attr_serial,
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
NULL,
};

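Editorial sketch of the validation rule used by nvmet_subsys_attr_model_store() above: the model string is cut at the first newline with strcspn() and every remaining byte must fall in the printable range 0x20..0x7e (NVMe 1.4, section 1.5). The userspace program below applies the same checks; the demo_* names are invented and it is not target code.

/* Same strcspn() + printable-ASCII check as nvmet_is_ascii(), standalone. */
#include <stdio.h>
#include <string.h>

static int demo_is_ascii(char c)
{
	return c >= 0x20 && c <= 0x7e;	/* printable range per NVMe 1.4, 1.5 */
}

/* Returns the accepted length, or -1 if the string is empty or contains
 * a character outside the printable-ASCII range. */
static int demo_validate_model(const char *page)
{
	size_t len = strcspn(page, "\n");	/* stop at the trailing newline */
	size_t pos;

	if (!len)
		return -1;
	for (pos = 0; pos < len; pos++)
		if (!demo_is_ascii(page[pos]))
			return -1;
	return (int)len;
}

int main(void)
{
	printf("%d\n", demo_validate_model("Linux demo array\n"));	/* 16 */
	printf("%d\n", demo_validate_model("\n"));			/* -1: empty */
	printf("%d\n", demo_validate_model("bad\tmodel\n"));		/* -1: control char */
	return 0;
}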
@@ -1289,8 +1289,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!ctrl->sqs)
goto out_free_cqs;

if (subsys->cntlid_min > subsys->cntlid_max)
goto out_free_cqs;

ret = ida_simple_get(&cntlid_ida,
NVME_CNTLID_MIN, NVME_CNTLID_MAX,
subsys->cntlid_min, subsys->cntlid_max,
GFP_KERNEL);
if (ret < 0) {
status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
@@ -1438,7 +1441,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
kfree(subsys);
return ERR_PTR(-ENOMEM);
}

subsys->cntlid_min = NVME_CNTLID_MIN;
subsys->cntlid_max = NVME_CNTLID_MAX;
kref_init(&subsys->ref);

mutex_init(&subsys->lock);
@@ -1457,6 +1461,7 @@ static void nvmet_subsys_free(struct kref *ref)
WARN_ON_ONCE(!list_empty(&subsys->namespaces));

kfree(subsys->subsysnqn);
kfree_rcu(subsys->model, rcuhead);
kfree(subsys);
}

@@ -485,7 +485,6 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
out_disable:
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
@@ -618,8 +617,6 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
dev_info(ctrl->ctrl.device,
"new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

nvme_get_ctrl(&ctrl->ctrl);

changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
WARN_ON_ONCE(!changed);

@@ -23,6 +23,7 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
#define NVMET_NO_ERROR_LOC ((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL "Linux"

/*
* Supported optional AENs:
@@ -202,6 +203,11 @@ struct nvmet_ctrl {
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
};

struct nvmet_subsys_model {
struct rcu_head rcuhead;
char number[];
};

struct nvmet_subsys {
enum nvme_subsys_type type;

@@ -211,6 +217,8 @@ struct nvmet_subsys {
struct list_head namespaces;
unsigned int nr_namespaces;
unsigned int max_nsid;
u16 cntlid_min;
u16 cntlid_max;

struct list_head ctrls;

@@ -227,6 +235,8 @@ struct nvmet_subsys {

struct config_group namespaces_group;
struct config_group allowed_hosts_group;

struct nvmet_subsys_model __rcu *model;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
@@ -279,6 +289,7 @@ struct nvmet_fabrics_ops {
struct nvmet_port *port, char *traddr);
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC 8

@@ -31,6 +31,9 @@
#define NVMET_RDMA_MAX_INLINE_SGE 4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS 8

struct nvmet_rdma_cmd {
struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
struct ib_cqe cqe;
@@ -975,7 +978,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
struct ib_qp_init_attr qp_attr;
struct nvmet_rdma_device *ndev = queue->dev;
int comp_vector, nr_cqe, ret, i;
int comp_vector, nr_cqe, ret, i, factor;

/*
* Spread the io queues across completion vectors,
@@ -1008,7 +1011,9 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
qp_attr.qp_type = IB_QPT_RC;
/* +1 for drain */
qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
1 << NVMET_RDMA_MAX_MDTS);
qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
ndev->device->attrs.max_send_sge);

@@ -1602,6 +1607,11 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
}
}

static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
{
return NVMET_RDMA_MAX_MDTS;
}

static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1612,6 +1622,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.queue_response = nvmet_rdma_queue_response,
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
};

static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)

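Editorial note on the NVMET_RDMA_MAX_MDTS value used above: MDTS is expressed as a power-of-two multiple of the minimum memory page size, so under the commented 4KB assumption an mdts of 8 caps a single transfer at 1 MiB, and 1 << NVMET_RDMA_MAX_MDTS is the worst-case page count handed to rdma_rw_mr_factor(). The arithmetic below is a standalone illustration, not target code.

/* MDTS arithmetic under the 4KB page assumption stated in the patch. */
#include <stdio.h>

#define NVMET_RDMA_MAX_MDTS 8		/* value introduced by the patch */
#define PAGE_SIZE_4K (4u * 1024u)	/* assumed mpsmin == device page size */

int main(void)
{
	unsigned int max_pages = 1u << NVMET_RDMA_MAX_MDTS;	/* 256 pages */
	unsigned int max_bytes = max_pages * PAGE_SIZE_4K;

	printf("mdts=%d -> %u pages -> max transfer %u KiB\n",
	       NVMET_RDMA_MAX_MDTS, max_pages, max_bytes / 1024);
	/* prints: mdts=8 -> 256 pages -> max transfer 1024 KiB */
	return 0;
}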
@@ -19,6 +19,16 @@

#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)

/* Define the socket priority to use for connections were it is desirable
* that the NIC consider performing optimized packet processing or filtering.
* A non-zero value being sufficient to indicate general consideration of any
* possible optimization. Making it a module param allows for alternative
* values that may be unique for some NIC implementations.
*/
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

#define NVMET_TCP_RECV_BUDGET 8
#define NVMET_TCP_SEND_BUDGET 8
#define NVMET_TCP_IO_WORK_BUDGET 64
@@ -622,7 +632,7 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
@@ -632,6 +642,9 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
};
int ret;

if (!last_in_batch && cmd->queue->send_list_len)
msg.msg_flags |= MSG_MORE;

ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (unlikely(ret <= 0))
return ret;
@@ -672,7 +685,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
}

if (cmd->state == NVMET_TCP_SEND_DDGST) {
ret = nvmet_try_send_ddgst(cmd);
ret = nvmet_try_send_ddgst(cmd, last_in_batch);
if (ret <= 0)
goto done_send;
}
@@ -794,7 +807,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
icresp->hdr.pdo = 0;
icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
icresp->cpda = 0;
if (queue->hdr_digest)
icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
@@ -1439,6 +1452,13 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
if (ret)
return ret;

if (so_priority > 0) {
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_PRIORITY,
(char *)&so_priority, sizeof(so_priority));
if (ret)
return ret;
}

/* Set socket type of service */
if (inet->rcv_tos > 0) {
int tos = inet->rcv_tos;
@@ -1628,6 +1648,15 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
goto err_sock;
}

if (so_priority > 0) {
ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_PRIORITY,
(char *)&so_priority, sizeof(so_priority));
if (ret) {
pr_err("failed to set SO_PRIORITY sock opt %d\n", ret);
goto err_sock;
}
}

ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
sizeof(port->addr));
if (ret) {

@@ -952,6 +952,10 @@ static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
}

#ifdef CONFIG_BLK_DEV_ZONED

/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));

@@ -7,26 +7,18 @@
* Handbook", Sanches and Canton.
*/

#ifdef FDPATCHES
#define FD_IOPORT fdc_state[fdc].address
#else
/* It would be a lot saner just to force fdc_state[fdc].address to always
be set ! FIXME */
#define FD_IOPORT 0x3f0
#endif

/* Fd controller regs. S&C, about page 340 */
#define FD_STATUS (4 + FD_IOPORT )
#define FD_DATA (5 + FD_IOPORT )
#define FD_STATUS 4
#define FD_DATA 5

/* Digital Output Register */
#define FD_DOR (2 + FD_IOPORT )
#define FD_DOR 2

/* Digital Input Register (read) */
#define FD_DIR (7 + FD_IOPORT )
#define FD_DIR 7

/* Diskette Control Register (write)*/
#define FD_DCR (7 + FD_IOPORT )
#define FD_DCR 7

/* Bits of main status register */
#define STATUS_BUSYMASK 0x0F /* drive busy mask */