Commit e481fc0a authored by Sagi Grimberg, committed by Christoph Hellwig
Browse files

nvme-auth: guarantee dhchap buffers under memory pressure



We want to guarantee that we have chap buffers when a controller
reconnects under memory pressure. Add a mempool specifically
for that.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent b7d604ca
Loading
Loading
Loading
Loading
+28 −2
Original line number Diff line number Diff line
@@ -14,6 +14,8 @@
#include <linux/nvme-auth.h>

#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

struct nvme_dhchap_queue_context {
	struct list_head entry;
@@ -675,7 +677,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	kfree(chap->buf);
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}

@@ -701,7 +703,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = kmalloc(CHAP_BUF_SIZE, GFP_KERNEL);
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
@@ -1029,3 +1031,27 @@ void nvme_auth_free(struct nvme_ctrl *ctrl)
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

/*
 * Set up the slab cache and the mempool that back DH-HMAC-CHAP
 * transaction buffers, so that buffer allocation is guaranteed when a
 * controller reconnects under memory pressure.
 *
 * Returns 0 on success, -ENOMEM if either the cache or the pool cannot
 * be created.
 */
int __init nvme_init_auth(void)
{
	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		return -ENOMEM;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (nvme_chap_buf_pool)
		return 0;

	/* Pool creation failed: tear the cache back down before bailing. */
	kmem_cache_destroy(nvme_chap_buf_cache);
	return -ENOMEM;
}

/*
 * Tear down the DH-HMAC-CHAP buffer allocators created by
 * nvme_init_auth().  The mempool allocates its elements from
 * nvme_chap_buf_cache, so the pool must be destroyed before the cache.
 */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
}
+6 −0
Original line number Diff line number Diff line
@@ -5347,8 +5347,13 @@ static int __init nvme_core_init(void)
		goto unregister_generic_ns;
	}

	result = nvme_init_auth();
	if (result)
		goto destroy_ns_chr;
	return 0;

destroy_ns_chr:
	class_destroy(nvme_ns_chr_class);
unregister_generic_ns:
	unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
@@ -5369,6 +5374,7 @@ static int __init nvme_core_init(void)

static void __exit nvme_core_exit(void)
{
	nvme_exit_auth();
	class_destroy(nvme_ns_chr_class);
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
+9 −0
Original line number Diff line number Diff line
@@ -1018,6 +1018,8 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
}

#ifdef CONFIG_NVME_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
@@ -1029,6 +1031,13 @@ static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
/* Stub for builds without CONFIG_NVME_AUTH: nothing to set up, report success. */
static inline int __init nvme_init_auth(void)
{
	return 0;
}
/* Stub for builds without CONFIG_NVME_AUTH: nothing to tear down. */
static inline void __exit nvme_exit_auth(void)
{
}
/* Stub for builds without CONFIG_NVME_AUTH: no auth work to stop.
 * Dropped the stray trailing ';' — an empty declaration at file scope
 * is not permitted by ISO C and triggers -Wpedantic warnings. */
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{