Merge tag 'nvme-6.3-2023-03-31' of git://git.infradead.org/nvme into block-6.3

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 6.3

 - mark Lexar NM760 as IGNORE_DEV_SUBNQN (Juraj Pecigos)
 - fix a possible UAF when failing to allocate a TCP io queue (Sagi Grimberg)"

* tag 'nvme-6.3-2023-03-31' of git://git.infradead.org/nvme:
  nvme-tcp: fix a possible UAF when failing to allocate an io queue
  nvme-pci: mark Lexar NM760 as IGNORE_DEV_SUBNQN
commit 1a06ed2d42
drivers/nvme/host/pci.c:

@@ -3441,7 +3441,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
-		.driver_data = NVME_QUIRK_BOGUS_NID, },
+		.driver_data = NVME_QUIRK_BOGUS_NID |
+			       NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
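For context: driver_data in nvme_id_table is a bitmask of per-device quirks, which is why the new quirk is OR'd onto the existing one rather than replacing it. A minimal user-space sketch of that flag pattern follows; the bit positions here are illustrative only, the real values live in the enum nvme_quirks definition in drivers/nvme/host/nvme.h.

#include <stdio.h>

/* Illustrative quirk bits in the style of the kernel's enum nvme_quirks;
 * the actual bit positions are defined in drivers/nvme/host/nvme.h. */
enum {
	NVME_QUIRK_BOGUS_NID         = (1 << 0),	/* illustrative value */
	NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 1),	/* illustrative value */
};

int main(void)
{
	/* driver_data for the Lexar NM760 after this patch: both quirks OR'd. */
	unsigned long quirks = NVME_QUIRK_BOGUS_NID | NVME_QUIRK_IGNORE_DEV_SUBNQN;

	/* The driver later tests individual bits, e.g. before trusting the
	 * subsystem NQN reported by the device. */
	if (quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)
		printf("ignoring device-reported subsystem NQN\n");
	return 0;
}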
drivers/nvme/host/tcp.c:

@@ -1620,22 +1620,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	if (ret)
 		goto err_init_connect;
 
-	queue->rd_enabled = true;
 	set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
-	nvme_tcp_init_recv_ctx(queue);
-
-	write_lock_bh(&queue->sock->sk->sk_callback_lock);
-	queue->sock->sk->sk_user_data = queue;
-	queue->state_change = queue->sock->sk->sk_state_change;
-	queue->data_ready = queue->sock->sk->sk_data_ready;
-	queue->write_space = queue->sock->sk->sk_write_space;
-	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
-	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
-	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	queue->sock->sk->sk_ll_usec = 1;
-#endif
-	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
 	return 0;
 
@@ -1655,7 +1640,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
 	return ret;
 }
 
-static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
 {
 	struct socket *sock = queue->sock;
 
@@ -1670,7 +1655,7 @@ static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
 {
 	kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-	nvme_tcp_restore_sock_calls(queue);
+	nvme_tcp_restore_sock_ops(queue);
 	cancel_work_sync(&queue->io_work);
 }
 
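The rename above makes "restore" the counterpart of the nvme_tcp_setup_sock_ops() helper introduced in the next hunk: setup saves the socket's original callbacks and installs the driver's, restore puts the saved ones back during teardown. A minimal user-space model of that save/swap/restore pattern, with hypothetical names throughout:

#include <stdio.h>

/* Hypothetical stand-ins for struct sock and struct nvme_tcp_queue. */
struct sock_model {
	void (*data_ready)(void);
};

struct queue_model {
	struct sock_model *sk;
	void (*saved_data_ready)(void);	/* original callback, kept for restore */
};

static void default_data_ready(void) { puts("default"); }
static void nvme_data_ready(void)    { puts("nvme"); }

static void setup_sock_ops(struct queue_model *q)
{
	q->saved_data_ready = q->sk->data_ready;	/* save the original */
	q->sk->data_ready = nvme_data_ready;		/* install the driver's */
}

static void restore_sock_ops(struct queue_model *q)
{
	q->sk->data_ready = q->saved_data_ready;	/* undo on teardown */
}

int main(void)
{
	struct sock_model sk = { default_data_ready };
	struct queue_model q = { &sk, NULL };

	setup_sock_ops(&q);
	sk.data_ready();	/* prints "nvme" */
	restore_sock_ops(&q);
	sk.data_ready();	/* prints "default" */
	return 0;
}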
@@ -1688,21 +1673,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
 	mutex_unlock(&queue->queue_lock);
 }
 
+static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
+{
+	write_lock_bh(&queue->sock->sk->sk_callback_lock);
+	queue->sock->sk->sk_user_data = queue;
+	queue->state_change = queue->sock->sk->sk_state_change;
+	queue->data_ready = queue->sock->sk->sk_data_ready;
+	queue->write_space = queue->sock->sk->sk_write_space;
+	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	queue->sock->sk->sk_ll_usec = 1;
+#endif
+	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+}
+
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
 {
 	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+	struct nvme_tcp_queue *queue = &ctrl->queues[idx];
 	int ret;
 
+	queue->rd_enabled = true;
+	nvme_tcp_init_recv_ctx(queue);
+	nvme_tcp_setup_sock_ops(queue);
+
 	if (idx)
 		ret = nvmf_connect_io_queue(nctrl, idx);
 	else
 		ret = nvmf_connect_admin_queue(nctrl);
 
 	if (!ret) {
-		set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+		set_bit(NVME_TCP_Q_LIVE, &queue->flags);
 	} else {
-		if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
-			__nvme_tcp_stop_queue(&ctrl->queues[idx]);
+		if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+			__nvme_tcp_stop_queue(queue);
 		dev_err(nctrl->device,
 			"failed to connect queue: %d ret=%d\n", idx, ret);
 	}
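Why this ordering closes the UAF: before the patch, nvme_tcp_alloc_queue() pointed the socket callbacks and sk_user_data at the queue before allocation was complete, so if a later step failed and the queue was freed, the socket could still invoke callbacks on dead memory. Moving the callback setup into nvme_tcp_start_queue() means the socket only ever references a fully allocated queue. A stripped-down user-space model of the two shapes, with hypothetical names throughout (not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for sk_user_data / sk_data_ready. */
struct queue { int live; };
static struct queue *sk_user_data;
static void (*sk_data_ready)(void);

static void queue_data_ready(void)
{
	/* Dereferences sk_user_data: a UAF if the queue was already freed. */
	printf("data ready, live=%d\n", sk_user_data->live);
}

/* Pre-patch shape: callbacks wired up in the middle of allocation. */
static struct queue *alloc_queue_buggy(int fail_late)
{
	struct queue *q = calloc(1, sizeof(*q));
	sk_user_data = q;
	sk_data_ready = queue_data_ready;	/* installed too early */
	if (fail_late) {			/* a later setup step fails... */
		free(q);			/* ...leaving the callback dangling */
		return NULL;
	}
	return q;
}

/* Post-patch shape: allocation finishes first, callbacks wired at start. */
static struct queue *alloc_queue_fixed(int fail_late)
{
	struct queue *q = calloc(1, sizeof(*q));
	if (fail_late) {
		free(q);	/* no callback points at q yet: no UAF window */
		return NULL;
	}
	return q;
}

static void start_queue(struct queue *q)
{
	sk_user_data = q;
	sk_data_ready = queue_data_ready;	/* q is fully constructed here */
	q->live = 1;
}

int main(void)
{
	if (!alloc_queue_buggy(1) && sk_data_ready)
		fprintf(stderr, "pre-patch: dangling callback left installed\n");

	sk_user_data = NULL;
	sk_data_ready = NULL;

	struct queue *q = alloc_queue_fixed(0);
	start_queue(q);
	sk_data_ready();	/* safe: queue is live before callbacks exist */
	free(q);
	return 0;
}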