drivers/nvme/host/rdma.c:554: undefined reference to `rdma_disconnect'

From: kbuild test robot
Date: Mon May 28 2018 - 12:28:50 EST


tree: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master
head: 786b71f5b754273ccef6d9462e52062b3e1f9877
commit: 9533b292a7acc62c294ebcbd9e1f9f9d52adb10b IB: remove redundant INFINIBAND kconfig dependencies
date: 3 weeks ago
config: x86_64-randconfig-s2-05282235 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
git checkout 9533b292a7acc62c294ebcbd9e1f9f9d52adb10b
# save the attached .config to linux build tree
make ARCH=x86_64

All errors (new ones prefixed by >>):

drivers/nvme/host/rdma.o: In function `nvme_rdma_stop_queue':
>> drivers/nvme/host/rdma.c:554: undefined reference to `rdma_disconnect'
>> drivers/nvme/host/rdma.c:555: undefined reference to `ib_drain_qp'
drivers/nvme/host/rdma.o: In function `nvme_rdma_free_dev':
>> drivers/nvme/host/rdma.c:332: undefined reference to `ib_dealloc_pd'
drivers/nvme/host/rdma.o: In function `nvme_rdma_unmap_data':
>> drivers/nvme/host/rdma.c:1065: undefined reference to `ib_mr_pool_put'
drivers/nvme/host/rdma.o: In function `nvme_rdma_map_sg_fr':
>> drivers/nvme/host/rdma.c:1124: undefined reference to `ib_mr_pool_get'
drivers/nvme/host/rdma.c:1134: undefined reference to `ib_mr_pool_put'
>> drivers/nvme/host/rdma.c:1132: undefined reference to `ib_map_mr_sg'
drivers/nvme/host/rdma.o: In function `nvme_rdma_wr_error':
>> drivers/nvme/host/rdma.c:1007: undefined reference to `ib_wc_status_msg'
drivers/nvme/host/rdma.o: In function `nvme_rdma_create_qp':
>> drivers/nvme/host/rdma.c:258: undefined reference to `rdma_create_qp'
drivers/nvme/host/rdma.o: In function `nvme_rdma_destroy_queue_ib':
>> drivers/nvme/host/rdma.c:402: undefined reference to `ib_mr_pool_destroy'
>> drivers/nvme/host/rdma.c:409: undefined reference to `ib_destroy_qp'
>> drivers/nvme/host/rdma.c:410: undefined reference to `ib_free_cq'
drivers/nvme/host/rdma.o: In function `nvme_rdma_free_queue':
>> drivers/nvme/host/rdma.c:570: undefined reference to `rdma_destroy_id'
drivers/nvme/host/rdma.o: In function `nvme_rdma_alloc_queue':
>> drivers/nvme/host/rdma.c:511: undefined reference to `__rdma_create_id'
>> drivers/nvme/host/rdma.c:523: undefined reference to `rdma_resolve_addr'
drivers/nvme/host/rdma.c:544: undefined reference to `rdma_destroy_id'
drivers/nvme/host/rdma.o: In function `nvme_rdma_find_get_device':
>> drivers/nvme/host/rdma.c:365: undefined reference to `__ib_alloc_pd'
drivers/nvme/host/rdma.c:383: undefined reference to `ib_dealloc_pd'
drivers/nvme/host/rdma.o: In function `nvme_rdma_create_queue_ib':
>> drivers/nvme/host/rdma.c:447: undefined reference to `__ib_alloc_cq'
>> drivers/nvme/host/rdma.c:485: undefined reference to `rdma_destroy_qp'
drivers/nvme/host/rdma.c:487: undefined reference to `ib_free_cq'
drivers/nvme/host/rdma.o: In function `nvme_rdma_addr_resolved':
>> drivers/nvme/host/rdma.c:1461: undefined reference to `rdma_resolve_route'
drivers/nvme/host/rdma.o: In function `nvme_rdma_route_resolved':
>> drivers/nvme/host/rdma.c:1512: undefined reference to `rdma_connect'
drivers/nvme/host/rdma.o: In function `nvme_rdma_conn_rejected':
>> drivers/nvme/host/rdma.c:1436: undefined reference to `rdma_reject_msg'

vim +554 drivers/nvme/host/rdma.c

71102307 Christoph Hellwig 2016-07-06 345
71102307 Christoph Hellwig 2016-07-06 346 static struct nvme_rdma_device *
71102307 Christoph Hellwig 2016-07-06 347 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
71102307 Christoph Hellwig 2016-07-06 348 {
71102307 Christoph Hellwig 2016-07-06 349 struct nvme_rdma_device *ndev;
71102307 Christoph Hellwig 2016-07-06 350
71102307 Christoph Hellwig 2016-07-06 351 mutex_lock(&device_list_mutex);
71102307 Christoph Hellwig 2016-07-06 352 list_for_each_entry(ndev, &device_list, entry) {
71102307 Christoph Hellwig 2016-07-06 353 if (ndev->dev->node_guid == cm_id->device->node_guid &&
71102307 Christoph Hellwig 2016-07-06 354 nvme_rdma_dev_get(ndev))
71102307 Christoph Hellwig 2016-07-06 355 goto out_unlock;
71102307 Christoph Hellwig 2016-07-06 356 }
71102307 Christoph Hellwig 2016-07-06 357
71102307 Christoph Hellwig 2016-07-06 358 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
71102307 Christoph Hellwig 2016-07-06 359 if (!ndev)
71102307 Christoph Hellwig 2016-07-06 360 goto out_err;
71102307 Christoph Hellwig 2016-07-06 361
71102307 Christoph Hellwig 2016-07-06 362 ndev->dev = cm_id->device;
71102307 Christoph Hellwig 2016-07-06 363 kref_init(&ndev->ref);
71102307 Christoph Hellwig 2016-07-06 364
11975e01 Christoph Hellwig 2016-09-05 @365 ndev->pd = ib_alloc_pd(ndev->dev,
11975e01 Christoph Hellwig 2016-09-05 366 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
71102307 Christoph Hellwig 2016-07-06 367 if (IS_ERR(ndev->pd))
71102307 Christoph Hellwig 2016-07-06 368 goto out_free_dev;
71102307 Christoph Hellwig 2016-07-06 369
71102307 Christoph Hellwig 2016-07-06 370 if (!(ndev->dev->attrs.device_cap_flags &
71102307 Christoph Hellwig 2016-07-06 371 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
71102307 Christoph Hellwig 2016-07-06 372 dev_err(&ndev->dev->dev,
71102307 Christoph Hellwig 2016-07-06 373 "Memory registrations not supported.\n");
11975e01 Christoph Hellwig 2016-09-05 374 goto out_free_pd;
71102307 Christoph Hellwig 2016-07-06 375 }
71102307 Christoph Hellwig 2016-07-06 376
71102307 Christoph Hellwig 2016-07-06 377 list_add(&ndev->entry, &device_list);
71102307 Christoph Hellwig 2016-07-06 378 out_unlock:
71102307 Christoph Hellwig 2016-07-06 379 mutex_unlock(&device_list_mutex);
71102307 Christoph Hellwig 2016-07-06 380 return ndev;
71102307 Christoph Hellwig 2016-07-06 381
71102307 Christoph Hellwig 2016-07-06 382 out_free_pd:
71102307 Christoph Hellwig 2016-07-06 383 ib_dealloc_pd(ndev->pd);
71102307 Christoph Hellwig 2016-07-06 384 out_free_dev:
71102307 Christoph Hellwig 2016-07-06 385 kfree(ndev);
71102307 Christoph Hellwig 2016-07-06 386 out_err:
71102307 Christoph Hellwig 2016-07-06 387 mutex_unlock(&device_list_mutex);
71102307 Christoph Hellwig 2016-07-06 388 return NULL;
71102307 Christoph Hellwig 2016-07-06 389 }
71102307 Christoph Hellwig 2016-07-06 390
71102307 Christoph Hellwig 2016-07-06 391 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
71102307 Christoph Hellwig 2016-07-06 392 {
eb1bd249 Max Gurtovoy 2017-11-28 393 struct nvme_rdma_device *dev;
eb1bd249 Max Gurtovoy 2017-11-28 394 struct ib_device *ibdev;
eb1bd249 Max Gurtovoy 2017-11-28 395
eb1bd249 Max Gurtovoy 2017-11-28 396 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
eb1bd249 Max Gurtovoy 2017-11-28 397 return;
eb1bd249 Max Gurtovoy 2017-11-28 398
eb1bd249 Max Gurtovoy 2017-11-28 399 dev = queue->device;
eb1bd249 Max Gurtovoy 2017-11-28 400 ibdev = dev->dev;
71102307 Christoph Hellwig 2016-07-06 401
f41725bb Israel Rukshin 2017-11-26 @402 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
f41725bb Israel Rukshin 2017-11-26 403
eb1bd249 Max Gurtovoy 2017-11-28 404 /*
eb1bd249 Max Gurtovoy 2017-11-28 405 * The cm_id object might have been destroyed during RDMA connection
eb1bd249 Max Gurtovoy 2017-11-28 406 * establishment error flow to avoid getting other cma events, thus
eb1bd249 Max Gurtovoy 2017-11-28 407 * the destruction of the QP shouldn't use rdma_cm API.
eb1bd249 Max Gurtovoy 2017-11-28 408 */
eb1bd249 Max Gurtovoy 2017-11-28 @409 ib_destroy_qp(queue->qp);
71102307 Christoph Hellwig 2016-07-06 @410 ib_free_cq(queue->ib_cq);
71102307 Christoph Hellwig 2016-07-06 411
71102307 Christoph Hellwig 2016-07-06 412 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
71102307 Christoph Hellwig 2016-07-06 413 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 Christoph Hellwig 2016-07-06 414
71102307 Christoph Hellwig 2016-07-06 415 nvme_rdma_dev_put(dev);
71102307 Christoph Hellwig 2016-07-06 416 }
71102307 Christoph Hellwig 2016-07-06 417
f41725bb Israel Rukshin 2017-11-26 418 static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
f41725bb Israel Rukshin 2017-11-26 419 {
f41725bb Israel Rukshin 2017-11-26 420 return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
f41725bb Israel Rukshin 2017-11-26 421 ibdev->attrs.max_fast_reg_page_list_len);
f41725bb Israel Rukshin 2017-11-26 422 }
f41725bb Israel Rukshin 2017-11-26 423
ca6e95bb Sagi Grimberg 2017-05-04 424 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
71102307 Christoph Hellwig 2016-07-06 425 {
ca6e95bb Sagi Grimberg 2017-05-04 426 struct ib_device *ibdev;
71102307 Christoph Hellwig 2016-07-06 427 const int send_wr_factor = 3; /* MR, SEND, INV */
71102307 Christoph Hellwig 2016-07-06 428 const int cq_factor = send_wr_factor + 1; /* + RECV */
71102307 Christoph Hellwig 2016-07-06 429 int comp_vector, idx = nvme_rdma_queue_idx(queue);
71102307 Christoph Hellwig 2016-07-06 430 int ret;
71102307 Christoph Hellwig 2016-07-06 431
ca6e95bb Sagi Grimberg 2017-05-04 432 queue->device = nvme_rdma_find_get_device(queue->cm_id);
ca6e95bb Sagi Grimberg 2017-05-04 433 if (!queue->device) {
ca6e95bb Sagi Grimberg 2017-05-04 434 dev_err(queue->cm_id->device->dev.parent,
ca6e95bb Sagi Grimberg 2017-05-04 435 "no client data found!\n");
ca6e95bb Sagi Grimberg 2017-05-04 436 return -ECONNREFUSED;
ca6e95bb Sagi Grimberg 2017-05-04 437 }
ca6e95bb Sagi Grimberg 2017-05-04 438 ibdev = queue->device->dev;
71102307 Christoph Hellwig 2016-07-06 439
71102307 Christoph Hellwig 2016-07-06 440 /*
0b36658c Sagi Grimberg 2017-07-13 441 * Spread I/O queues completion vectors according to their queue index.
0b36658c Sagi Grimberg 2017-07-13 442 * Admin queues can always go on completion vector 0.
71102307 Christoph Hellwig 2016-07-06 443 */
0b36658c Sagi Grimberg 2017-07-13 444 comp_vector = idx == 0 ? idx : idx - 1;
71102307 Christoph Hellwig 2016-07-06 445
71102307 Christoph Hellwig 2016-07-06 446 /* +1 for ib_stop_cq */
ca6e95bb Sagi Grimberg 2017-05-04 @447 queue->ib_cq = ib_alloc_cq(ibdev, queue,
ca6e95bb Sagi Grimberg 2017-05-04 448 cq_factor * queue->queue_size + 1,
ca6e95bb Sagi Grimberg 2017-05-04 449 comp_vector, IB_POLL_SOFTIRQ);
71102307 Christoph Hellwig 2016-07-06 450 if (IS_ERR(queue->ib_cq)) {
71102307 Christoph Hellwig 2016-07-06 451 ret = PTR_ERR(queue->ib_cq);
ca6e95bb Sagi Grimberg 2017-05-04 452 goto out_put_dev;
71102307 Christoph Hellwig 2016-07-06 453 }
71102307 Christoph Hellwig 2016-07-06 454
71102307 Christoph Hellwig 2016-07-06 455 ret = nvme_rdma_create_qp(queue, send_wr_factor);
71102307 Christoph Hellwig 2016-07-06 456 if (ret)
71102307 Christoph Hellwig 2016-07-06 457 goto out_destroy_ib_cq;
71102307 Christoph Hellwig 2016-07-06 458
71102307 Christoph Hellwig 2016-07-06 459 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
71102307 Christoph Hellwig 2016-07-06 460 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 Christoph Hellwig 2016-07-06 461 if (!queue->rsp_ring) {
71102307 Christoph Hellwig 2016-07-06 462 ret = -ENOMEM;
71102307 Christoph Hellwig 2016-07-06 463 goto out_destroy_qp;
71102307 Christoph Hellwig 2016-07-06 464 }
71102307 Christoph Hellwig 2016-07-06 465
f41725bb Israel Rukshin 2017-11-26 @466 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
f41725bb Israel Rukshin 2017-11-26 467 queue->queue_size,
f41725bb Israel Rukshin 2017-11-26 468 IB_MR_TYPE_MEM_REG,
f41725bb Israel Rukshin 2017-11-26 469 nvme_rdma_get_max_fr_pages(ibdev));
f41725bb Israel Rukshin 2017-11-26 470 if (ret) {
f41725bb Israel Rukshin 2017-11-26 471 dev_err(queue->ctrl->ctrl.device,
f41725bb Israel Rukshin 2017-11-26 472 "failed to initialize MR pool sized %d for QID %d\n",
f41725bb Israel Rukshin 2017-11-26 473 queue->queue_size, idx);
f41725bb Israel Rukshin 2017-11-26 474 goto out_destroy_ring;
f41725bb Israel Rukshin 2017-11-26 475 }
f41725bb Israel Rukshin 2017-11-26 476
eb1bd249 Max Gurtovoy 2017-11-28 477 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
eb1bd249 Max Gurtovoy 2017-11-28 478
71102307 Christoph Hellwig 2016-07-06 479 return 0;
71102307 Christoph Hellwig 2016-07-06 480
f41725bb Israel Rukshin 2017-11-26 481 out_destroy_ring:
f41725bb Israel Rukshin 2017-11-26 482 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
f41725bb Israel Rukshin 2017-11-26 483 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 Christoph Hellwig 2016-07-06 484 out_destroy_qp:
1f61def9 Max Gurtovoy 2017-11-06 @485 rdma_destroy_qp(queue->cm_id);
71102307 Christoph Hellwig 2016-07-06 486 out_destroy_ib_cq:
71102307 Christoph Hellwig 2016-07-06 @487 ib_free_cq(queue->ib_cq);
ca6e95bb Sagi Grimberg 2017-05-04 488 out_put_dev:
ca6e95bb Sagi Grimberg 2017-05-04 489 nvme_rdma_dev_put(queue->device);
71102307 Christoph Hellwig 2016-07-06 490 return ret;
71102307 Christoph Hellwig 2016-07-06 491 }
71102307 Christoph Hellwig 2016-07-06 492
41e8cfa1 Sagi Grimberg 2017-07-10 493 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
71102307 Christoph Hellwig 2016-07-06 494 int idx, size_t queue_size)
71102307 Christoph Hellwig 2016-07-06 495 {
71102307 Christoph Hellwig 2016-07-06 496 struct nvme_rdma_queue *queue;
8f4e8dac Max Gurtovoy 2017-02-19 497 struct sockaddr *src_addr = NULL;
71102307 Christoph Hellwig 2016-07-06 498 int ret;
71102307 Christoph Hellwig 2016-07-06 499
71102307 Christoph Hellwig 2016-07-06 500 queue = &ctrl->queues[idx];
71102307 Christoph Hellwig 2016-07-06 501 queue->ctrl = ctrl;
71102307 Christoph Hellwig 2016-07-06 502 init_completion(&queue->cm_done);
71102307 Christoph Hellwig 2016-07-06 503
71102307 Christoph Hellwig 2016-07-06 504 if (idx > 0)
71102307 Christoph Hellwig 2016-07-06 505 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
71102307 Christoph Hellwig 2016-07-06 506 else
71102307 Christoph Hellwig 2016-07-06 507 queue->cmnd_capsule_len = sizeof(struct nvme_command);
71102307 Christoph Hellwig 2016-07-06 508
71102307 Christoph Hellwig 2016-07-06 509 queue->queue_size = queue_size;
71102307 Christoph Hellwig 2016-07-06 510
71102307 Christoph Hellwig 2016-07-06 @511 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
71102307 Christoph Hellwig 2016-07-06 512 RDMA_PS_TCP, IB_QPT_RC);
71102307 Christoph Hellwig 2016-07-06 513 if (IS_ERR(queue->cm_id)) {
71102307 Christoph Hellwig 2016-07-06 514 dev_info(ctrl->ctrl.device,
71102307 Christoph Hellwig 2016-07-06 515 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
71102307 Christoph Hellwig 2016-07-06 516 return PTR_ERR(queue->cm_id);
71102307 Christoph Hellwig 2016-07-06 517 }
71102307 Christoph Hellwig 2016-07-06 518
8f4e8dac Max Gurtovoy 2017-02-19 519 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
0928f9b4 Sagi Grimberg 2017-02-05 520 src_addr = (struct sockaddr *)&ctrl->src_addr;
8f4e8dac Max Gurtovoy 2017-02-19 521
0928f9b4 Sagi Grimberg 2017-02-05 522 queue->cm_error = -ETIMEDOUT;
0928f9b4 Sagi Grimberg 2017-02-05 @523 ret = rdma_resolve_addr(queue->cm_id, src_addr,
0928f9b4 Sagi Grimberg 2017-02-05 524 (struct sockaddr *)&ctrl->addr,
71102307 Christoph Hellwig 2016-07-06 525 NVME_RDMA_CONNECT_TIMEOUT_MS);
71102307 Christoph Hellwig 2016-07-06 526 if (ret) {
71102307 Christoph Hellwig 2016-07-06 527 dev_info(ctrl->ctrl.device,
71102307 Christoph Hellwig 2016-07-06 528 "rdma_resolve_addr failed (%d).\n", ret);
71102307 Christoph Hellwig 2016-07-06 529 goto out_destroy_cm_id;
71102307 Christoph Hellwig 2016-07-06 530 }
71102307 Christoph Hellwig 2016-07-06 531
71102307 Christoph Hellwig 2016-07-06 532 ret = nvme_rdma_wait_for_cm(queue);
71102307 Christoph Hellwig 2016-07-06 533 if (ret) {
71102307 Christoph Hellwig 2016-07-06 534 dev_info(ctrl->ctrl.device,
d8bfceeb Sagi Grimberg 2017-10-11 535 "rdma connection establishment failed (%d)\n", ret);
71102307 Christoph Hellwig 2016-07-06 536 goto out_destroy_cm_id;
71102307 Christoph Hellwig 2016-07-06 537 }
71102307 Christoph Hellwig 2016-07-06 538
5013e98b Sagi Grimberg 2017-10-11 539 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
71102307 Christoph Hellwig 2016-07-06 540
71102307 Christoph Hellwig 2016-07-06 541 return 0;
71102307 Christoph Hellwig 2016-07-06 542
71102307 Christoph Hellwig 2016-07-06 543 out_destroy_cm_id:
71102307 Christoph Hellwig 2016-07-06 @544 rdma_destroy_id(queue->cm_id);
eb1bd249 Max Gurtovoy 2017-11-28 545 nvme_rdma_destroy_queue_ib(queue);
71102307 Christoph Hellwig 2016-07-06 546 return ret;
71102307 Christoph Hellwig 2016-07-06 547 }
71102307 Christoph Hellwig 2016-07-06 548
71102307 Christoph Hellwig 2016-07-06 549 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
71102307 Christoph Hellwig 2016-07-06 550 {
a57bd541 Sagi Grimberg 2017-08-28 551 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
a57bd541 Sagi Grimberg 2017-08-28 552 return;
a57bd541 Sagi Grimberg 2017-08-28 553
71102307 Christoph Hellwig 2016-07-06 @554 rdma_disconnect(queue->cm_id);
71102307 Christoph Hellwig 2016-07-06 @555 ib_drain_qp(queue->qp);
71102307 Christoph Hellwig 2016-07-06 556 }
71102307 Christoph Hellwig 2016-07-06 557
71102307 Christoph Hellwig 2016-07-06 558 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
71102307 Christoph Hellwig 2016-07-06 559 {
5013e98b Sagi Grimberg 2017-10-11 560 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
a57bd541 Sagi Grimberg 2017-08-28 561 return;
a57bd541 Sagi Grimberg 2017-08-28 562
bd9f0759 Sagi Grimberg 2017-10-19 563 if (nvme_rdma_queue_idx(queue) == 0) {
bd9f0759 Sagi Grimberg 2017-10-19 564 nvme_rdma_free_qe(queue->device->dev,
bd9f0759 Sagi Grimberg 2017-10-19 565 &queue->ctrl->async_event_sqe,
bd9f0759 Sagi Grimberg 2017-10-19 566 sizeof(struct nvme_command), DMA_TO_DEVICE);
bd9f0759 Sagi Grimberg 2017-10-19 567 }
bd9f0759 Sagi Grimberg 2017-10-19 568
71102307 Christoph Hellwig 2016-07-06 569 nvme_rdma_destroy_queue_ib(queue);
71102307 Christoph Hellwig 2016-07-06 @570 rdma_destroy_id(queue->cm_id);
71102307 Christoph Hellwig 2016-07-06 571 }
71102307 Christoph Hellwig 2016-07-06 572

:::::: The code at line 554 was first introduced by commit
:::::: 7110230719602852481c2793d054f866b2bf4a2b nvme-rdma: add a NVMe over Fabrics RDMA host driver

:::::: TO: Christoph Hellwig <hch@xxxxxx>
:::::: CC: Jens Axboe <axboe@xxxxxx>

---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation

Attachment: .config.gz
Description: application/gzip