// SPDX-License-Identifier: GPL-2.0+
/*
 * IBM Power Systems Virtual Management Channel Support.
 *
 * Copyright (c) 2004, 2018 IBM Corp.
 *   Dave Engebretsen engebret@us.ibm.com
 *   Steven Royer seroyer@linux.vnet.ibm.com
 *   Adam Reznechek adreznec@linux.vnet.ibm.com
 *   Bryant G. Ly
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>

#include "ibmvmc.h"

#define IBMVMC_DRIVER_VERSION "1.0"

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

static const char ibmvmc_driver_name[] = "ibmvmc";

static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;

static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;

static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to source memory are visible before hcall */
	dma_wmb();
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}

static inline void h_free_crq(uint32_t unit_address)
{
	long rc = 0;

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));
		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}

/**
 * h_request_vmc: - request a hypervisor virtual management channel device
 * @vmc_index: drc index of the vmc device created
 *
 * Requests the hypervisor create a new virtual management channel device,
 * allowing this partition to send hypervisor virtualization control
 * commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static inline long h_request_vmc(u32 *vmc_index)
{
	long rc = 0;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		/* Call to request the VMC device from phyp */
		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		*vmc_index = retbuf[0];
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	return rc;
}

/* routines for managing a command/response queue */
/**
 * ibmvmc_handle_event: - Interrupt handler for crq events
 * @irq:	number of irq to handle, not used
 * @dev_instance: crq_server_adapter that received interrupt
 *
 * Disables interrupts and schedules ibmvmc_task
 *
 * Always returns IRQ_HANDLED
 */
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)dev_instance;

	vio_disable_interrupts(to_vio_dev(adapter->dev));
	tasklet_schedule(&adapter->work_task);

	return IRQ_HANDLED;
}

/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs),
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

/**
 * ibmvmc_reset_crq_queue - Reset CRQ Queue
 *
 * @adapter:	crq_server_adapter struct
 *
 * This function calls h_free_crq and then calls H_REG_CRQ and does all the
 * bookkeeping to get us back to where we can communicate.
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	/* The high bit of ->valid is set once the partner has filled in
	 * this entry of the ring.
	 */
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

/**
 * ibmvmc_send_crq - Send CRQ
 *
 * @adapter:	crq_server_adapter struct
 * @word1:	Word1 Data field
 * @word2:	Word2 Data field
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	long rc = 0;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vdev->unit_address, word1, word2);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the other side to prevent it from fetching any stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}

/**
 * alloc_dma_buffer - Create DMA Buffer
 *
 * @vdev:	vio_dev struct
 * @size:	Size field
 * @dma_handle:	DMA address field
 *
 * Allocates memory for the command queue and maps remote memory into an
 * ioba.
 *
 * Returns a pointer to the buffer
 */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_ATOMIC);

	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kzfree(buffer);
		return NULL;
	}

	return buffer;
}

/**
 * free_dma_buffer - Free DMA Buffer
 *
 * @vdev:	vio_dev struct
 * @size:	Size field
 * @vaddr:	Address field
 * @dma_handle:	DMA address field
 *
 * Releases memory for a command queue and unmaps mapped remote memory.
 */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* DMA unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* deallocate memory */
	kzfree(vaddr);
}

/**
 * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
 *
 * @hmc_index:	HMC Index Field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid && buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
 *
 * @adapter:	crq_server_adapter struct
 * @hmc_index:	Hmc Index field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_free_hmc_buffer - Free an HMC Buffer
 *
 * @hmc:	ibmvmc_hmc struct
 * @buffer:	ibmvmc_buffer struct
 */
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}

/**
 * ibmvmc_count_hmc_buffers - Count HMC Buffers
 *
 * @hmc_index:	HMC Index field
 * @valid:	Valid number of buffers field
 * @free:	Free number of buffers field
 */
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *buffer;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	*valid = 0;
	*free = 0;

	buffer = hmcs[hmc_index].buffer;
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			*valid = *valid + 1;
			if (buffer[i].free)
				*free = *free + 1;
		}
	}

	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}

/**
 * ibmvmc_get_free_hmc - Get Free HMC
 *
 * Return:
 *	Pointer to an available HMC Connection
 *	Null otherwise
 */
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
	unsigned long i;
	unsigned long flags;

	/*
	 * Find an available HMC connection.
	 */
	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
		spin_lock_irqsave(&hmcs[i].lock, flags);
		if (hmcs[i].state == ibmhmc_state_free) {
			hmcs[i].index = i;
			hmcs[i].state = ibmhmc_state_initial;
			spin_unlock_irqrestore(&hmcs[i].lock, flags);
			return &hmcs[i];
		}
		spin_unlock_irqrestore(&hmcs[i].lock, flags);
	}

	return NULL;
}

/**
 * ibmvmc_return_hmc - Return an HMC Connection
 *
 * @hmc:	ibmvmc_hmc struct
 * @release_readers: whether readers attached to the session should be
 *	released
 *
 * This function releases the HMC connection back into the pool.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
/*
 *      Driver for the MTX-1 Watchdog.
 *
 *      (C) Copyright 2005 4G Systems <info@4g-systems.biz>,
 *							All Rights Reserved.
 *                              http://www.4g-systems.biz
 *
 *	(C) Copyright 2007 OpenWrt.org, Florian Fainelli <florian@openwrt.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Neither Michael Stickel nor 4G Systems admit liability nor provide
 *      warranty for any of this software. This material is provided
 *      "AS-IS" and at no charge.
 *
 *      (c) Copyright 2005    4G Systems <info@4g-systems.biz>
 *
 *      Release 0.01.
 *      Author: Michael Stickel  michael.stickel@4g-systems.biz
 *
 *      Release 0.02.
 *	Author: Florian Fainelli florian@openwrt.org
 *		use the Linux watchdog/timer APIs
 *
 *      The Watchdog is configured to reset the MTX-1
 *      if it is not triggered for 100 seconds.
 *      It should not be triggered more often than once every 1.6 seconds.
 *
 *      A timer triggers the watchdog every 5 seconds, until
 *      it is opened for the first time. After the first open
 *      it MUST be triggered every 2..95 seconds.
 */
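
/*
 * For reference, a minimal userspace keepalive loop for this device might
 * look like the sketch below (hypothetical daemon, not part of this driver;
 * it relies only on the standard watchdog chardev API):
 *
 *	int fd = open("/dev/watchdog", O_WRONLY);
 *
 *	for (;;) {
 *		ioctl(fd, WDIOC_KEEPALIVE, 0);	// or: write(fd, "\0", 1)
 *		sleep(30);			// well inside the 2..95s window
 *	}
 */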

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/gpio.h>

#include <asm/mach-au1x00/au1000.h>

#define MTX1_WDT_INTERVAL	(5 * HZ)

static int ticks = 100 * HZ;

static struct {
	struct completion stop;
	spinlock_t lock;
	int running;
	struct timer_list timer;
	int queue;
	int default_ticks;
	unsigned long inuse;
	unsigned gpio;
	unsigned int gstate;
} mtx1_wdt_device;

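/*
 * Timer callback: feed the hardware watchdog by toggling its GPIO line.
 * Once the watchdog has been started, every trigger also consumes one unit
 * of the software tick budget (refilled by mtx1_wdt_reset()); the timer
 * re-arms itself until either the watchdog is stopped (queue == 0) or the
 * budget runs out, after which the line stops toggling and the hardware
 * resets the board.
 */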
static void mtx1_wdt_trigger(unsigned long unused)
{
	spin_lock(&mtx1_wdt_device.lock);
	if (mtx1_wdt_device.running)
		ticks--;

	/* toggle wdt gpio */
	mtx1_wdt_device.gstate = !mtx1_wdt_device.gstate;
	gpio_set_value(mtx1_wdt_device.gpio, mtx1_wdt_device.gstate);

	if (mtx1_wdt_device.queue && ticks)
		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
	else
		complete(&mtx1_wdt_device.stop);
	spin_unlock(&mtx1_wdt_device.lock);
}

static void mtx1_wdt_reset(void)
{
	ticks = mtx1_wdt_device.default_ticks;
}


static void mtx1_wdt_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
	if (!mtx1_wdt_device.queue) {
		mtx1_wdt_device.queue = 1;
		mtx1_wdt_device.gstate = 1;
		gpio_set_value(mtx1_wdt_device.gpio, 1);
		mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
	}
	mtx1_wdt_device.running++;
	spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
}

static int mtx1_wdt_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
	if (mtx1_wdt_device.queue) {
		mtx1_wdt_device.queue = 0;
		mtx1_wdt_device.gstate = 0;
		gpio_set_value(mtx1_wdt_device.gpio, 0);
	}
	ticks = mtx1_wdt_device.default_ticks;
	spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);
	return 0;
}

/* Filesystem functions */

static int mtx1_wdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(0, &mtx1_wdt_device.inuse))
		return -EBUSY;
	return nonseekable_open(inode, file);
}


static int mtx1_wdt_release(struct inode *inode, struct file *file)
{
	clear_bit(0, &mtx1_wdt_device.inuse);
	return 0;
}

static long mtx1_wdt_ioctl(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = (int __user *)argp;
	unsigned int value;
	static const struct watchdog_info ident = {
		.options = WDIOF_CARDRESET,
		.identity = "MTX-1 WDT",
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &ident, sizeof(ident)))
			return -EFAULT;
		break;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);
	case WDIOC_SETOPTIONS:
		if (get_user(value, p))
			return -EFAULT;
		if (value & WDIOS_ENABLECARD)
			mtx1_wdt_start();
		else if (value & WDIOS_DISABLECARD)
			mtx1_wdt_stop();
		else
			return -EINVAL;
		return 0;
	case WDIOC_KEEPALIVE:
		mtx1_wdt_reset();
		break;
	default:
		return -ENOTTY;
	}
	return 0;
}


static ssize_t mtx1_wdt_write(struct file *file, const char __user *buf,
						size_t count, loff_t *ppos)
{
	if (!count)
		return -EIO;
	mtx1_wdt_reset();
	return count;
}

static const struct file_operations mtx1_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.unlocked_ioctl	= mtx1_wdt_ioctl,
	.open		= mtx1_wdt_open,
	.write		= mtx1_wdt_write,
	.release	= mtx1_wdt_release,
};


static struct miscdevice mtx1_wdt_misc = {
	.minor	= WATCHDOG_MINOR,
	.name	= "watchdog",
	.fops	= &mtx1_wdt_fops,
};


static int mtx1_wdt_probe(struct platform_device *pdev)
{
	int ret;

	mtx1_wdt_device.gpio = pdev->resource[0].start;
	ret = devm_gpio_request_one(&pdev->dev, mtx1_wdt_device.gpio,
				GPIOF_OUT_INIT_HIGH, "mtx1-wdt");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request gpio\n");
		return ret;
	}

	spin_lock_init(&mtx1_wdt_device.lock);
	init_completion(&mtx1_wdt_device.stop);
	mtx1_wdt_device.queue = 0;
	clear_bit(0, &mtx1_wdt_device.inuse);
	setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
	mtx1_wdt_device.default_ticks = ticks;

	ret = misc_register(&mtx1_wdt_misc);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		return ret;
	}
	mtx1_wdt_start();
	dev_info(&pdev->dev, "MTX-1 Watchdog driver\n");
	return 0;
}

static int mtx1_wdt_remove(struct platform_device *pdev)
{
	/* FIXME: do we need to lock this test ? */
	if (mtx1_wdt_device.queue) {
		mtx1_wdt_device.queue = 0;
		wait_for_completion(&mtx1_wdt_device.stop);
	}

	misc_deregister(&mtx1_wdt_misc);
	return 0;
}

static struct platform_driver mtx1_wdt_driver = {
	.probe = mtx1_wdt_probe,
	.remove = mtx1_wdt_remove,
	.driver.name = "mtx1-wdt",
	.driver.owner = THIS_MODULE,
};

module_platform_driver(mtx1_wdt_driver);

MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mtx1-wdt");
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == 2) {
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev, queue->msg_token,
			 queue->size * sizeof(*queue->msgs),
			 DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}

/* Fill in the liobn and riobn fields on the adapter */
static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}

static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ.  rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message.  Note that this is allowed
	 * to fail if the other end is not active.  In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}

static int ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);

	return 0;
}

static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

static struct vio_driver ibmvmc_driver = {
	.name        = ibmvmc_driver_name,
	.id_table    = ibmvmc_device_table,
	.probe       = ibmvmc_probe,
	.remove      = ibmvmc_remove,
};

static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}

static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};

static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values.  Might be negotiated smaller
	 * values during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);
	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}

static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}

module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size, int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
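
/*
 * Example usage (hypothetical values): the module parameters declared above
 * can be set at load time; ibmvmc_scrub_module_parms() then clamps them to
 * the MIN and MAX bounds defined in ibmvmc.h:
 *
 *	modprobe ibmvmc max_hmcs=2 max_mtu=32768 buf_pool_size=16
 */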