/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;
	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test whether that offset falls in
 * the range (old_head, new_head], taking queue wrap-around into
 * account.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
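/* A short worked example of the check above (illustrative only, offsets
 * assumed to be entry-size aligned): if old_head == 0x80 and
 * new_head == 0x40, the queue has wrapped, so offsets 0xc0, 0x100, ...
 * and everything up through 0x40 count as finished, while 0x80 itself
 * does not; the range is exclusive of old_head and inclusive of
 * new_head.
 */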
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

/* Number of free entries in the queue, keeping one slot unused so
 * that head == tail always means "empty".
 */
static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

/* Build the initial 64-bit control word of a CWQ entry from the
 * request length, operation type and flag bits.
 */
static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash	*fallback_tfm;
};

#define N2_HASH_KEY_MAX		32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx	base;

	struct crypto_shash	*child_shash;

	int			hash_key_len;
	unsigned char		hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request	fallback_req;
};
static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const
/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef UFS_QCOM_PHY_QMP_14NM_H_
#define UFS_QCOM_PHY_QMP_14NM_H_

#include "phy-qcom-ufs-i.h"

/* QCOM UFS PHY control registers */
#define COM_OFF(x)	(0x000 + x)
#define PHY_OFF(x)	(0xC00 + x)
#define TX_OFF(n, x)	(0x400 + (0x400 * n) + x)
#define RX_OFF(n, x)	(0x600 + (0x400 * n) + x)
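/*
 * Example of the resulting address map (a sketch assuming the usual
 * two-lane QMP layout; only the macros above are used):
 *
 *	COM_OFF(0x34)   -> 0x034   QSERDES common block
 *	TX_OFF(0, 0x94) -> 0x494   lane 0 TX
 *	RX_OFF(0, 0x40) -> 0x640   lane 0 RX
 *	TX_OFF(1, 0x94) -> 0x894   lane 1 TX
 *	PHY_OFF(0x04)   -> 0xC04   PCS/PHY block
 */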

/* UFS PHY QSERDES COM registers */
#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x4C)
#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x50)
#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0x54)
#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0x58)
#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0x5C)
#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0x60)
#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x88)
#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x90)
#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xD4)
#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xDC)
#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xE0)
#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xE4)
#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xE8)
#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xEC)
#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xF0)
#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0x108)
#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0x10C)
#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0x110)
#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0x114)
#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0x124)
#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0x128)
#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0x12C)
#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
#define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
#define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)

/* UFS PHY registers */
#define UFS_PHY_PHY_START			PHY_OFF(0x00)
#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)

/* UFS PHY TX registers */
#define QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN	TX_OFF(0, 0x68)
#define QSERDES_TX_LANE_MODE				TX_OFF(0, 0x94)

/* UFS PHY RX registers */
#define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0, 0x40)
#define QSERDES_RX_RX_TERM_BW			RX_OFF(0, 0x90)
#define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0, 0xC4)
#define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0, 0xC8)
#define QSERDES_RX_RX_EQ_GAIN2_LSB		RX_OFF(0, 0xCC)
#define QSERDES_RX_RX_EQ_GAIN2_MSB		RX_OFF(0, 0xD0)
#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2	RX_OFF(0, 0xD8)
#define QSERDES_RX_SIGDET_CNTRL			RX_OFF(0, 0x114)
#define QSERDES_RX_SIGDET_LVL			RX_OFF(0, 0x118)
#define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0, 0x11C)
#define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0, 0x12C)

/*
 * This structure represents the 14nm specific phy.
 * common_cfg MUST remain the first field in this structure
 * in case extra fields are added. This way, when calling
 * get_ufs_qcom_phy() on the generic phy, we can extract the
 * common phy structure (struct ufs_qcom_phy) out of it
 * regardless of which specific phy it actually is.
 */
struct ufs_qcom_phy_qmp_14nm {
	struct ufs_qcom_phy common_cfg;
};
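
/*
 * A minimal sketch of what the comment above implies (the helper name is
 * hypothetical; the real accessor, get_ufs_qcom_phy(), lives in the
 * generic UFS PHY code): because common_cfg is the first field, the
 * specific phy can always be viewed as its embedded common structure,
 * and the reverse mapping is a container_of() away.
 */
static inline struct ufs_qcom_phy *
ufs_qcom_phy_qmp_14nm_to_common(struct ufs_qcom_phy_qmp_14nm *phy_14nm)
{
	return &phy_14nm->common_cfg;
}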

static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x10),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),

	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),

	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
};

static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
};
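
/*
 * Tables like the two above are normally walked entry by entry at
 * calibration time. A minimal sketch of how that would look, assuming
 * the reg_offset/cfg_value fields set up by UFS_QCOM_PHY_CAL_ENTRY() in
 * phy-qcom-ufs-i.h and an ioremapped PHY base called mmio (the mmio
 * name is an assumption here, not part of this header):
 *
 *	for (i = 0; i < ARRAY_SIZE(phy_cal_table_rate_A); i++)
 *		writel_relaxed(phy_cal_table_rate_A[i].cfg_value,
 *			       mmio + phy_cal_table_rate_A[i].reg_offset);
 *	mb();
 */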

#endif
);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev,
			     struct spu_mdesc_info *ip, unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES * MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES * CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
}

static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}

static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}

		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
				dev->dev.of_node->full_name);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq'.
 */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev,
			    struct mdesc_handle *mdesc, u64 node,
			    const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc,
			  struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		printk("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) * ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];

		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major, n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;
out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	if (np->cwq_info.ino_table) {
		kfree(np->cwq_info.ino_table);
		np->cwq_info.ino_table = NULL;
	}

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	if (mp->mau_info.ino_table) {
		kfree(mp->mau_info.ino_table);
		mp->mau_info.ino_table = NULL;
	}

	kfree(mp);
}

static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab global resources.\n",
			full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev,
			     &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		= "n2cp",
		.of_match_table	= n2_crypto_match,
	},
	.probe		= n2_crypto_probe,
	.remove		= n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		= "ncp",
		.of_match_table	= n2_mau_match,
	},
	.probe		= n2_mau_probe,
	.remove		= n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);