/* * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved. * Copyright (c) 2019 Vitaly Chikunov * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include "ecc.h" #include "ecc_curve_defs.h" typedef struct { u64 m_low; u64 m_high; } uint128_t; static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id) { switch (curve_id) { /* In FIPS mode only allow P256 and higher */ case ECC_CURVE_NIST_P192: return fips_enabled ? 
NULL : &nist_p192; case ECC_CURVE_NIST_P256: return &nist_p256; default: return NULL; } } static u64 *ecc_alloc_digits_space(unsigned int ndigits) { size_t len = ndigits * sizeof(u64); if (!len) return NULL; return kmalloc(len, GFP_KERNEL); } static void ecc_free_digits_space(u64 *space) { kzfree(space); } static struct ecc_point *ecc_alloc_point(unsigned int ndigits) { struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return NULL; p->x = ecc_alloc_digits_space(ndigits); if (!p->x) goto err_alloc_x; p->y = ecc_alloc_digits_space(ndigits); if (!p->y) goto err_alloc_y; p->ndigits = ndigits; return p; err_alloc_y: ecc_free_digits_space(p->x); err_alloc_x: kfree(p); return NULL; } static void ecc_free_point(struct ecc_point *p) { if (!p) return; kzfree(p->x); kzfree(p->y); kzfree(p); } static void vli_clear(u64 *vli, unsigned int ndigits) { int i; for (i = 0; i < ndigits; i++) vli[i] = 0; } /* Returns true if vli == 0, false otherwise. */ bool vli_is_zero(const u64 *vli, unsigned int ndigits) { int i; for (i = 0; i < ndigits; i++) { if (vli[i]) return false; } return true; } EXPORT_SYMBOL(vli_is_zero); /* Returns nonzero if bit bit of vli is set. */ static u64 vli_test_bit(const u64 *vli, unsigned int bit) { return (vli[bit / 64] & ((u64)1 << (bit % 64))); } static bool vli_is_negative(const u64 *vli, unsigned int ndigits) { return vli_test_bit(vli, ndigits * 64 - 1); } /* Counts the number of 64-bit "digits" in vli. */ static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits) { int i; /* Search from the end until we find a non-zero digit. * We do it in reverse because we expect that most digits will * be nonzero. */ for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--); return (i + 1); } /* Counts the number of bits required for vli. 
 */
static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
	unsigned int i, num_digits;
	u64 digit;

	num_digits = vli_num_digits(vli, ndigits);
	if (num_digits == 0)
		return 0;

	/* Count the bits used by the most significant non-zero digit. */
	digit = vli[num_digits - 1];
	for (i = 0; digit; i++)
		digit >>= 1;

	return ((num_digits - 1) * 64 + i);
}

/* Set dest from unaligned bit string src. */
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
{
	int i;
	const u64 *from = src;

	/* Big-endian strings store the most significant word first. */
	for (i = 0; i < ndigits; i++)
		dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
}
EXPORT_SYMBOL(vli_from_be64);

/* Set dest from unaligned little-endian bit string src. */
void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
{
	int i;
	const u64 *from = src;

	for (i = 0; i < ndigits; i++)
		dest[i] = get_unaligned_le64(&from[i]);
}
EXPORT_SYMBOL(vli_from_le64);

/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		dest[i] = src[i];
}

/* Returns sign of left - right. */
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
	int i;

	/* Compare from the most significant digit downwards. */
	for (i = ndigits - 1; i >= 0; i--) {
		if (left[i] > right[i])
			return 1;
		else if (left[i] < right[i])
			return -1;
	}

	return 0;
}
EXPORT_SYMBOL(vli_cmp);

/* Computes result = in << c, returning carry. Can modify in place
 * (if result == in). 0 < shift < 64.
 */
static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
		      unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 temp = in[i];

		result[i] = (temp << shift) | carry;
		/* Bits shifted out of this digit carry into the next. */
		carry = temp >> (64 - shift);
	}

	return carry;
}

/* Computes vli = vli >> 1. */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
	u64 *end = vli;
	u64 carry = 0;

	/* Walk from the top digit down, carrying the shifted-out bit. */
	vli += ndigits;
	while (vli-- > end) {
		u64 temp = *vli;
		*vli = (temp >> 1) | carry;
		carry = temp << 63;
	}
}

/* Computes result = left + right, returning carry. Can modify in place.
 */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
		   unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum;

		sum = left[i] + right[i] + carry;
		/* sum == left[i] means right[i] + carry wrapped to 0:
		 * either both were 0 (carry stays 0) or they summed to
		 * 2^64 (carry stays 1) -- carry is already correct.
		 */
		if (sum != left[i])
			carry = (sum < left[i]);

		result[i] = sum;
	}

	return carry;
}

/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
		    unsigned int ndigits)
{
	u64 carry = right;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum;

		sum = left[i] + carry;
		if (sum != left[i])
			carry = (sum < left[i]);
		else
			carry = !!carry;

		result[i] = sum;
	}

	return carry;
}

/* Computes result = left - right, returning borrow. Can modify in place. */
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
	    unsigned int ndigits)
{
	u64 borrow = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff;

		diff = left[i] - right[i] - borrow;
		/* Mirror of the carry trick in vli_add(): when diff ==
		 * left[i] the existing borrow is already correct.
		 */
		if (diff != left[i])
			borrow = (diff > left[i]);

		result[i] = diff;
	}

	return borrow;
}
EXPORT_SYMBOL(vli_sub);

/* Computes result = left - right, returning borrow. Can modify in place.
 */
static u64 vli_usub(u64 *result, const u64 *left, u64 right,
		    unsigned int ndigits)
{
	u64 borrow = right;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff;

		diff = left[i] - borrow;
		if (diff != left[i])
			borrow = (diff > left[i]);

		result[i] = diff;
	}

	return borrow;
}

/* 64 x 64 -> 128 bit multiply. */
static uint128_t mul_64_64(u64 left, u64 right)
{
	uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
	unsigned __int128 m = (unsigned __int128)left * right;

	result.m_low = m;
	result.m_high = m >> 64;
#else
	/* Portable fallback: schoolbook multiply on 32-bit halves. */
	u64 a0 = left & 0xffffffffull;
	u64 a1 = left >> 32;
	u64 b0 = right & 0xffffffffull;
	u64 b1 = right >> 32;
	u64 m0 = a0 * b0;
	u64 m1 = a0 * b1;
	u64 m2 = a1 * b0;
	u64 m3 = a1 * b1;

	m2 += (m0 >> 32);
	m2 += m1;

	/* Overflow */
	if (m2 < m1)
		m3 += 0x100000000ull;

	result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
	result.m_high = m3 + (m2 >> 32);
#endif
	return result;
}

/* 128-bit add; any carry out of the high word is discarded. */
static uint128_t add_128_128(uint128_t a, uint128_t b)
{
	uint128_t result;

	result.m_low = a.m_low + b.m_low;
	result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);

	return result;
}

/* Full multiply: result (2 * ndigits words) = left * right. */
static void vli_mult(u64 *result, const u64 *left, const u64 *right,
		     unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	unsigned int i, k;

	/* Compute each digit of result in sequence, maintaining the
	 * carries.
	 */
	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		/* Sum all partial products contributing to digit k. */
		for (i = min; i <= k && i < ndigits; i++) {
			uint128_t product;

			product = mul_64_64(left[i], right[k - i]);

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}

/* Compute product = left * right, for a small right value.
 */
static void vli_umult(u64 *result, const u64 *left, u32 right,
		      unsigned int ndigits)
{
	uint128_t r01 = { 0 };
	unsigned int k;

	for (k = 0; k < ndigits; k++) {
		uint128_t product;

		product = mul_64_64(left[k], right);
		r01 = add_128_128(r01, product);
		/* no carry */
		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = 0;
	}
	result[k] = r01.m_low;
	/* Zero the remaining high words of the double-width result. */
	for (++k; k < ndigits * 2; k++)
		result[k] = 0;
}

/* Squaring variant of vli_mult(): exploits left[i]*left[k-i] symmetry. */
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	int i, k;

	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		for (i = min; i <= k && i <= k - i; i++) {
			uint128_t product;

			product = mul_64_64(left[i], left[k - i]);

			if (i < k - i) {
				/* Off-diagonal terms occur twice: double
				 * the 128-bit product, keeping the bit
				 * shifted out of the top in r2.
				 */
				r2 += product.m_high >> 63;
				product.m_high = (product.m_high << 1) |
						 (product.m_low >> 63);
				product.m_low <<= 1;
			}

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}

/* Computes result = (left + right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 carry;

	carry = vli_add(result, left, right, ndigits);

	/* result > mod (result = mod + remainder), so subtract mod to
	 * get remainder.
	 */
	if (carry || vli_cmp(result, mod, ndigits) >= 0)
		vli_sub(result, result, mod, ndigits);
}

/* Computes result = (left - right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 borrow = vli_sub(result, left, righ
/*
 * Copyright (C) 2014 Mans Rullgard <mans@mansr.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

/* Byte offsets of the register blocks inside the controller's MMIO range. */
#define IRQ0_CTL_BASE		0x0000
#define IRQ1_CTL_BASE		0x0100
#define EDGE_CTL_BASE		0x0200
#define IRQ2_CTL_BASE		0x0300

/* Offset from a block base to the registers covering IRQs 32-63. */
#define IRQ_CTL_HI		0x18
#define EDGE_CTL_HI		0x20

/* IRQ control registers, relative to an IRQn_CTL block. */
#define IRQ_STATUS		0x00
#define IRQ_RAWSTAT		0x04
#define IRQ_EN_SET		0x08
#define IRQ_EN_CLR		0x0c
#define IRQ_SOFT_SET		0x10
#define IRQ_SOFT_CLR		0x14

/* Edge configuration registers, relative to the EDGE_CTL block. */
#define EDGE_STATUS		0x00
#define EDGE_RAWSTAT		0x04
#define EDGE_CFG_RISE		0x08
#define EDGE_CFG_FALL		0x0c
#define EDGE_CFG_RISE_SET	0x10
#define EDGE_CFG_RISE_CLR	0x14
#define EDGE_CFG_FALL_SET	0x18
#define EDGE_CFG_FALL_CLR	0x1c

/*
 * Per-controller state shared by all handlers via domain->host_data.
 */
struct tangox_irq_chip {
	void __iomem *base;	/* mapped MMIO base of the controller */
	unsigned long ctl;	/* offset of this instance's IRQn_CTL block */
};

static inline u32 intc_readl(struct tangox_irq_chip *chip, int reg)
{
	return readl_relaxed(chip->base + reg);
}

/* Write @val to the 32-bit controller register at byte offset @reg. */
static inline void intc_writel(struct tangox_irq_chip *chip, int reg, u32 val)
{
	void __iomem *addr = chip->base + reg;

	writel_relaxed(val, addr);
}

/*
 * Fan out one 32-bit status word: for every bit set in @status, look up
 * the Linux irq mapped at hwirq (@base + bit) and run its handler.
 */
static void tangox_dispatch_irqs(struct irq_domain *dom, unsigned int status,
				 int base)
{
	while (status) {
		unsigned int bit = __ffs(status);
		unsigned int virq = irq_find_mapping(dom, base + bit);

		if (virq)
			generic_handle_irq(virq);

		/* Clear the lowest set bit and look for the next one. */
		status &= status - 1;
	}
}

/*
 * Chained handler for the parent interrupt: read both 32-bit status
 * words of this controller instance and dispatch every pending source.
 */
static void tangox_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *dom = irq_desc_get_handler_data(desc);
	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
	struct tangox_irq_chip *chip = dom->host_data;
	unsigned int pending_lo;
	unsigned int pending_hi;

	chained_irq_enter(parent_chip, desc);

	/* IRQs 0-31 and 32-63 live in separate status registers. */
	pending_lo = intc_readl(chip, chip->ctl + IRQ_STATUS);
	pending_hi = intc_readl(chip, chip->ctl + IRQ_CTL_HI + IRQ_STATUS);

	tangox_dispatch_irqs(dom, pending_lo, 0);
	tangox_dispatch_irqs(dom, pending_hi, 32);

	chained_irq_exit(parent_chip, desc);
}

/*
 * Program the trigger mode for the IRQs in d->mask.
 *
 * The rising and falling edge configuration bits each have dedicated
 * set/clear registers; every mode is expressed as one write to a
 * rise-config register followed by one write to a fall-config register.
 */
static int tangox_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct tangox_irq_chip *chip = gc->domain->host_data;
	struct irq_chip_regs *regs = &gc->chip_types[0].regs;
	unsigned int rise_reg, fall_reg;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		rise_reg = EDGE_CFG_RISE_SET;
		fall_reg = EDGE_CFG_FALL_CLR;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		rise_reg = EDGE_CFG_RISE_CLR;
		fall_reg = EDGE_CFG_FALL_SET;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		rise_reg = EDGE_CFG_RISE_CLR;
		fall_reg = EDGE_CFG_FALL_CLR;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		rise_reg = EDGE_CFG_RISE_SET;
		fall_reg = EDGE_CFG_FALL_SET;
		break;

	default:
		pr_err("Invalid trigger mode %x for IRQ %d\n",
		       flow_type, d->irq);
		return -EINVAL;
	}

	/* Rising-edge config is written before falling-edge, as before. */
	intc_writel(chip, regs->type + rise_reg, d->mask);
	intc_writel(chip, regs->type + fall_reg, d->mask);

	return irq_setup_alt_chip(d, flow_type);
}

static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
					unsigned long ctl_offs,
					unsigned long edge_offs)
{
	struct tangox_irq_chip *chip = gc->domain->host_data;
	struct irq_chip_type *ct = gc->chip_types;
	unsigned long ctl_base = chip->ctl + ctl_offs;
	unsigned long edge_base = EDGE_CTL_BASE + edge_offs;
	int i;

	gc->reg_base = chip->base;
	gc->unused = 0;

	for (i = 0; i < 2<