author	Gilad Ben-Yossef <gilad@benyossef.com>	2017-06-27 10:27:13 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-06-29 16:38:50 +0200
commit	e7258b6a228bdb176ef6c00a01a322f8e8ea6e8a (patch)
tree	8c20fab9c0c18e2f77e2e28ec49dfe2c8ea97624 /drivers/staging
parent	97af1ce27844a4303139b06df962e3a8d738d23b (diff)
staging: ccree: fix missing or redundant spaces
Add and/or remove redundant and/or missing spaces in ccree source

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
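The changes are pure whitespace fixes of the kind scripts/checkpatch.pl reports against kernel coding style: add spaces around binary operators and after commas, drop spaces before semicolons, and remove the space between a function name and its opening parenthesis. A minimal before/after sketch of these rules, in the same diff form as the patch below (hypothetical code, not a hunk from this patch):

-	memcpy (dst, src, len) ;
-	total = hdr_len+payload_len;
-	if (rc != 0){
+	memcpy(dst, src, len);
+	total = hdr_len + payload_len;
+	if (rc != 0) {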
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/ccree/Kconfig	2
-rw-r--r--	drivers/staging/ccree/ssi_aead.c	38
-rw-r--r--	drivers/staging/ccree/ssi_aead.h	12
-rw-r--r--	drivers/staging/ccree/ssi_buffer_mgr.c	158
-rw-r--r--	drivers/staging/ccree/ssi_cipher.c	44
-rw-r--r--	drivers/staging/ccree/ssi_driver.c	18
-rw-r--r--	drivers/staging/ccree/ssi_driver.h	4
-rw-r--r--	drivers/staging/ccree/ssi_fips_data.h	12
-rw-r--r--	drivers/staging/ccree/ssi_fips_ll.c	12
-rw-r--r--	drivers/staging/ccree/ssi_fips_local.c	8
-rw-r--r--	drivers/staging/ccree/ssi_fips_local.h	18
-rw-r--r--	drivers/staging/ccree/ssi_hash.c	38
-rw-r--r--	drivers/staging/ccree/ssi_pm.c	16
-rw-r--r--	drivers/staging/ccree/ssi_pm.h	2
-rw-r--r--	drivers/staging/ccree/ssi_request_mgr.c	62
-rw-r--r--	drivers/staging/ccree/ssi_request_mgr.h	6
-rw-r--r--	drivers/staging/ccree/ssi_sysfs.c	56
17 files changed, 253 insertions, 253 deletions
diff --git a/drivers/staging/ccree/Kconfig b/drivers/staging/ccree/Kconfig
index ec3749d318d2..36a87c686a2a 100644
--- a/drivers/staging/ccree/Kconfig
+++ b/drivers/staging/ccree/Kconfig
@@ -18,7 +18,7 @@ config CRYPTO_DEV_CCREE
select CRYPTO_CTR
select CRYPTO_XTS
help
- Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
+ Say 'Y' to enable a driver for the Arm TrustZone CryptoCell
C7xx. Currently only the CryptoCell 712 REE is supported.
Choose this if you wish to use hardware acceleration of
cryptographic operations on the system REE.
diff --git a/drivers/staging/ccree/ssi_aead.c b/drivers/staging/ccree/ssi_aead.c
index c70e45023d06..2e8dc3fa89ce 100644
--- a/drivers/staging/ccree/ssi_aead.c
+++ b/drivers/staging/ccree/ssi_aead.c
@@ -238,8 +238,8 @@ static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *c
} else { /*ENCRYPT*/
if (unlikely(areq_ctx->is_icv_fragmented == true))
ssi_buffer_mgr_copy_scatterlist_portion(
- areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen+areq_ctx->dstOffset,
- areq->cryptlen+areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
+ areq_ctx->mac_buf, areq_ctx->dstSgl, areq->cryptlen + areq_ctx->dstOffset,
+ areq->cryptlen + areq_ctx->dstOffset + ctx->authsize, SSI_SG_FROM_BUF);
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv != NULL) {
@@ -1561,7 +1561,7 @@ static int config_ccm_adata(struct aead_request *req)
(req->cryptlen - ctx->authsize);
int rc;
memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
- memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE*3);
+ memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
/* taken from crypto/ccm.c */
/* 2 <= L <= 8, so 1 <= L' <= 7. */
@@ -1585,12 +1585,12 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0 (a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
- memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE) ;
+ memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
ctr_count_0[15] = 0;
return 0;
@@ -1858,7 +1858,7 @@ static inline void ssi_aead_dump_gcm(
SSI_LOG_DEBUG("%s\n", title);
}
- SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d \n", \
+ SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
if (ctx->enckey != NULL) {
@@ -1878,12 +1878,12 @@ static inline void ssi_aead_dump_gcm(
dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
if (req->src != NULL && req->cryptlen) {
- dump_byte_array("req->src", sg_virt(req->src), req->cryptlen+req->assoclen);
+ dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
}
if (req->dst != NULL) {
- dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen+ctx->authsize+req->assoclen);
- }
+ dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
+ }
}
#endif
@@ -1899,7 +1899,7 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d \n", cryptlen, req->assoclen, ctx->authsize);
+ SSI_LOG_DEBUG("config_gcm_context() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n", cryptlen, req->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1916,15 +1916,15 @@ static int config_gcm_context(struct aead_request *req)
if (req_ctx->plaintext_authenticate_only == false) {
__be64 temp64;
temp64 = cpu_to_be64(req->assoclen * 8);
- memcpy (&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
- memcpy (&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
} else { //rfc4543=> all data(AAD,IV,Plain) are considered additional data that is nothing is encrypted.
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen+GCM_BLOCK_RFC4_IV_SIZE+cryptlen) * 8);
- memcpy (&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
+ temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
+ memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
temp64 = 0;
- memcpy (&req_ctx->gcm_len_block.lenC, &temp64, 8);
+ memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
}
return 0;
@@ -2220,7 +2220,7 @@ static int ssi_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p \n", keylen, key);
+ SSI_LOG_DEBUG("ssi_rfc4106_gcm_setkey() keylen %d, key %p\n", keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2238,7 +2238,7 @@ static int ssi_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsign
struct ssi_aead_ctx *ctx = crypto_aead_ctx(tfm);
int rc = 0;
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p \n", keylen, key);
+ SSI_LOG_DEBUG("ssi_rfc4543_gcm_setkey() keylen %d, key %p\n", keylen, key);
if (keylen < 4)
return -EINVAL;
@@ -2273,7 +2273,7 @@ static int ssi_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d \n", authsize);
+ SSI_LOG_DEBUG("ssi_rfc4106_gcm_setauthsize() authsize %d\n", authsize);
switch (authsize) {
case 8:
@@ -2290,7 +2290,7 @@ static int ssi_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
static int ssi_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
- SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d \n", authsize);
+ SSI_LOG_DEBUG("ssi_rfc4543_gcm_setauthsize() authsize %d\n", authsize);
if (authsize != 16)
return -EINVAL;
diff --git a/drivers/staging/ccree/ssi_aead.h b/drivers/staging/ccree/ssi_aead.h
index 00a3680cb8ab..07cab84b83f4 100644
--- a/drivers/staging/ccree/ssi_aead.h
+++ b/drivers/staging/ccree/ssi_aead.h
@@ -28,17 +28,17 @@
/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
-#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE*3)
+#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8
-#define GCM_BLOCK_RFC4_IV_OFFSET 4
-#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
-#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
-#define GCM_BLOCK_RFC4_NONCE_SIZE 4
+#define GCM_BLOCK_RFC4_IV_OFFSET 4
+#define GCM_BLOCK_RFC4_IV_SIZE 8 /* IV size for rfc's */
+#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
+#define GCM_BLOCK_RFC4_NONCE_SIZE 4
@@ -74,7 +74,7 @@ struct aead_req_ctx {
u8 hkey[AES_BLOCK_SIZE] ____cacheline_aligned;
struct {
u8 lenA[GCM_BLOCK_LEN_SIZE] ____cacheline_aligned;
- u8 lenC[GCM_BLOCK_LEN_SIZE] ;
+ u8 lenC[GCM_BLOCK_LEN_SIZE];
} gcm_len_block;
u8 ccm_config[CCM_CONFIG_BUF_SIZE] ____cacheline_aligned;
diff --git a/drivers/staging/ccree/ssi_buffer_mgr.c b/drivers/staging/ccree/ssi_buffer_mgr.c
index 4373d1dcc408..00d95c15071a 100644
--- a/drivers/staging/ccree/ssi_buffer_mgr.c
+++ b/drivers/staging/ccree/ssi_buffer_mgr.c
@@ -83,14 +83,14 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
while (nbytes != 0) {
if (sg_is_chain(sg_list)) {
SSI_LOG_ERR("Unexpected chained entry "
- "in sg (entry =0x%X) \n", nents);
+ "in sg (entry =0x%X)\n", nents);
BUG();
}
if (sg_list->length != 0) {
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
- nbytes -= ( sg_list->length > nbytes ) ? nbytes : sg_list->length;
+ nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length;
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
@@ -99,7 +99,7 @@ static unsigned int ssi_buffer_mgr_get_sgl_nents(
}
}
}
- SSI_LOG_DEBUG("nents %d last bytes %d\n",nents, *lbytes);
+ SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
return nents;
}
@@ -154,16 +154,16 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
u32 new_nents;;
/* Verify there is no memory overflow*/
- new_nents = (*curr_nents + buff_size/CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES ) {
+ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
return -ENOMEM;
}
/*handle buffer longer than 64 kbytes */
- while (buff_size > CC_MAX_MLLI_ENTRY_SIZE ) {
+ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
mlli_entry_p[LLI_WORD0_OFFSET],
mlli_entry_p[LLI_WORD1_OFFSET]);
buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
@@ -174,7 +174,7 @@ static inline int ssi_buffer_mgr_render_buff_to_mlli(
/*Last entry */
cc_lli_set_addr(mlli_entry_p, buff_dma);
cc_lli_set_size(mlli_entry_p, buff_size);
- SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n",*curr_nents,
+ SSI_LOG_DEBUG("entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents,
mlli_entry_p[LLI_WORD0_OFFSET],
mlli_entry_p[LLI_WORD1_OFFSET]);
mlli_entry_p = mlli_entry_p + 2;
@@ -196,15 +196,15 @@ static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
curr_sgl = sg_next(curr_sgl)) {
u32 entry_data_len =
(sgl_data_len > sg_dma_len(curr_sgl) - sglOffset) ?
- sg_dma_len(curr_sgl) - sglOffset : sgl_data_len ;
+ sg_dma_len(curr_sgl) - sglOffset : sgl_data_len;
sgl_data_len -= entry_data_len;
rc = ssi_buffer_mgr_render_buff_to_mlli(
sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
&mlli_entry_p);
- if(rc != 0) {
+ if (rc != 0) {
return rc;
}
- sglOffset=0;
+ sglOffset = 0;
}
*mlli_entry_pp = mlli_entry_p;
return 0;
@@ -216,7 +216,7 @@ static int ssi_buffer_mgr_generate_mlli(
struct mlli_params *mlli_params)
{
u32 *mlli_p;
- u32 total_nents = 0,prev_total_nents = 0;
+ u32 total_nents = 0, prev_total_nents = 0;
int rc = 0, i;
SSI_LOG_DEBUG("NUM of SG's = %d\n", sg_data->num_of_buffers);
@@ -227,7 +227,7 @@ static int ssi_buffer_mgr_generate_mlli(
&(mlli_params->mlli_dma_addr));
if (unlikely(mlli_params->mlli_virt_addr == NULL)) {
SSI_LOG_ERR("dma_pool_alloc() failed\n");
- rc =-ENOMEM;
+ rc = -ENOMEM;
goto build_mlli_exit;
}
/* Point to start of MLLI */
@@ -244,7 +244,7 @@ static int ssi_buffer_mgr_generate_mlli(
sg_data->entry[i].buffer_dma,
sg_data->total_data_len[i], &total_nents,
&mlli_p);
- if(rc != 0) {
+ if (rc != 0) {
return rc;
}
@@ -323,13 +323,13 @@ static int
ssi_buffer_mgr_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
enum dma_data_direction direction)
{
- u32 i , j;
+ u32 i, j;
struct scatterlist *l_sg = sg;
for (i = 0; i < nents; i++) {
if (l_sg == NULL) {
break;
}
- if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)){
+ if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
goto err;
}
@@ -343,7 +343,7 @@ err:
if (sg == NULL) {
break;
}
- dma_unmap_sg(dev,sg,1,direction);
+ dma_unmap_sg(dev, sg, 1, direction);
sg = sg_next(sg);
}
return 0;
@@ -387,7 +387,7 @@ static int ssi_buffer_mgr_map_scatterlist(
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (unlikely(*mapped_nents == 0)){
+ if (unlikely(*mapped_nents == 0)) {
*nents = 0;
SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -400,7 +400,7 @@ static int ssi_buffer_mgr_map_scatterlist(
sg,
*nents,
direction);
- if (unlikely(*mapped_nents != *nents)){
+ if (unlikely(*mapped_nents != *nents)) {
*nents = *mapped_nents;
SSI_LOG_ERR("dma_map_sg() sg buffer failed\n");
return -ENOMEM;
@@ -418,7 +418,7 @@ ssi_aead_handle_config_buf(struct device *dev,
struct buffer_array *sg_data,
unsigned int assoclen)
{
- SSI_LOG_DEBUG(" handle additional data config set to DLLI \n");
+ SSI_LOG_DEBUG(" handle additional data config set to DLLI\n");
/* create sg for the current buffer */
sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
if (unlikely(dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1,
@@ -453,9 +453,9 @@ static inline int ssi_ahash_handle_curr_buf(struct device *dev,
u32 curr_buff_cnt,
struct buffer_array *sg_data)
{
- SSI_LOG_DEBUG(" handle curr buff %x set to DLLI \n", curr_buff_cnt);
+ SSI_LOG_DEBUG(" handle curr buff %x set to DLLI\n", curr_buff_cnt);
/* create sg for the current buffer */
- sg_init_one(areq_ctx->buff_sg,curr_buff, curr_buff_cnt);
+ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
if (unlikely(dma_map_sg(dev, areq_ctx->buff_sg, 1,
DMA_TO_DEVICE) != 1)) {
SSI_LOG_ERR("dma_map_sg() "
@@ -540,12 +540,12 @@ int ssi_buffer_mgr_map_blkcipher_request(
sg_data.num_of_buffers = 0;
/* Map IV buffer */
- if (likely(ivsize != 0) ) {
+ if (likely(ivsize != 0)) {
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
ivsize,
- req_ctx->is_giv ? DMA_BIDIRECTIONAL:
+ req_ctx->is_giv ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
req_ctx->gen_ctx.iv_dma_addr))) {
@@ -581,7 +581,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
} else {
/* Map the dst sg */
if (unlikely(ssi_buffer_mgr_map_scatterlist(
- dev,dst, nbytes,
+ dev, dst, nbytes,
DMA_BIDIRECTIONAL, &req_ctx->out_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
&mapped_nents))){
@@ -606,7 +606,7 @@ int ssi_buffer_mgr_map_blkcipher_request(
if (unlikely(req_ctx->dma_buf_type == SSI_DMA_BUF_MLLI)) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc!= 0))
+ if (unlikely(rc != 0))
goto ablkcipher_exit;
}
@@ -686,19 +686,19 @@ void ssi_buffer_mgr_unmap_aead_request(
areq_ctx->mlli_params.mlli_dma_addr);
}
- SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src),areq_ctx->src.nents,areq_ctx->assoc.nents,req->assoclen,req->cryptlen);
- size_to_unmap = req->assoclen+req->cryptlen;
- if(areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT){
+ SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
+ size_to_unmap = req->assoclen + req->cryptlen;
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
size_to_unmap += areq_ctx->req_authsize;
}
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src,size_to_unmap,&dummy,&chained) , DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, ssi_buffer_mgr_get_sgl_nents(req->src, size_to_unmap, &dummy, &chained), DMA_BIDIRECTIONAL);
if (unlikely(req->src != req->dst)) {
SSI_LOG_DEBUG("Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst,size_to_unmap,&dummy,&chained),
+ dma_unmap_sg(dev, req->dst, ssi_buffer_mgr_get_sgl_nents(req->dst, size_to_unmap, &dummy, &chained),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -714,8 +714,8 @@ void ssi_buffer_mgr_unmap_aead_request(
*/
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_FROM_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_FROM_BUF);
}
}
@@ -736,7 +736,7 @@ static inline int ssi_buffer_mgr_get_aead_icv_nents(
return 0;
}
- for( i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
+ for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
if (sgl == NULL) {
break;
}
@@ -798,7 +798,7 @@ static inline int ssi_buffer_mgr_aead_chain_iv(
SSI_LOG_DEBUG("Mapped iv %u B at va=%pK to dma=0x%llX\n",
hw_iv_size, req->iv,
(unsigned long long)areq_ctx->gen_ctx.iv_dma_addr);
- if (do_chain == true && areq_ctx->plaintext_authenticate_only == true){ // TODO: what about CTR?? ask Ron
+ if (do_chain == true && areq_ctx->plaintext_authenticate_only == true) { // TODO: what about CTR?? ask Ron
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
@@ -858,7 +858,7 @@ static inline int ssi_buffer_mgr_aead_chain_assoc(
current_sg = sg_next(current_sg);
//if have reached the end of the sgl, then this is unexpected
if (current_sg == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += current_sg->length;
@@ -923,7 +923,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
if (likely(req->src == req->dst)) {
/*INPLACE*/
areq_ctx->icv_dma_addr = sg_dma_address(
- areq_ctx->srcSgl)+
+ areq_ctx->srcSgl) +
(*src_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
areq_ctx->srcSgl) +
@@ -942,7 +942,7 @@ static inline void ssi_buffer_mgr_prepare_aead_data_dlli(
areq_ctx->dstSgl) +
(*dst_last_bytes - authsize);
areq_ctx->icv_virt_addr = sg_virt(
- areq_ctx->dstSgl)+
+ areq_ctx->dstSgl) +
(*dst_last_bytes - authsize);
}
}
@@ -964,7 +964,7 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*INPLACE*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen,areq_ctx->srcOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);
icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1018,11 +1018,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*NON-INPLACE and DECRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
&areq_ctx->dst.mlli_nents);
icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->srcSgl,
@@ -1044,8 +1044,8 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
}
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
} else { /* Contig. ICV */
/*Should hanlde if the sg is not contig.*/
@@ -1061,11 +1061,11 @@ static inline int ssi_buffer_mgr_prepare_aead_data_mlli(
/*NON-INPLACE and ENCRYPT*/
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->dst.nents, areq_ctx->dstSgl,
- areq_ctx->cryptlen,areq_ctx->dstOffset, is_last_table,
+ areq_ctx->cryptlen, areq_ctx->dstOffset, is_last_table,
&areq_ctx->dst.mlli_nents);
ssi_buffer_mgr_add_scatterlist_entry(sg_data,
areq_ctx->src.nents, areq_ctx->srcSgl,
- areq_ctx->cryptlen, areq_ctx->srcOffset,is_last_table,
+ areq_ctx->cryptlen, areq_ctx->srcOffset, is_last_table,
&areq_ctx->src.mlli_nents);
icv_nents = ssi_buffer_mgr_get_aead_icv_nents(areq_ctx->dstSgl,
@@ -1108,7 +1108,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
int rc = 0;
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
- unsigned int size_for_map = req->assoclen +req->cryptlen; /*non-inplace mode*/
+ unsigned int size_for_map = req->assoclen + req->cryptlen; /*non-inplace mode*/
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool chained = false;
@@ -1130,8 +1130,8 @@ static inline int ssi_buffer_mgr_aead_chain_data(
size_for_map += crypto_aead_ivsize(tfm);
}
- size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize:0;
- src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src,size_for_map,&src_last_bytes, &chained);
+ size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
+ src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
sg_index = areq_ctx->srcSgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1139,7 +1139,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->srcSgl = sg_next(areq_ctx->srcSgl);
//if have reached the end of the sgl, then this is unexpected
if (areq_ctx->srcSgl == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += areq_ctx->srcSgl->length;
@@ -1157,7 +1157,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->srcOffset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen +req->cryptlen;
+ size_for_map = req->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
if (is_gcm4543) {
size_for_map += crypto_aead_ivsize(tfm);
@@ -1173,7 +1173,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
}
}
- dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst,size_for_map,&dst_last_bytes, &chained);
+ dst_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->dst, size_for_map, &dst_last_bytes, &chained);
sg_index = areq_ctx->dstSgl->length;
offset = size_to_skip;
@@ -1184,7 +1184,7 @@ static inline int ssi_buffer_mgr_aead_chain_data(
areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
//if have reached the end of the sgl, then this is unexpected
if (areq_ctx->dstSgl == NULL) {
- SSI_LOG_ERR("reached end of sg list. unexpected \n");
+ SSI_LOG_ERR("reached end of sg list. unexpected\n");
BUG();
}
sg_index += areq_ctx->dstSgl->length;
@@ -1214,7 +1214,7 @@ chain_data_exit:
return rc;
}
-static void ssi_buffer_mgr_update_aead_mlli_nents( struct ssi_drvdata *drvdata,
+static void ssi_buffer_mgr_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1298,8 +1298,8 @@ int ssi_buffer_mgr_map_aead_request(
*/
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
- size_to_skip+ req->cryptlen - areq_ctx->req_authsize,
- size_to_skip+ req->cryptlen, SSI_SG_TO_BUF);
+ size_to_skip + req->cryptlen - areq_ctx->req_authsize,
+ size_to_skip + req->cryptlen, SSI_SG_TO_BUF);
}
/* cacluate the size for cipher remove ICV in decrypt*/
@@ -1393,7 +1393,7 @@ int ssi_buffer_mgr_map_aead_request(
size_to_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
size_to_map, DMA_BIDIRECTIONAL, &(areq_ctx->src.nents),
- LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES+LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+ LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
if (unlikely(rc != 0)) {
rc = -ENOMEM;
goto aead_map_failure;
@@ -1459,9 +1459,9 @@ int ssi_buffer_mgr_map_aead_request(
}
ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
- SSI_LOG_DEBUG("assoc params mn %d\n",areq_ctx->assoc.mlli_nents);
- SSI_LOG_DEBUG("src params mn %d\n",areq_ctx->src.mlli_nents);
- SSI_LOG_DEBUG("dst params mn %d\n",areq_ctx->dst.mlli_nents);
+ SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
+ SSI_LOG_DEBUG("src params mn %d\n", areq_ctx->src.mlli_nents);
+ SSI_LOG_DEBUG("dst params mn %d\n", areq_ctx->dst.mlli_nents);
}
return 0;
@@ -1503,7 +1503,7 @@ int ssi_buffer_mgr_map_hash_request_final(
/*TODO: copy data in case that buffer is enough for operation */
/* map the previous buffer */
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
if (ssi_ahash_handle_curr_buf(dev, areq_ctx, curr_buff,
*curr_buff_cnt, &sg_data) != 0) {
return -ENOMEM;
@@ -1511,7 +1511,7 @@ int ssi_buffer_mgr_map_hash_request_final(
}
if (src && (nbytes > 0) && do_update) {
- if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
nbytes,
DMA_TO_DEVICE,
&areq_ctx->in_nents,
@@ -1519,9 +1519,9 @@ int ssi_buffer_mgr_map_hash_request_final(
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
- if ( src && (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
- memcpy(areq_ctx->buff_sg,src,
+ if (src && (mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
+ memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = nbytes;
areq_ctx->curr_sg = areq_ctx->buff_sg;
@@ -1547,7 +1547,7 @@ int ssi_buffer_mgr_map_hash_request_final(
}
}
/* change the buffer index for the unmap function */
- areq_ctx->buff_index = (areq_ctx->buff_index^1);
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
SSI_LOG_DEBUG("areq_ctx->data_dma_buf_type = %s\n",
GET_DMA_BUFFER_TYPE(areq_ctx->data_dma_buf_type));
return 0;
@@ -1556,7 +1556,7 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
}
return -ENOMEM;
@@ -1586,7 +1586,7 @@ int ssi_buffer_mgr_map_hash_request_update(
SSI_LOG_DEBUG(" update params : curr_buff=%pK "
"curr_buff_cnt=0x%X nbytes=0x%X "
- "src=%pK curr_index=%u \n",
+ "src=%pK curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes,
src, areq_ctx->buff_index);
/* Init the type of the dma buffer */
@@ -1623,12 +1623,12 @@ int ssi_buffer_mgr_map_hash_request_update(
/* Copy the new residue to next buffer */
if (*next_buff_cnt != 0) {
SSI_LOG_DEBUG(" handle residue: next buff %pK skip data %u"
- " residue %u \n", next_buff,
+ " residue %u\n", next_buff,
(update_data_len - *curr_buff_cnt),
*next_buff_cnt);
ssi_buffer_mgr_copy_scatterlist_portion(next_buff, src,
- (update_data_len -*curr_buff_cnt),
- nbytes,SSI_SG_TO_BUF);
+ (update_data_len - *curr_buff_cnt),
+ nbytes, SSI_SG_TO_BUF);
/* change the buffer index for next operation */
swap_index = 1;
}
@@ -1642,19 +1642,19 @@ int ssi_buffer_mgr_map_hash_request_update(
swap_index = 1;
}
- if ( update_data_len > *curr_buff_cnt ) {
- if ( unlikely( ssi_buffer_mgr_map_scatterlist( dev,src,
- (update_data_len -*curr_buff_cnt),
+ if (update_data_len > *curr_buff_cnt) {
+ if (unlikely(ssi_buffer_mgr_map_scatterlist(dev, src,
+ (update_data_len - *curr_buff_cnt),
DMA_TO_DEVICE,
&areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy, &mapped_nents))){
goto unmap_curr_buff;
}
- if ( (mapped_nents == 1)
- && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL) ) {
+ if ((mapped_nents == 1)
+ && (areq_ctx->data_dma_buf_type == SSI_DMA_BUF_NULL)) {
/* only one entry in the SG and no previous data */
- memcpy(areq_ctx->buff_sg,src,
+ memcpy(areq_ctx->buff_sg, src,
sizeof(struct scatterlist));
areq_ctx->buff_sg->length = update_data_len;
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_DLLI;
@@ -1678,7 +1678,7 @@ int ssi_buffer_mgr_map_hash_request_update(
}
}
- areq_ctx->buff_index = (areq_ctx->buff_index^swap_index);
+ areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
return 0;
@@ -1686,7 +1686,7 @@ fail_unmap_din:
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0 ) {
+ if (*curr_buff_cnt != 0) {
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
}
return -ENOMEM;
@@ -1722,7 +1722,7 @@ void ssi_buffer_mgr_unmap_hash_request(
if (*prev_len != 0) {
SSI_LOG_DEBUG("Unmapped buffer: areq_ctx->buff_sg=%pK"
- "dma=0x%llX len 0x%X\n",
+ " dma=0x%llX len 0x%X\n",
sg_virt(areq_ctx->buff_sg),
(unsigned long long)sg_dma_address(areq_ctx->buff_sg),
sg_dma_len(areq_ctx->buff_sg));
diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
index 34450a5e6573..519e04ef6e70 100644
--- a/drivers/staging/ccree/ssi_cipher.c
+++ b/drivers/staging/ccree/ssi_cipher.c
@@ -69,9 +69,9 @@ static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __io
static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
- switch (ctx_p->flow_mode){
+ switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
- switch (size){
+ switch (size) {
case CC_AES_128_BIT_KEY_SIZE:
case CC_AES_192_BIT_KEY_SIZE:
if (likely((ctx_p->cipher_mode != DRV_CIPHER_XTS) &&
@@ -81,8 +81,8 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
break;
case CC_AES_256_BIT_KEY_SIZE:
return 0;
- case (CC_AES_192_BIT_KEY_SIZE*2):
- case (CC_AES_256_BIT_KEY_SIZE*2):
+ case (CC_AES_192_BIT_KEY_SIZE * 2):
+ case (CC_AES_256_BIT_KEY_SIZE * 2):
if (likely((ctx_p->cipher_mode == DRV_CIPHER_XTS) ||
(ctx_p->cipher_mode == DRV_CIPHER_ESSIV) ||
(ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)))
@@ -111,9 +111,9 @@ static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
- switch (ctx_p->flow_mode){
+ switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
- switch (ctx_p->cipher_mode){
+ switch (ctx_p->cipher_mode) {
case DRV_CIPHER_XTS:
if ((size >= SSI_MIN_AES_XTS_SIZE) &&
(size <= SSI_MAX_AES_XTS_SIZE) &&
@@ -198,7 +198,7 @@ static int ssi_blkcipher_init(struct crypto_tfm *tfm)
dev = &ctx_p->drvdata->plat_dev->dev;
/* Allocate key buffer, cache line aligned */
- ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL|GFP_DMA);
+ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
if (!ctx_p->user.key) {
SSI_LOG_ERR("Allocating key buffer in context failed\n");
rc = -ENOMEM;
@@ -257,11 +257,11 @@ static void ssi_blkcipher_exit(struct crypto_tfm *tfm)
}
-typedef struct tdes_keys{
+typedef struct tdes_keys {
u8 key1[DES_KEY_SIZE];
u8 key2[DES_KEY_SIZE];
u8 key3[DES_KEY_SIZE];
-}tdes_keys_t;
+} tdes_keys_t;
static const u8 zero_buff[] = { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
@@ -275,8 +275,8 @@ static int ssi_fips_verify_3des_keys(const u8 *key, unsigned int keylen)
tdes_keys_t *tdes_key = (tdes_keys_t*)key;
/* verify key1 != key2 and key3 != key2*/
- if (unlikely( (memcmp((u8*)tdes_key->key1, (u8*)tdes_key->key2, sizeof(tdes_key->key1)) == 0) ||