apm821xx: 4.14: remove kernel config and patches

This patch removes 4.14 kernel support from the apm821xx target.
The 4.19 kernel has been available and stable for a while, and 5.4
kernel support has been tested successfully on real hardware as well.

Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Branch: master
Author: Christian Lamparter, 2020-03-07 16:58:32 +01:00
parent 0a06fcf608
commit b052b62efc
60 changed files with 0 additions and 8847 deletions

@@ -1,366 +0,0 @@
# CONFIG_40x is not set
CONFIG_44x=y
CONFIG_460EX=y
CONFIG_4xx=y
CONFIG_4xx_SOC=y
# CONFIG_ADVANCED_OPTIONS is not set
CONFIG_APM821xx=y
CONFIG_APOLLO3G=y
# CONFIG_ARCHES is not set
CONFIG_ARCH_DMA_ADDR_T_64BIT=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK=y
CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
CONFIG_ARCH_HAS_SG_CHAIN=y
CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set
CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
CONFIG_ARCH_MMAP_RND_BITS=11
CONFIG_ARCH_MMAP_RND_BITS_MAX=17
CONFIG_ARCH_MMAP_RND_BITS_MIN=11
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17
CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y
# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
# CONFIG_ARCH_RANDOM is not set
CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
CONFIG_ARCH_USE_BUILTIN_BSWAP=y
CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
CONFIG_ARCH_WEAK_RELEASE_ACQUIRE=y
CONFIG_AUDIT_ARCH=y
# CONFIG_BAMBOO is not set
CONFIG_BCH=y
CONFIG_BLK_MQ_PCI=y
# CONFIG_BLUESTONE is not set
CONFIG_BOOKE=y
CONFIG_BOOKE_WDT=y
# CONFIG_BOUNCE is not set
CONFIG_BUCKMINSTER=y
# CONFIG_CANYONLANDS is not set
CONFIG_CLONE_BACKWARDS=y
CONFIG_CMDLINE="rootfstype=squashfs noinitrd"
CONFIG_CMDLINE_BOOL=y
CONFIG_CONSISTENT_SIZE=0x00200000
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_CRC16=y
# CONFIG_CRC32_SARWATE is not set
CONFIG_CRC32_SLICEBY8=y
CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_CCM=y
CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_DEV_PPC4XX=y
CONFIG_CRYPTO_DRBG=y
CONFIG_CRYPTO_DRBG_HMAC=y
CONFIG_CRYPTO_DRBG_MENU=y
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_GF128MUL=y
CONFIG_CRYPTO_GHASH=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_JITTERENTROPY=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_MD5_PPC=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_NULL2=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_RNG_DEFAULT=y
CONFIG_CRYPTO_SEQIV=y
CONFIG_CRYPTO_SHA1_PPC=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_WORKQUEUE=y
CONFIG_DECOMPRESS_GZIP=y
# CONFIG_DEFAULT_UIMAGE is not set
CONFIG_DTC=y
# CONFIG_E200 is not set
CONFIG_EARLY_PRINTK=y
# CONFIG_EBONY is not set
CONFIG_EDAC_ATOMIC_SCRUB=y
CONFIG_EDAC_SUPPORT=y
# CONFIG_EIGER is not set
# CONFIG_EPAPR_BOOT is not set
CONFIG_EXTRA_TARGETS="uImage"
CONFIG_FIXED_PHY=y
# CONFIG_FORCE_SMP is not set
CONFIG_FREEZER=y
# CONFIG_FSL_LBC is not set
# CONFIG_FSL_ULI1575 is not set
CONFIG_GENERIC_ATOMIC64=y
CONFIG_GENERIC_BUG=y
CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_CMOS_UPDATE=y
CONFIG_GENERIC_CPU_AUTOPROBE=y
# CONFIG_GENERIC_CSUM is not set
CONFIG_GENERIC_IO=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_NVRAM=y
CONFIG_GENERIC_PCI_IOMAP=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_GENERIC_STRNCPY_FROM_USER=y
CONFIG_GENERIC_STRNLEN_USER=y
# CONFIG_GENERIC_TBSYNC is not set
CONFIG_GENERIC_TIME_VSYSCALL=y
# CONFIG_GEN_RTC is not set
# CONFIG_GE_FPGA is not set
# CONFIG_GLACIER is not set
CONFIG_GPIOLIB=y
CONFIG_GPIO_GENERIC=y
CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_GRO_CELLS is not set
CONFIG_HAS_DMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT_MAP=y
# CONFIG_HAS_RAPIDIO is not set
# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
CONFIG_HAVE_ARCH_AUDITSYSCALL=y
# CONFIG_HAVE_ARCH_BITREVERSE is not set
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
CONFIG_HAVE_CBPF_JIT=y
CONFIG_HAVE_DEBUG_KMEMLEAK=y
CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_HAVE_GENERIC_GUP=y
CONFIG_HAVE_IDE=y
CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
CONFIG_HAVE_KPROBES_ON_FTRACE=y
CONFIG_HAVE_MEMBLOCK=y
CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
CONFIG_HAVE_NET_DSA=y
CONFIG_HAVE_OPROFILE=y
CONFIG_HAVE_PERF_EVENTS=y
CONFIG_HAVE_PERF_REGS=y
CONFIG_HAVE_PERF_USER_STACK_DUMP=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_HAVE_VIRT_CPU_ACCOUNTING=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_PPC4XX=y
CONFIG_HZ=1000
# CONFIG_HZ_100 is not set
CONFIG_HZ_1000=y
CONFIG_HZ_PERIODIC=y
CONFIG_I2C=y
CONFIG_I2C_BOARDINFO=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IBM_IIC=y
CONFIG_IBM_EMAC=y
CONFIG_IBM_EMAC_EMAC4=y
CONFIG_IBM_EMAC_POLL_WEIGHT=32
CONFIG_IBM_EMAC_RGMII=y
CONFIG_IBM_EMAC_RXB=128
CONFIG_IBM_EMAC_RX_COPY_THRESHOLD=256
CONFIG_IBM_EMAC_RX_SKB_HEADROOM=0
CONFIG_IBM_EMAC_TAH=y
CONFIG_IBM_EMAC_TXB=128
# CONFIG_ICON is not set
CONFIG_IKAREM=y
CONFIG_ILLEGAL_POINTER_VALUE=0
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_IOMMU_HELPER is not set
# CONFIG_IPIC is not set
CONFIG_IRQCHIP=y
CONFIG_IRQ_DOMAIN=y
CONFIG_IRQ_FORCED_THREADING=y
CONFIG_IRQ_WORK=y
CONFIG_ISA_DMA_API=y
# CONFIG_JFFS2_FS is not set
# CONFIG_KATMAI is not set
CONFIG_KERNEL_GZIP=y
CONFIG_KERNEL_START=0xc0000000
CONFIG_LEDS_TRIGGER_MTD=y
CONFIG_LIBFDT=y
CONFIG_LOWMEM_SIZE=0x30000000
CONFIG_LZO_COMPRESS=y
CONFIG_LZO_DECOMPRESS=y
# CONFIG_MATH_EMULATION is not set
CONFIG_MDIO_BUS=y
CONFIG_MDIO_DEVICE=y
CONFIG_MIGRATION=y
# CONFIG_MMIO_NVRAM is not set
CONFIG_MODULES_USE_ELF_RELA=y
# CONFIG_MPIC is not set
# CONFIG_MPIC_U3_HT_IRQS is not set
# CONFIG_MPIC_WEIRD is not set
CONFIG_MTD_CFI_ADV_OPTIONS=y
# CONFIG_MTD_CFI_GEOMETRY is not set
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_BCH=y
CONFIG_MTD_NAND_ECC=y
CONFIG_MTD_NAND_ECC_BCH=y
CONFIG_MTD_NAND_ECC_SMC=y
CONFIG_MTD_NAND_NDFC=y
# CONFIG_MTD_SPLIT is not set
# CONFIG_MTD_SPLIT_SQUASHFS_ROOT is not set
CONFIG_NEED_DMA_MAP_STATE=y
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_NEED_PER_CPU_KM=y
CONFIG_NEED_SG_DMA_LENGTH=y
# CONFIG_NONSTATIC_KERNEL is not set
CONFIG_NOT_COHERENT_CACHE=y
CONFIG_NO_BOOTMEM=y
CONFIG_NR_IRQS=512
CONFIG_OF=y
CONFIG_OF_ADDRESS=y
CONFIG_OF_ADDRESS_PCI=y
CONFIG_OF_EARLY_FLATTREE=y
CONFIG_OF_FLATTREE=y
CONFIG_OF_GPIO=y
CONFIG_OF_IRQ=y
CONFIG_OF_MDIO=y
CONFIG_OF_NET=y
CONFIG_OF_PCI=y
CONFIG_OF_PCI_IRQ=y
CONFIG_OF_RESERVED_MEM=y
CONFIG_OLD_SIGACTION=y
CONFIG_OLD_SIGSUSPEND=y
CONFIG_PAGE_OFFSET=0xc0000000
CONFIG_PCI=y
CONFIG_PCIEAER=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCIE_PME=y
CONFIG_PCI_BUS_ADDR_T_64BIT=y
CONFIG_PCI_DISABLE_COMMON_QUIRKS=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_MSI_IRQ_DOMAIN is not set
CONFIG_PGTABLE_LEVELS=2
CONFIG_PHYLIB=y
CONFIG_PHYSICAL_START=0x00000000
CONFIG_PHYS_64BIT=y
CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_PM=y
CONFIG_PM_AUTOSLEEP=y
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_GC=y
CONFIG_PM_WAKELOCKS_LIMIT=100
CONFIG_PPC=y
CONFIG_PPC32=y
CONFIG_PPC44x_SIMPLE=y
CONFIG_PPC4xx_CPM=y
CONFIG_PPC4xx_GPIO=y
# CONFIG_PPC4xx_HSTA_MSI is not set
CONFIG_PPC4xx_MSI=y
CONFIG_PPC4xx_OCM=y
CONFIG_PPC4xx_PCI_EXPRESS=y
# CONFIG_PPC64 is not set
# CONFIG_PPC_47x is not set
# CONFIG_PPC_85xx is not set
# CONFIG_PPC_8xx is not set
# CONFIG_PPC_970_NAP is not set
CONFIG_PPC_ADV_DEBUG_DACS=2
CONFIG_PPC_ADV_DEBUG_DAC_RANGE=y
CONFIG_PPC_ADV_DEBUG_DVCS=2
CONFIG_PPC_ADV_DEBUG_IACS=4
CONFIG_PPC_ADV_DEBUG_REGS=y
# CONFIG_PPC_BOOK3S_32 is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
# CONFIG_PPC_COPRO_BASE is not set
CONFIG_PPC_DCR=y
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_PPC_DCR_NATIVE=y
# CONFIG_PPC_DOORBELL is not set
# CONFIG_PPC_EARLY_DEBUG is not set
# CONFIG_PPC_EPAPR_HV_PIC is not set
CONFIG_PPC_FPU=y
# CONFIG_PPC_I8259 is not set
# CONFIG_PPC_ICP_HV is not set
# CONFIG_PPC_ICP_NATIVE is not set
# CONFIG_PPC_ICS_RTAS is not set
CONFIG_PPC_INDIRECT_PCI=y
CONFIG_PPC_LIB_RHEAP=y
CONFIG_PPC_MMU_NOHASH=y
# CONFIG_PPC_MM_SLICES is not set
# CONFIG_PPC_MPC106 is not set
CONFIG_PPC_MSI_BITMAP=y
# CONFIG_PPC_P7_NAP is not set
CONFIG_PPC_PCI_CHOICE=y
# CONFIG_PPC_PTDUMP is not set
# CONFIG_PPC_RTAS is not set
CONFIG_PPC_UDBG_16550=y
CONFIG_PPC_WERROR=y
# CONFIG_PPC_XICS is not set
# CONFIG_PPC_XIVE is not set
# CONFIG_PPC_XIVE_SPAPR is not set
# CONFIG_PQ2ADS is not set
CONFIG_PTE_64BIT=y
# CONFIG_RAINIER is not set
CONFIG_RAS=y
# CONFIG_RCU_NEED_SEGCBLIST is not set
# CONFIG_RCU_STALL_COMMON is not set
CONFIG_RD_GZIP=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_SAM440EP is not set
# CONFIG_SCHED_INFO is not set
# CONFIG_SCSI_DMA is not set
# CONFIG_SEQUOIA is not set
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_FSL=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SIMPLE_GPIO=y
CONFIG_SPARSE_IRQ=y
CONFIG_SRCU=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
# CONFIG_SWIOTLB is not set
CONFIG_SWPHY=y
CONFIG_SYSCTL_EXCEPTION_TRACE=y
# CONFIG_TAISHAN is not set
CONFIG_TASK_SIZE=0xc0000000
CONFIG_THREAD_SHIFT=13
CONFIG_TICK_CPU_ACCOUNTING=y
CONFIG_TINY_SRCU=y
CONFIG_USB_SUPPORT=y
CONFIG_VDSO32=y
# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
# CONFIG_WARP is not set
CONFIG_WATCHDOG_CORE=y
CONFIG_WNDR4700=y
# CONFIG_XILINX_SYSACE is not set
# CONFIG_XILINX_VIRTEX440_GENERIC_BOARD is not set
CONFIG_XZ_DEC_BCJ=y
CONFIG_XZ_DEC_POWERPC=y
# CONFIG_YOSEMITE is not set
CONFIG_ZLIB_DEFLATE=y
CONFIG_ZLIB_INFLATE=y

@@ -1,120 +0,0 @@
From 81065f66dd99b3af58626a914b8c0fcff6b8b0ba Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:15 +0200
Subject: [PATCH 02/25] crypto: crypto4xx - remove unused definitions and
write-only variables
This patch removes several pieces of unused code and unused
definitions (structs, variables, ...).
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 6 ------
drivers/crypto/amcc/crypto4xx_core.c | 2 +-
drivers/crypto/amcc/crypto4xx_core.h | 16 ----------------
3 files changed, 1 insertion(+), 23 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -75,7 +75,6 @@ int crypto4xx_encrypt(struct ablkcipher_
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
ctx->direction = DIR_OUTBOUND;
- ctx->hash_final = 0;
ctx->is_hash = 0;
ctx->pd_ctl = 0x1;
@@ -89,7 +88,6 @@ int crypto4xx_decrypt(struct ablkcipher_
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
ctx->direction = DIR_INBOUND;
- ctx->hash_final = 0;
ctx->is_hash = 0;
ctx->pd_ctl = 1;
@@ -136,7 +134,6 @@ static int crypto4xx_setkey_aes(struct c
}
/* Setup SA */
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- ctx->hash_final = 0;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
SA_SAVE_IV : SA_NOT_SAVE_IV),
@@ -192,7 +189,6 @@ static int crypto4xx_hash_alg_init(struc
ctx->dev = my_alg->dev;
ctx->is_hash = 1;
- ctx->hash_final = 0;
/* Create SA */
if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
@@ -257,7 +253,6 @@ int crypto4xx_hash_update(struct ahash_r
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
ctx->is_hash = 1;
- ctx->hash_final = 0;
ctx->pd_ctl = 0x11;
ctx->direction = DIR_INBOUND;
@@ -275,7 +270,6 @@ int crypto4xx_hash_digest(struct ahash_r
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- ctx->hash_final = 1;
ctx->pd_ctl = 0x11;
ctx->direction = DIR_INBOUND;
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -969,7 +969,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
sa->sa_command_1.bf.hash_crypto_offset = 0;
pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
+ pd->pd_ctl_len.w = 0x00400000 | datalen;
pd_uinfo->state = PD_ENTRY_INUSE;
wmb();
/* write any value to push engine to read a pd */
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -72,7 +72,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
char *name;
- u64 ce_phy_address;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -127,21 +126,9 @@ struct crypto4xx_ctx {
u32 sa_len;
u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
u32 direction;
- u32 next_hdr;
u32 save_iv;
- u32 pd_ctl_len;
u32 pd_ctl;
- u32 bypass;
u32 is_hash;
- u32 hash_final;
-};
-
-struct crypto4xx_req_ctx {
- struct crypto4xx_device *dev; /* Device in which
- operation to send to */
- void *sa;
- u32 sa_dma_addr;
- u16 sa_len;
};
struct crypto4xx_alg_common {
@@ -172,9 +159,6 @@ static inline struct crypto4xx_alg *cryp
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
- struct crypto4xx_ctx *rctx);
-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);

@@ -1,31 +0,0 @@
From 1ef52a95ea53c3c54b061e3f1af85976356c7132 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:16 +0200
Subject: [PATCH 03/25] crypto: crypto4xx - set CRYPTO_ALG_KERN_DRIVER_ONLY
flag
The security offload function is performed by a cryptographic
engine core attached to the 128-bit PLB (processor local bus)
with builtin DMA and interrupt controllers. This, I think,
satisfies the requirement for the CRYPTO_ALG_KERN_DRIVER_ONLY
flag.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1121,7 +1121,9 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,

@@ -1,76 +0,0 @@
From 886c251fd4ca40a27697afec7bc44c115e803d78 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:17 +0200
Subject: [PATCH 04/25] crypto: crypto4xx - remove extern statement before
function declaration
All function declarations are "extern" by default; there is no need
to specify it explicitly. As C99 states in 6.2.2.5:
"If the declaration of an identifier for a function has no
storage-class specifier, its linkage is determined exactly
as if it were declared with the storage-class specifier
extern."
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.h | 48 ++++++++++++++++++------------------
1 file changed, 24 insertions(+), 24 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -157,28 +157,28 @@ static inline struct crypto4xx_alg *cryp
return container_of(x, struct crypto4xx_alg, alg.u.cipher);
}
-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
-extern void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf, int len);
-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
- struct crypto4xx_ctx *ctx,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len);
-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen);
-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
-extern int crypto4xx_hash_digest(struct ahash_request *req);
-extern int crypto4xx_hash_final(struct ahash_request *req);
-extern int crypto4xx_hash_update(struct ahash_request *req);
-extern int crypto4xx_hash_init(struct ahash_request *req);
+int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+void crypto4xx_memcpy_le(unsigned int *dst,
+ const unsigned char *buf, int len);
+u32 crypto4xx_build_pd(struct crypto_async_request *req,
+ struct crypto4xx_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int datalen,
+ void *iv, u32 iv_len);
+int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt(struct ablkcipher_request *req);
+int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+int crypto4xx_hash_digest(struct ahash_request *req);
+int crypto4xx_hash_final(struct ahash_request *req);
+int crypto4xx_hash_update(struct ahash_request *req);
+int crypto4xx_hash_init(struct ahash_request *req);
#endif

@@ -1,24 +0,0 @@
From c587e65deacf8c86de2d7c51f1e81d0a4a9147a8 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:18 +0200
Subject: [PATCH 05/25] crypto: crypto4xx - remove double assignment of
pd_uinfo->state
crypto4xx_put_pd_to_pdr() already clears the flag.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 1 -
1 file changed, 1 deletion(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1086,7 +1086,6 @@ static void crypto4xx_bh_tasklet_cb(unsi
pd->pd_ctl.bf.pe_done = 0;
crypto4xx_pd_done(core_dev->dev, tail);
crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
- pd_uinfo->state = PD_ENTRY_FREE;
} else {
/* if tail not done, break */
break;

@@ -1,87 +0,0 @@
From 453e3090b9c3f5da70b21648c2244e9821f0916d Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:19 +0200
Subject: [PATCH 06/25] crypto: crypto4xx - fix dynamic_sa_ctl's sa_contents
declaration
The driver had a union dynamic_sa_contents in place that described
the meaning of the bits in the sa_contents variable, but the
sa_contents member itself was still declared as a plain u32. This
patch changes the declaration to the union type, so the field can be
accessed either as a raw word (.w) or through its bitfields.
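For illustration, here is a minimal standalone sketch of the
union-with-bitfields pattern the driver relies on: a raw 32-bit word
(.w) overlays a structured bitfield view (.bf). The field names and
widths below are made up and the exact bit layout is
implementation-defined; only the shape of the pattern matches the
driver.

/* Illustrative pattern only; field names and widths are made up. */
#include <stdio.h>
#include <stdint.h>

union contents {
        struct {
                uint32_t key_size : 5;
                uint32_t iv_words : 3;
                uint32_t reserved : 24;
        } bf;                   /* structured, per-field view           */
        uint32_t w;             /* raw 32-bit word, like sa_contents.w  */
};

int main(void)
{
        union contents cts;

        cts.w = 0;              /* whole-word access                    */
        cts.bf.key_size = 8;    /* per-field access through .bf         */
        cts.bf.iv_words = 4;
        printf("raw word: 0x%08x\n", (unsigned int)cts.w);
        return 0;
}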
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 4 ++--
drivers/crypto/amcc/crypto4xx_sa.c | 12 ++++++------
drivers/crypto/amcc/crypto4xx_sa.h | 2 +-
3 files changed, 9 insertions(+), 9 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -150,7 +150,7 @@ static int crypto4xx_setkey_aes(struct c
SA_NOT_COPY_HDR);
crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
key, keylen);
- sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
ctx->is_hash = 0;
ctx->direction = DIR_INBOUND;
@@ -220,7 +220,7 @@ static int crypto4xx_hash_alg_init(struc
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
+ sa->sa_contents.w = SA_HASH160_CONTENTS;
sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
/* Need to zero hash digest in SA */
memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -40,9 +40,9 @@ u32 get_dynamic_sa_offset_state_ptr_fiel
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
offset = cts.bf.key_size
+ cts.bf.inner_size
+ cts.bf.outer_size
@@ -66,9 +66,9 @@ u32 get_dynamic_sa_iv_size(struct crypto
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
}
@@ -77,9 +77,9 @@ u32 get_dynamic_sa_offset_key_field(stru
union dynamic_sa_contents cts;
if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+ cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
return sizeof(struct dynamic_sa_ctl);
}
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -169,7 +169,7 @@ union sa_command_1 {
} __attribute__((packed));
struct dynamic_sa_ctl {
- u32 sa_contents;
+ union dynamic_sa_contents sa_contents;
union sa_command_0 sa_command_0;
union sa_command_1 sa_command_1;
} __attribute__((packed));

@@ -1,234 +0,0 @@
From 249c8d98ea339325dca481d5dae93686cd494059 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:20 +0200
Subject: [PATCH 07/25] crypto: crypto4xx - move and refactor dynamic_contents
helpers
This patch refactors and moves the dynamic_contents helper
functions into the crypto4xx_sa.h header file.
* get_dynamic_sa_iv_size is no longer needed, as the cryptoapi
provides the required IV size information as well.
* refactor the function declarations to use a pointer to the
dynamic_sa_contents union instead of the crypto4xx_ctx.
* rename get_dynamic_sa_offset_key_field to get_dynamic_sa_key_field.
It returns the pointer to the key directly.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/Makefile | 2 +-
drivers/crypto/amcc/crypto4xx_alg.c | 20 ++++-----
drivers/crypto/amcc/crypto4xx_core.h | 3 --
drivers/crypto/amcc/crypto4xx_sa.c | 85 ------------------------------------
drivers/crypto/amcc/crypto4xx_sa.h | 28 ++++++++++++
5 files changed, 39 insertions(+), 99 deletions(-)
delete mode 100644 drivers/crypto/amcc/crypto4xx_sa.c
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
-crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o
crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -29,8 +29,8 @@
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+#include "crypto4xx_sa.h"
static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv,
@@ -79,8 +79,8 @@ int crypto4xx_encrypt(struct ablkcipher_
ctx->pd_ctl = 0x1;
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, req->info,
+ crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
@@ -92,8 +92,8 @@ int crypto4xx_decrypt(struct ablkcipher_
ctx->pd_ctl = 1;
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, req->info,
+ crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
}
/**
@@ -148,15 +148,15 @@ static int crypto4xx_setkey_aes(struct c
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ crypto4xx_memcpy_le(get_dynamic_sa_key_field(sa),
key, keylen);
sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
ctx->is_hash = 0;
ctx->direction = DIR_INBOUND;
- memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
- (void *)&ctx->state_record_dma_addr, 4);
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ memcpy(sa + get_dynamic_sa_offset_state_ptr_field(sa),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
@@ -226,7 +226,7 @@ static int crypto4xx_hash_alg_init(struc
memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
sa_in->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
return 0;
}
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -161,9 +161,6 @@ int crypto4xx_alloc_sa(struct crypto4xx_
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
void crypto4xx_memcpy_le(unsigned int *dst,
const unsigned char *buf, int len);
u32 crypto4xx_build_pd(struct crypto_async_request *req,
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * AMCC SoC PPC4xx Crypto Driver
- *
- * Copyright (c) 2008 Applied Micro Circuits Corporation.
- * All rights reserved. James Hsiao <jhsiao@amcc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * @file crypto4xx_sa.c
- *
- * This file implements the security context
- * associate format.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock_types.h>
-#include <linux/highmem.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include "crypto4xx_reg_def.h"
-#include "crypto4xx_sa.h"
-#include "crypto4xx_core.h"
-
-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3
- + cts.bf.iv0
- + cts.bf.iv1
- + cts.bf.iv2
- + cts.bf.iv3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
- return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
-}
-
-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
-{
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
- else
- cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
-
- return sizeof(struct dynamic_sa_ctl);
-}
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -240,4 +240,32 @@ struct dynamic_sa_hash160 {
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+static inline u32
+get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
+{
+ u32 offset;
+
+ offset = cts->sa_contents.bf.key_size
+ + cts->sa_contents.bf.inner_size
+ + cts->sa_contents.bf.outer_size
+ + cts->sa_contents.bf.spi
+ + cts->sa_contents.bf.seq_num0
+ + cts->sa_contents.bf.seq_num1
+ + cts->sa_contents.bf.seq_num_mask0
+ + cts->sa_contents.bf.seq_num_mask1
+ + cts->sa_contents.bf.seq_num_mask2
+ + cts->sa_contents.bf.seq_num_mask3
+ + cts->sa_contents.bf.iv0
+ + cts->sa_contents.bf.iv1
+ + cts->sa_contents.bf.iv2
+ + cts->sa_contents.bf.iv3;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+static inline u8 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+{
+ return (u8 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+}
+
#endif

@@ -1,248 +0,0 @@
From f2a13e7cba9e2b16f4888fbd9cf2bc25b95945be Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:21 +0200
Subject: [PATCH 08/25] crypto: crypto4xx - enable AES RFC3686, ECB, CFB and
OFB offloads
The crypto engine supports more than just aes-cbc. This patch
enables the remaining AES block cipher modes that pass the
testmanager's test vectors.
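One detail worth spelling out for the rfc3686 entries added below:
RFC 3686 builds the 16-byte AES-CTR counter block from a 4-byte nonce
(taken from the tail of the key in crypto4xx_setkey_rfc3686()), the
8-byte per-request IV, and a 32-bit big-endian block counter that
starts at 1, which is what crypto4xx_rfc3686_encrypt()/_decrypt()
assemble. A standalone, userspace-style sketch of that layout follows;
the helper name is made up and htonl() stands in for the kernel's
cpu_to_be32().

/* Userspace sketch; helper name is made up, not part of the driver. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl(), in place of cpu_to_be32() */

/* Assemble the RFC 3686 counter block: nonce | IV | counter. */
static void rfc3686_counter_block(uint8_t out[16],
                                  const uint8_t nonce[4], /* from key tail    */
                                  const uint8_t iv[8])    /* from the request */
{
        uint32_t ctr = htonl(1);        /* block counter starts at 1 */

        memcpy(out, nonce, 4);          /* bytes  0..3  */
        memcpy(out + 4, iv, 8);         /* bytes  4..11 */
        memcpy(out + 12, &ctr, 4);      /* bytes 12..15 */
}

int main(void)
{
        uint8_t nonce[4] = { 0 }, iv[8] = { 0 }, block[16];
        int i;

        rfc3686_counter_block(block, nonce, iv);
        for (i = 0; i < 16; i++)
                printf("%02x", block[i]);
        printf("\n");   /* last word prints 00000001: the initial counter */
        return 0;
}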
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 66 ++++++++++++++++++++++++
drivers/crypto/amcc/crypto4xx_core.c | 98 ++++++++++++++++++++++++++++++++++++
drivers/crypto/amcc/crypto4xx_core.h | 10 ++++
drivers/crypto/amcc/crypto4xx_sa.h | 3 ++
4 files changed, 177 insertions(+)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -28,6 +28,7 @@
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -172,6 +173,71 @@ int crypto4xx_setkey_aes_cbc(struct cryp
CRYPTO_FEEDBACK_MODE_NO_FB);
}
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ int rc;
+
+ rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+ if (rc)
+ return rc;
+
+ memcpy(ctx->state_record,
+ key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+
+ return 0;
+}
+
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
+ *(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
+
+ ctx->direction = DIR_OUTBOUND;
+ ctx->pd_ctl = 1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE);
+}
+
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ __be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
+ *(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
+
+ ctx->direction = DIR_INBOUND;
+ ctx->pd_ctl = 1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, iv, AES_IV_SIZE);
+}
+
/**
* HASH SHA1 Functions
*/
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -36,6 +36,7 @@
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
+#include <crypto/ctr.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
@@ -1140,6 +1141,103 @@ struct crypto4xx_alg_common crypto4xx_al
}
}
}},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ } },
};
/**
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -171,8 +171,18 @@ u32 crypto4xx_build_pd(struct crypto_asy
void *iv, u32 iv_len);
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
int crypto4xx_encrypt(struct ablkcipher_request *req);
int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
int crypto4xx_hash_digest(struct ahash_request *req);
int crypto4xx_hash_final(struct ahash_request *req);
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -112,6 +112,9 @@ union sa_command_0 {
#define CRYPTO_MODE_ECB 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_CTR 4
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0

@@ -1,171 +0,0 @@
From 5c727f92ea5e019fd216f73009eee2b6e0867726 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:22 +0200
Subject: [PATCH 09/25] crypto: crypto4xx - refactor
crypto4xx_copy_pkt_to_dst()
This patch refactors the crypto4xx_copy_pkt_to_dst() to use
scatterwalk_map_and_copy() to copy the processed data between
the crypto engine's scatter ring buffer and the destination
specified by the ablkcipher_request.
This also makes the crypto4xx_fill_one_page() function redundant.
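For readers unfamiliar with the helper: scatterwalk_map_and_copy(buf,
sg, start, nbytes, out) copies nbytes between a linear buffer and a
scatterlist, starting at byte offset start inside the scatterlist; a
non-zero out copies buf to sg, a zero out copies sg to buf. A minimal
kernel-side sketch of the call as the refactored copy routine below
uses it (the wrapper name is made up, and this is not standalone
code):

/* Sketch only; wrapper name is made up, not part of the driver. */
#include <crypto/scatterwalk.h>

/* Copy one chunk of the engine's scatter ring buffer into the
 * destination scatterlist, starting dst_off bytes into dst. */
static void copy_ring_chunk_to_dst(void *ring_chunk,
                                   struct scatterlist *dst,
                                   unsigned int dst_off, unsigned int len)
{
        /* out = 1: direction is linear buffer -> scatterlist */
        scatterwalk_map_and_copy(ring_chunk, dst, dst_off, len, 1);
}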
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 126 +++++++++--------------------------
1 file changed, 30 insertions(+), 96 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -38,6 +38,7 @@
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/sha.h>
+#include <crypto/scatterwalk.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -479,111 +480,44 @@ static inline struct ce_sd *crypto4xx_ge
return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}
-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
- dma_addr_t *addr, u32 *length,
- u32 *idx, u32 *offset, u32 *nbytes)
-{
- u32 len;
-
- if (*length > dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- dev->scatter_buffer_size);
- *offset = 0;
- *length -= dev->scatter_buffer_size;
- *nbytes -= dev->scatter_buffer_size;
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *addr = *addr + dev->scatter_buffer_size;
- return 1;
- } else if (*length < dev->scatter_buffer_size) {
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset, *length);
- if ((*offset + *length) == dev->scatter_buffer_size) {
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
- *nbytes -= *length;
- *offset = 0;
- } else {
- *nbytes -= *length;
- *offset += *length;
- }
-
- return 0;
- } else {
- len = (*nbytes <= dev->scatter_buffer_size) ?
- (*nbytes) : dev->scatter_buffer_size;
- memcpy(phys_to_virt(*addr),
- dev->scatter_buffer_va +
- *idx * dev->scatter_buffer_size + *offset,
- len);
- *offset = 0;
- *nbytes -= len;
-
- if (*idx == PPC4XX_LAST_SD)
- *idx = 0;
- else
- (*idx)++;
-
- return 0;
- }
-}
-
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
struct ce_pd *pd,
struct pd_uinfo *pd_uinfo,
u32 nbytes,
struct scatterlist *dst)
{
- dma_addr_t addr;
- u32 this_sd;
- u32 offset;
- u32 len;
- u32 i;
- u32 sg_len;
- struct scatterlist *sg;
-
- this_sd = pd_uinfo->first_sd;
- offset = 0;
- i = 0;
+ unsigned int first_sd = pd_uinfo->first_sd;
+ unsigned int last_sd;
+ unsigned int overflow = 0;
+ unsigned int to_copy;
+ unsigned int dst_start = 0;
+
+ /*
+ * Because the scatter buffers are all neatly organized in one
+ * big continuous ringbuffer; scatterwalk_map_and_copy() can
+ * be instructed to copy a range of buffers in one go.
+ */
+
+ last_sd = (first_sd + pd_uinfo->num_sd);
+ if (last_sd > PPC4XX_LAST_SD) {
+ last_sd = PPC4XX_LAST_SD;
+ overflow = last_sd % PPC4XX_NUM_SD;
+ }
while (nbytes) {
- sg = &dst[i];
- sg_len = sg->length;
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
-
- if (offset == 0) {
- len = (nbytes <= sg->length) ? nbytes : sg->length;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- i++;
- } else {
- len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
- nbytes : (dev->scatter_buffer_size - offset);
- len = (sg->length < len) ? sg->length : len;
- while (crypto4xx_fill_one_page(dev, &addr, &len,
- &this_sd, &offset, &nbytes))
- ;
- if (!nbytes)
- return;
- sg_len -= len;
- if (sg_len) {
- addr += len;
- while (crypto4xx_fill_one_page(dev, &addr,
- &sg_len, &this_sd, &offset, &nbytes))
- ;
- }
- i++;
+ void *buf = dev->scatter_buffer_va +
+ first_sd * PPC4XX_SD_BUFFER_SIZE;
+
+ to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
+ (1 + last_sd - first_sd));
+ scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
+ nbytes -= to_copy;
+
+ if (overflow) {
+ first_sd = 0;
+ last_sd = overflow;
+ dst_start += to_copy;
+ overflow = 0;
}
}
}

@@ -1,59 +0,0 @@
From 40e3b847bff70edc28c5290d209e531da6f9e534 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:23 +0200
Subject: [PATCH 10/25] crypto: crypto4xx - replace crypto4xx_dev's
scatter_buffer_size with constant
scatter_buffer_size is always set to PPC4XX_SD_BUFFER_SIZE.
I don't think there's any point in keeping the variable
around.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 7 +++----
drivers/crypto/amcc/crypto4xx_core.h | 1 -
2 files changed, 3 insertions(+), 5 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -396,10 +396,9 @@ static u32 crypto4xx_build_sdr(struct cr
if (!dev->sdr)
return -ENOMEM;
- dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
dev->scatter_buffer_va =
dma_alloc_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
&dev->scatter_buffer_pa, GFP_ATOMIC);
if (!dev->scatter_buffer_va)
return -ENOMEM;
@@ -408,7 +407,7 @@ static u32 crypto4xx_build_sdr(struct cr
for (i = 0; i < PPC4XX_NUM_SD; i++) {
sd_array[i].ptr = dev->scatter_buffer_pa +
- dev->scatter_buffer_size * i;
+ PPC4XX_SD_BUFFER_SIZE * i;
}
return 0;
@@ -423,7 +422,7 @@ static void crypto4xx_destroy_sdr(struct
if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
- dev->scatter_buffer_size * PPC4XX_NUM_SD,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
dev->scatter_buffer_pa);
}
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -87,7 +87,6 @@ struct crypto4xx_device {
program ce sdr_base_register */
void *scatter_buffer_va;
dma_addr_t scatter_buffer_pa;
- u32 scatter_buffer_size;
void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
dma_addr_t shadow_sa_pool_pa;

@@ -1,373 +0,0 @@
From 9e0a0b3a192af20193f074ed2ad9dd85a2e48d00 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Fri, 25 Aug 2017 15:47:25 +0200
Subject: [PATCH 12/25] crypto: crypto4xx - pointer arithmetic overhaul
This patch improves the readability of various functions by
replacing several void * pointer declarations with pointers to
their respective structs. This makes it possible to use the more
eye-friendly array-indexing syntax.
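A standalone before/after sketch of the idea (the struct is trimmed
down and the helper names are made up; the real ring element is
struct ce_pd):

/* Trimmed-down sketch; helper names are made up. */
#include <stdio.h>

struct ce_pd {                  /* trimmed-down ring element */
        unsigned int sa;
        unsigned int sa_len;
};

/* Before: a void * ring needs manual pointer arithmetic and a cast. */
static struct ce_pd *get_pd_untyped(void *pdr, unsigned int idx)
{
        return (struct ce_pd *)((char *)pdr + sizeof(struct ce_pd) * idx);
}

/* After: a struct ce_pd * ring allows plain array indexing. */
static struct ce_pd *get_pd_typed(struct ce_pd *pdr, unsigned int idx)
{
        return &pdr[idx];
}

int main(void)
{
        struct ce_pd ring[4] = { { 0 } };

        /* both forms resolve to the same element */
        printf("%d\n", get_pd_untyped(ring, 2) == get_pd_typed(ring, 2));
        return 0;
}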
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 26 ++++++++--------
drivers/crypto/amcc/crypto4xx_core.c | 60 +++++++++++++++---------------------
drivers/crypto/amcc/crypto4xx_core.h | 41 +++++++++++++-----------
3 files changed, 59 insertions(+), 68 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -134,7 +134,7 @@ static int crypto4xx_setkey_aes(struct c
}
}
/* Setup SA */
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
SA_SAVE_IV : SA_NOT_SAVE_IV),
@@ -160,7 +160,7 @@ static int crypto4xx_setkey_aes(struct c
ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
return 0;
@@ -249,8 +249,7 @@ static int crypto4xx_hash_alg_init(struc
struct crypto_alg *alg = tfm->__crt_alg;
struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct dynamic_sa_hash160 *sa;
int rc;
ctx->dev = my_alg->dev;
@@ -274,25 +273,24 @@ static int crypto4xx_hash_alg_init(struc
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+ set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
SA_OPCODE_HASH, DIR_INBOUND);
- set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
ctx->direction = DIR_INBOUND;
- sa->sa_contents.w = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
/* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
+ sa->state_ptr = ctx->state_record_dma_addr;
+ ctx->offset_to_sr_ptr =
+ get_dynamic_sa_offset_state_ptr_field(&sa->ctrl);
return 0;
}
@@ -303,7 +301,7 @@ int crypto4xx_hash_init(struct ahash_req
int ds;
struct dynamic_sa_ctl *sa;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
ds = crypto_ahash_digestsize(
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -211,7 +211,7 @@ static u32 crypto4xx_build_pdr(struct cr
}
memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
- 256 * PPC4XX_NUM_PD,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
GFP_ATOMIC);
if (!dev->shadow_sa_pool)
@@ -223,16 +223,14 @@ static u32 crypto4xx_build_pdr(struct cr
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
- pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * i);
+ pd_uinfo = &dev->pdr_uinfo[i];
/* alloc 256 bytes which is enough for any kind of dynamic sa */
- pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
+ pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
/* alloc state record */
- pd_uinfo->sr_va = dev->shadow_sr_pool +
- sizeof(struct sa_state_record) * i;
+ pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
sizeof(struct sa_state_record) * i;
}
@@ -248,8 +246,9 @@ static void crypto4xx_destroy_pdr(struct
dev->pdr, dev->pdr_pa);
if (dev->shadow_sa_pool)
- dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
- dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ dma_free_coherent(dev->core_dev->device,
+ sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
@@ -277,11 +276,9 @@ static u32 crypto4xx_get_pd_from_pdr_nol
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
- struct pd_uinfo *pd_uinfo;
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
unsigned long flags;
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * idx);
spin_lock_irqsave(&dev->core_dev->lock, flags);
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
@@ -298,7 +295,7 @@ static struct ce_pd *crypto4xx_get_pdp(s
{
*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
- return dev->pdr + sizeof(struct ce_pd) * idx;
+ return &dev->pdr[idx];
}
/**
@@ -376,7 +373,7 @@ static inline struct ce_gd *crypto4xx_ge
{
*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
- return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
+ return &dev->gdr[idx];
}
/**
@@ -387,7 +384,6 @@ static inline struct ce_gd *crypto4xx_ge
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
int i;
- struct ce_sd *sd_array;
/* alloc memory for scatter descriptor ring */
dev->sdr = dma_alloc_coherent(dev->core_dev->device,
@@ -403,10 +399,8 @@ static u32 crypto4xx_build_sdr(struct cr
if (!dev->scatter_buffer_va)
return -ENOMEM;
- sd_array = dev->sdr;
-
for (i = 0; i < PPC4XX_NUM_SD; i++) {
- sd_array[i].ptr = dev->scatter_buffer_pa +
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
PPC4XX_SD_BUFFER_SIZE * i;
}
@@ -476,7 +470,7 @@ static inline struct ce_sd *crypto4xx_ge
{
*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
- return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
+ return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -525,11 +519,10 @@ static u32 crypto4xx_copy_digest_to_dst(
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- struct sa_state_record *state_record =
- (struct sa_state_record *) pd_uinfo->sr_va;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
+ memcpy((void *) pd_uinfo->dest_va,
+ pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
@@ -612,11 +605,9 @@ static u32 crypto4xx_ahash_done(struct c
static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
- struct ce_pd *pd;
- struct pd_uinfo *pd_uinfo;
+ struct ce_pd *pd = &dev->pdr[idx];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
- pd = dev->pdr + sizeof(struct ce_pd)*idx;
- pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
CRYPTO_ALG_TYPE_ABLKCIPHER)
return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
@@ -717,7 +708,6 @@ u32 crypto4xx_build_pd(struct crypto_asy
unsigned long flags;
struct pd_uinfo *pd_uinfo = NULL;
unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
u32 gd_idx = 0;
/* figure how many gd is needed */
@@ -776,17 +766,15 @@ u32 crypto4xx_build_pd(struct crypto_asy
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
- sizeof(struct pd_uinfo) * pd_entry);
+ pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
pd->sa = pd_uinfo->sa_pa;
- sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
+ sa = pd_uinfo->sa_va;
if (ctx->direction == DIR_INBOUND)
memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
else
@@ -796,14 +784,15 @@ u32 crypto4xx_build_pd(struct crypto_asy
&pd_uinfo->sr_pa, 4);
if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+ crypto4xx_memcpy_le(pd_uinfo->sr_va->save_iv,
+ iv, iv_len);
} else {
if (ctx->direction == DIR_INBOUND) {
pd->sa = ctx->sa_in_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa = ctx->sa_in;
} else {
pd->sa = ctx->sa_out_dma_addr;
- sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa = ctx->sa_out;
}
}
pd->sa_len = ctx->sa_len;
@@ -1011,9 +1000,8 @@ static void crypto4xx_bh_tasklet_cb(unsi
while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
tail = core_dev->dev->pdr_tail;
- pd_uinfo = core_dev->dev->pdr_uinfo +
- sizeof(struct pd_uinfo)*tail;
- pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
+ pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ pd = &core_dev->dev->pdr[tail];
if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
pd->pd_ctl.bf.pe_done &&
!pd->pd_ctl.bf.host_ready) {
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -23,6 +23,8 @@
#define __CRYPTO4XX_CORE_H__
#include <crypto/internal/hash.h>
+#include "crypto4xx_reg_def.h"
+#include "crypto4xx_sa.h"
#define MODULE_NAME "crypto4xx"
@@ -48,6 +50,13 @@
struct crypto4xx_device;
+union shadow_sa_buf {
+ struct dynamic_sa_ctl sa;
+
+ /* alloc 256 bytes which is enough for any kind of dynamic sa */
+ u8 buf[256];
+} __packed;
+
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
@@ -60,9 +69,9 @@ struct pd_uinfo {
used by this packet */
u32 num_sd; /* number of scatter discriptors
used by this packet */
- void *sa_va; /* shadow sa, when using cp from ctx->sa */
+ struct dynamic_sa_ctl *sa_va; /* shadow sa */
u32 sa_pa;
- void *sr_va; /* state record for shadow sa */
+ struct sa_state_record *sr_va; /* state record for shadow sa */
u32 sr_pa;
struct scatterlist *dest_va;
struct crypto_async_request *async_req; /* base crypto request
@@ -75,22 +84,18 @@ struct crypto4xx_device {
void __iomem *ce_base;
void __iomem *trng_base;
- void *pdr; /* base address of packet
- descriptor ring */
- dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
- void *gdr; /* gather descriptor ring */
- dma_addr_t gdr_pa; /* physical address used to
- program ce gdr_base_register */
- void *sdr; /* scatter descriptor ring */
- dma_addr_t sdr_pa; /* physical address used to
- program ce sdr_base_register */
+ struct ce_pd *pdr; /* base address of packet descriptor ring */
+ dma_addr_t pdr_pa; /* physical address of pdr_base_register */
+ struct ce_gd *gdr; /* gather descriptor ring */
+ dma_addr_t gdr_pa; /* physical address of gdr_base_register */
+ struct ce_sd *sdr; /* scatter descriptor ring */
+ dma_addr_t sdr_pa; /* physical address of sdr_base_register */
void *scatter_buffer_va;
dma_addr_t scatter_buffer_pa;
- void *shadow_sa_pool; /* pool of memory for sa in pd_uinfo */
+ union shadow_sa_buf *shadow_sa_pool;
dma_addr_t shadow_sa_pool_pa;
- void *shadow_sr_pool; /* pool of memory for sr in pd_uinfo */
+ struct sa_state_record *shadow_sr_pool;
dma_addr_t shadow_sr_pool_pa;
u32 pdr_tail;
u32 pdr_head;
@@ -98,7 +103,7 @@ struct crypto4xx_device {
u32 gdr_head;
u32 sdr_tail;
u32 sdr_head;
- void *pdr_uinfo;
+ struct pd_uinfo *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
};
@@ -116,11 +121,11 @@ struct crypto4xx_core_device {
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
- void *sa_in;
+ struct dynamic_sa_ctl *sa_in;
dma_addr_t sa_in_dma_addr;
- void *sa_out;
+ struct dynamic_sa_ctl *sa_out;
dma_addr_t sa_out_dma_addr;
- void *state_record;
+ struct sa_state_record *state_record;
dma_addr_t state_record_dma_addr;
u32 sa_len;
u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
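
With the typed ring and pool members above, per-entry lookups become plain
array indexing. A brief kernel-context sketch of the resulting pattern (not a
new helper, just the idiom used throughout the reworked driver; "tail" stands
for any valid ring index):

	/* entry lookups are array indexing instead of void-pointer
	 * arithmetic with sizeof()
	 */
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[tail];
	struct ce_pd *pd = &dev->pdr[tail];

	/* the shadow SA of that entry is one fixed 256-byte union slot */
	pd_uinfo->sa_va = &dev->shadow_sa_pool[tail].sa;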

View File

@ -1,25 +0,0 @@
From 5a4326d3a03f03c2518a2c255be33a7114af3230 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:05 +0200
Subject: [PATCH 13/25] crypto: crypto4xx - wire up hmac_mc to hmac_muting
The hmac_mc parameter of set_dynamic_sa_command_1()
was defined but not used. On closer inspection, it
turns out it was never wired up.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -63,6 +63,7 @@ static void set_dynamic_sa_command_1(str
sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;

View File

@ -1,49 +0,0 @@
From e9b8e4e1129d0886094cfe013cdbaafc4ce0de76 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:06 +0200
Subject: [PATCH 14/25] crypto: crypto4xx - fix off-by-one AES-OFB
I used aes-cbc as a template for ofb, but sadly I forgot
to update the set_key method to crypto4xx_setkey_aes_ofb().
This was caught by the testmgr:
alg: skcipher: Test 1 failed (invalid result) on encr. for ofb-aes-ppc4xx
00000000: 76 49 ab ac 81 19 b2 46 ce e9 8e 9b 12 e9 19 7d
00000010: 50 86 cb 9b 50 72 19 ee 95 db 11 3a 91 76 78 b2
00000020: 73 be d6 b8 e3 c1 74 3b 71 16 e6 9e 22 22 95 16
00000030: 3f f1 ca a1 68 1f ac 09 12 0e ca 30 75 86 e1 a7
With the correct set_key method, the aes-ofb cipher passes the test.
name : ofb(aes)
driver : ofb-aes-ppc4xx
module : crypto4xx
priority : 300
refcnt : 1
selftest : passed
internal : no
type : ablkcipher
async : yes
blocksize : 16
min keysize : 16
max keysize : 32
ivsize : 16
geniv : <default>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1153,7 +1153,7 @@ struct crypto4xx_alg_common crypto4xx_al
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cbc,
+ .setkey = crypto4xx_setkey_aes_ofb,
.encrypt = crypto4xx_encrypt,
.decrypt = crypto4xx_decrypt,
}

View File

@ -1,29 +0,0 @@
From 333eb3edda3842f3e5dbd723cb18bbe47eb0508b Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:07 +0200
Subject: [PATCH 15/25] crypto: crypto4xx - fix type mismatch compiler error
This patch fixes a type mismatch error that I accidentally
introduced when I moved and refactored the dynamic_contents
helpers.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_sa.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -266,9 +266,9 @@ get_dynamic_sa_offset_state_ptr_field(st
return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
-static inline u8 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+static inline u32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
{
- return (u8 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+ return (u32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
}
#endif

View File

@ -1,161 +0,0 @@
From 8ef8d195430ca3542d0434cf25e5115484b9fa32 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:09 +0200
Subject: [PATCH 17/25] crypto: crypto4xx - add backlog queue support
Previously, if the crypto4xx driver used all available
security contexts, it would simply refuse new requests
with -EAGAIN. CRYPTO_TFM_REQ_MAY_BACKLOG was ignored.
In the case of dm-crypt.c's crypt_convert() function, this was
causing the following errors to manifest if the system was
pushed hard enough:
| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
| JBD2: Detected IO errors while flushing file data on dm-1-8
| Aborting journal on device dm-1-8.
| EXT4-fs error : ext4_journal_check_start:56: Detected aborted journal
| EXT4-fs (dm-1): Remounting filesystem read-only
| EXT4-fs : ext4_writepages: jbd2_start: 2048 pages, inode 498...; err -30
(This did cause corruptions due to failed writes)
To fix this mess, the crypto4xx driver needs to notify the
user to slow down. This can be achieved by returning -EBUSY
on requests once the crypto hardware is falling behind.
Note: -EBUSY has two different meanings. Setting the flag
CRYPTO_TFM_REQ_MAY_BACKLOG implies that the request was
successfully queued by the crypto driver. To meet this
requirement, the implementation introduces a threshold check and
adds logic to the completion routines in much the same way as
AMD's Cryptographic Coprocessor (CCP) driver does.
Note2: Tests showed that dm-crypt starved ipsec traffic.
Under load, ipsec links dropped to 0 Kbits/s. This is because
dm-crypt's callback would instantly queue the next request.
In order to not starve ipsec, the driver reserves a small
portion of the available crypto contexts for this purpose.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 47 ++++++++++++++++++++++++++++++------
drivers/crypto/amcc/crypto4xx_core.h | 3 ++-
2 files changed, 41 insertions(+), 9 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -39,6 +39,7 @@
#include <crypto/ctr.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -578,8 +579,10 @@ static u32 crypto4xx_ablkcipher_done(str
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- if (ablk_req->base.complete != NULL)
- ablk_req->base.complete(&ablk_req->base, 0);
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+ ablkcipher_request_complete(ablk_req, 0);
return 0;
}
@@ -596,9 +599,10 @@ static u32 crypto4xx_ahash_done(struct c
crypto4xx_copy_digest_to_dst(pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
- /* call user provided callback function x */
- if (ahash_req->base.complete != NULL)
- ahash_req->base.complete(&ahash_req->base, 0);
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ ahash_request_complete(ahash_req, -EINPROGRESS);
+ ahash_request_complete(ahash_req, 0);
return 0;
}
@@ -709,6 +713,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
struct pd_uinfo *pd_uinfo = NULL;
unsigned int nbytes = datalen, idx;
u32 gd_idx = 0;
+ bool is_busy;
/* figure how many gd is needed */
num_gd = sg_nents_for_len(src, datalen);
@@ -739,6 +744,31 @@ u32 crypto4xx_build_pd(struct crypto_asy
* already got must be return the original place.
*/
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ /*
+ * Let the caller know to slow down, once more than 13/16ths = 81%
+ * of the available data contexts are being used simultaneously.
+ *
+ * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
+ * 31 more contexts. Before new requests have to be rejected.
+ */
+ if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 13) / 16);
+ } else {
+ /*
+ * To fix contention issues between ipsec (no blacklog) and
+ * dm-crypto (backlog) reserve 32 entries for "no backlog"
+ * data contexts.
+ */
+ is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
+ ((PPC4XX_NUM_PD * 15) / 16);
+
+ if (is_busy) {
+ spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ return -EBUSY;
+ }
+ }
+
if (num_gd) {
fst_gd = crypto4xx_get_n_gd(dev, num_gd);
if (fst_gd == ERING_WAS_FULL) {
@@ -893,11 +923,12 @@ u32 crypto4xx_build_pd(struct crypto_asy
sa->sa_command_1.bf.hash_crypto_offset = 0;
pd->pd_ctl.w = ctx->pd_ctl;
pd->pd_ctl_len.w = 0x00400000 | datalen;
- pd_uinfo->state = PD_ENTRY_INUSE;
+ pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+
wmb();
/* write any value to push engine to read a pd */
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
- return -EINPROGRESS;
+ return is_busy ? -EBUSY : -EINPROGRESS;
}
/**
@@ -1002,7 +1033,7 @@ static void crypto4xx_bh_tasklet_cb(unsi
tail = core_dev->dev->pdr_tail;
pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
pd = &core_dev->dev->pdr[tail];
- if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
+ if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
pd->pd_ctl.bf.pe_done &&
!pd->pd_ctl.bf.host_ready) {
pd->pd_ctl.bf.pe_done = 0;
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -44,7 +44,8 @@
#define PPC4XX_LAST_SD (PPC4XX_NUM_SD - 1)
#define PPC4XX_SD_BUFFER_SIZE 2048
-#define PD_ENTRY_INUSE 1
+#define PD_ENTRY_BUSY BIT(1)
+#define PD_ENTRY_INUSE BIT(0)
#define PD_ENTRY_FREE 0
#define ERING_WAS_FULL 0xffffffff
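
The throttling rule this patch adds to crypto4xx_build_pd() fits in a few
lines. A minimal, self-contained C sketch (the function name and the local
defines exist only to keep the example standalone; the ring size, flag value
and thresholds are the ones used by the patch):

	#include <stdbool.h>

	#define PPC4XX_NUM_PD			256
	#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x400	/* as in <linux/crypto.h> */

	/*
	 * Backlog-capable requests may fill up to 13/16 (= 208) of the
	 * 256-entry PD ring, everything else up to 15/16 (= 240), which
	 * keeps a reserve of 32 entries for "no backlog" users such as
	 * ipsec.
	 */
	bool pd_ring_is_busy(unsigned int head, unsigned int tail,
			     unsigned int req_flags)
	{
		unsigned int used = (head - tail) % PPC4XX_NUM_PD;
		unsigned int limit = (req_flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ?
					(PPC4XX_NUM_PD * 13) / 16 :
					(PPC4XX_NUM_PD * 15) / 16;

		return used >= limit;
	}

In the driver a busy result means -EBUSY: requests with MAY_BACKLOG set are
still queued (PD_ENTRY_BUSY is recorded so the completion path reports
-EINPROGRESS first), while plain requests are rejected outright.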

View File

@ -1,236 +0,0 @@
From 4865b122d4aff5151c88d2f7442d5a87f7e795ae Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:10 +0200
Subject: [PATCH 18/25] crypto: crypto4xx - use the correct LE32 format for IV
and key defs
The hardware expects that the keys, IVs (and inner/outer hashes)
are in the le32 format.
This patch changes all hardware interface declarations to use
the correct LE32 data format for each field.
In order to pass __CHECK_ENDIAN__ checks, crypto4xx_memcpy_le
has to be honest about the endianness of its parameters.
The function was split and moved to the common crypto4xx_core.h
header. This allows the compiler to generate better code when the
size/len is a constant (the various *_IV_LEN).
Please note that the hardware isn't consistent with the endianness
of the save_digest field in the state record struct, though.
The hashes produced by GHASH and CBC (for CCM) will be in LE32,
whereas md5 and sha{1,256,...} do not need any conversion.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 4 +--
drivers/crypto/amcc/crypto4xx_core.c | 40 ++----------------------------
drivers/crypto/amcc/crypto4xx_core.h | 47 +++++++++++++++++++++++++++++++++---
drivers/crypto/amcc/crypto4xx_sa.h | 29 ++++++++++++----------
4 files changed, 64 insertions(+), 56 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -150,8 +150,8 @@ static int crypto4xx_setkey_aes(struct c
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- crypto4xx_memcpy_le(get_dynamic_sa_key_field(sa),
- key, keylen);
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
ctx->is_hash = 0;
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -619,42 +619,6 @@ static u32 crypto4xx_pd_done(struct cryp
return crypto4xx_ahash_done(dev, pd_uinfo);
}
-/**
- * Note: Only use this function to copy items that is word aligned.
- */
-void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf,
- int len)
-{
- u8 *tmp;
- for (; len >= 4; buf += 4, len -= 4)
- *dst++ = cpu_to_le32(*(unsigned int *) buf);
-
- tmp = (u8 *)dst;
- switch (len) {
- case 3:
- *tmp++ = 0;
- *tmp++ = *(buf+2);
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
- break;
- case 2:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *(buf+1);
- *tmp++ = *buf;
- break;
- case 1:
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = 0;
- *tmp++ = *buf;
- break;
- default:
- break;
- }
-}
-
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
crypto4xx_destroy_pdr(core_dev->dev);
@@ -814,8 +778,8 @@ u32 crypto4xx_build_pd(struct crypto_asy
&pd_uinfo->sr_pa, 4);
if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va->save_iv,
- iv, iv_len);
+ crypto4xx_memcpy_to_le32(pd_uinfo->sr_va->save_iv,
+ iv, iv_len);
} else {
if (ctx->direction == DIR_INBOUND) {
pd->sa = ctx->sa_in_dma_addr;
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -166,9 +166,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
-void crypto4xx_memcpy_le(unsigned int *dst,
- const unsigned char *buf, int len);
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
@@ -193,4 +191,47 @@ int crypto4xx_hash_digest(struct ahash_r
int crypto4xx_hash_final(struct ahash_request *req);
int crypto4xx_hash_update(struct ahash_request *req);
int crypto4xx_hash_init(struct ahash_request *req);
+
+/**
+ * Note: Only use this function to copy items that is word aligned.
+ */
+static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
+ size_t len)
+{
+ for (; len >= 4; buf += 4, len -= 4)
+ *dst++ = __swab32p((u32 *) buf);
+
+ if (len) {
+ const u8 *tmp = (u8 *)buf;
+
+ switch (len) {
+ case 3:
+ *dst = (tmp[2] << 16) |
+ (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 2:
+ *dst = (tmp[1] << 8) |
+ tmp[0];
+ break;
+ case 1:
+ *dst = tmp[0];
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32(dst, buf, len);
+}
+
+static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
+ size_t len)
+{
+ crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+}
#endif
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -181,9 +181,12 @@ struct dynamic_sa_ctl {
* State Record for Security Association (SA)
*/
struct sa_state_record {
- u32 save_iv[4];
- u32 save_hash_byte_cnt[2];
- u32 save_digest[16];
+ __le32 save_iv[4];
+ __le32 save_hash_byte_cnt[2];
+ union {
+ u32 save_digest[16]; /* for MD5/SHA */
+ __le32 save_digest_le32[16]; /* GHASH / CBC */
+ };
} __attribute__((packed));
/**
@@ -192,8 +195,8 @@ struct sa_state_record {
*/
struct dynamic_sa_aes128 {
struct dynamic_sa_ctl ctrl;
- u32 key[4];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[4];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -206,8 +209,8 @@ struct dynamic_sa_aes128 {
*/
struct dynamic_sa_aes192 {
struct dynamic_sa_ctl ctrl;
- u32 key[6];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[6];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -220,8 +223,8 @@ struct dynamic_sa_aes192 {
*/
struct dynamic_sa_aes256 {
struct dynamic_sa_ctl ctrl;
- u32 key[8];
- u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ __le32 key[8];
+ __le32 iv[4]; /* for CBC, OFC, and CFB mode */
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -235,8 +238,8 @@ struct dynamic_sa_aes256 {
*/
struct dynamic_sa_hash160 {
struct dynamic_sa_ctl ctrl;
- u32 inner_digest[5];
- u32 outer_digest[5];
+ __le32 inner_digest[5];
+ __le32 outer_digest[5];
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
@@ -266,9 +269,9 @@ get_dynamic_sa_offset_state_ptr_field(st
return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
-static inline u32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
{
- return (u32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+ return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
}
#endif
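
The endianness rules described above are easier to picture outside the diff.
Below is a small, self-contained userspace model (not driver code) of the byte
layout that crypto4xx_memcpy_to_le32() produces on the big-endian 4xx core,
derived from the old switch() statement and the new __swab32p() loop shown
above:

	#include <stdio.h>
	#include <string.h>

	/* every full 4-byte group is byte-reversed; a 1-3 byte tail ends up
	 * right-aligned in the last group, preceded by zero padding
	 */
	static void memcpy_le32_model(unsigned char *dst,
				      const unsigned char *src, size_t len)
	{
		while (len >= 4) {
			dst[0] = src[3];
			dst[1] = src[2];
			dst[2] = src[1];
			dst[3] = src[0];
			dst += 4;
			src += 4;
			len -= 4;
		}
		if (len) {
			size_t i;

			memset(dst, 0, 4);
			for (i = 0; i < len; i++)
				dst[3 - i] = src[i];
		}
	}

	int main(void)
	{
		const unsigned char key[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
		unsigned char out[8];
		size_t i;

		memcpy_le32_model(out, key, sizeof(key));
		for (i = 0; i < sizeof(out); i++)
			printf("%02x ", out[i]);
		printf("\n");	/* prints: 04 03 02 01 00 00 06 05 */
		return 0;
	}

In other words, each full word is byte-reversed in memory and a short tail is
zero-padded on the left, which is what both the old and the new helper do.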

View File

@ -1,535 +0,0 @@
From cd4dcd6da7a2610e0562a6e130bb68cc544a8fb1 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:11 +0200
Subject: [PATCH 19/25] crypto: crypto4xx - overhaul crypto4xx_build_pd()
This patch overhauls and fixes code related to crypto4xx_build_pd():
* crypto4xx_build_pd() did not handle chained source scatterlists.
This is fixed by replacing the buggy indexed access of &src[idx]
with sg_next() in the gather array setup loop.
* The redundant is_hash, direction, save_iv and pd_ctl members
in the crypto4xx_ctx struct have been removed.
- is_hash can be derived from the crypto_async_request parameter.
- direction is already part of the security association's
bf.dir bitfield.
- save_iv is unused.
- pd_ctl always had the host_ready bit enabled anyway.
(the hash_final case is rather pointless, since the ahash
code has been deactivated).
* make crypto4xx_build_pd()'s caller responsible for converting
the IV to the LE32 format.
* change crypto4xx_ahash_update() and crypto4xx_ahash_digest() to
initialize a temporary destination scatterlist. This allows the
removal of an ugly cast of req->result (which is a pointer to an
u8-array) to a scatterlist pointer.
* change crypto4xx_build_pd() return type to int. After all
it returns -EINPROGRESS/-EBUSY.
* fix crypto4xx_build_pd() thread-unsafe sa handling.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 87 +++++++++++-------------
drivers/crypto/amcc/crypto4xx_core.c | 128 ++++++++++++++++-------------------
drivers/crypto/amcc/crypto4xx_core.h | 12 ++--
3 files changed, 103 insertions(+), 124 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -75,27 +75,29 @@ static void set_dynamic_sa_command_1(str
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_OUTBOUND;
- ctx->is_hash = 0;
- ctx->pd_ctl = 0x1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
+ req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len);
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int ivlen = crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(req));
+ __le32 iv[ivlen];
- ctx->direction = DIR_INBOUND;
- ctx->is_hash = 0;
- ctx->pd_ctl = 1;
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
+ req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len);
}
/**
@@ -154,11 +156,6 @@ static int crypto4xx_setkey_aes(struct c
key, keylen);
sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
sa->sa_command_1.bf.key_len = keylen >> 3;
- ctx->is_hash = 0;
- ctx->direction = DIR_INBOUND;
- memcpy(sa + get_dynamic_sa_offset_state_ptr_field(sa),
- (void *)&ctx->state_record_dma_addr, 4);
- ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
@@ -207,7 +204,7 @@ int crypto4xx_setkey_rfc3686(struct cryp
if (rc)
return rc;
- memcpy(ctx->state_record,
+ crypto4xx_memcpy_to_le32(ctx->state_record->save_iv,
key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
return 0;
@@ -216,27 +213,29 @@ int crypto4xx_setkey_rfc3686(struct cryp
int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- __be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
- *(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
-
- ctx->direction = DIR_OUTBOUND;
- ctx->pd_ctl = 1;
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->state_record->save_iv[0],
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE);
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len);
}
int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- __be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
- *(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
-
- ctx->direction = DIR_INBOUND;
- ctx->pd_ctl = 1;
+ __le32 iv[AES_IV_SIZE / 4] = {
+ ctx->state_record->save_iv[0],
+ cpu_to_le32p((u32 *) req->info),
+ cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE);
+ req->nbytes, iv, AES_IV_SIZE,
+ ctx->sa_out, ctx->sa_len);
}
/**
@@ -254,7 +253,6 @@ static int crypto4xx_hash_alg_init(struc
int rc;
ctx->dev = my_alg->dev;
- ctx->is_hash = 1;
/* Create SA */
if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
@@ -285,13 +283,9 @@ static int crypto4xx_hash_alg_init(struc
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
- ctx->direction = DIR_INBOUND;
/* Need to zero hash digest in SA */
memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
- sa->state_ptr = ctx->state_record_dma_addr;
- ctx->offset_to_sr_ptr =
- get_dynamic_sa_offset_state_ptr_field(&sa->ctrl);
return 0;
}
@@ -307,23 +301,22 @@ int crypto4xx_hash_init(struct ahash_req
__crypto_ahash_cast(req->base.tfm));
sa->sa_command_0.bf.digest_len = ds >> 2;
sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
- ctx->is_hash = 1;
- ctx->direction = DIR_INBOUND;
return 0;
}
int crypto4xx_hash_update(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
+
+ sg_init_one(&dst, req->result, ds);
- ctx->is_hash = 1;
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
-
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -333,14 +326,16 @@ int crypto4xx_hash_final(struct ahash_re
int crypto4xx_hash_digest(struct ahash_request *req)
{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist dst;
+ unsigned int ds = crypto_ahash_digestsize(ahash);
- ctx->pd_ctl = 0x11;
- ctx->direction = DIR_INBOUND;
+ sg_init_one(&dst, req->result, ds);
- return crypto4xx_build_pd(&req->base, ctx, req->src,
- (struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ req->nbytes, NULL, 0, ctx->sa_in,
+ ctx->sa_len);
}
/**
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -194,7 +194,6 @@ void crypto4xx_free_state_record(struct
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
- struct pd_uinfo *pd_uinfo;
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
@@ -224,11 +223,14 @@ static u32 crypto4xx_build_pdr(struct cr
if (!dev->shadow_sr_pool)
return -ENOMEM;
for (i = 0; i < PPC4XX_NUM_PD; i++) {
- pd_uinfo = &dev->pdr_uinfo[i];
+ struct ce_pd *pd = &dev->pdr[i];
+ struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
+
+ pd->sa = dev->shadow_sa_pool_pa +
+ sizeof(union shadow_sa_buf) * i;
/* alloc 256 bytes which is enough for any kind of dynamic sa */
pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
- pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
/* alloc state record */
pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
@@ -291,14 +293,6 @@ static u32 crypto4xx_put_pd_to_pdr(struc
return 0;
}
-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
- dma_addr_t *pd_dma, u32 idx)
-{
- *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
-
- return &dev->pdr[idx];
-}
-
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -516,18 +510,16 @@ static void crypto4xx_copy_pkt_to_dst(st
}
}
-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
+static void crypto4xx_copy_digest_to_dst(void *dst,
+ struct pd_uinfo *pd_uinfo,
struct crypto4xx_ctx *ctx)
{
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va,
- pd_uinfo->sr_va->save_digest,
+ memcpy(dst, pd_uinfo->sr_va->save_digest,
SA_HASH_ALG_SHA1_DIGEST_SIZE);
}
-
- return 0;
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
@@ -596,7 +588,7 @@ static u32 crypto4xx_ahash_done(struct c
ahash_req = ahash_request_cast(pd_uinfo->async_req);
ctx = crypto_tfm_ctx(ahash_req->base.tfm);
- crypto4xx_copy_digest_to_dst(pd_uinfo,
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
@@ -656,17 +648,17 @@ static u32 get_next_sd(u32 current)
return 0;
}
-u32 crypto4xx_build_pd(struct crypto_async_request *req,
+int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len)
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *req_sa,
+ const unsigned int sa_len)
{
struct crypto4xx_device *dev = ctx->dev;
- dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
- struct scatterlist *sg;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -674,8 +666,9 @@ u32 crypto4xx_build_pd(struct crypto_asy
u32 fst_sd = 0xffffffff;
u32 pd_entry;
unsigned long flags;
- struct pd_uinfo *pd_uinfo = NULL;
- unsigned int nbytes = datalen, idx;
+ struct pd_uinfo *pd_uinfo;
+ unsigned int nbytes = datalen;
+ size_t offset_to_sr_ptr;
u32 gd_idx = 0;
bool is_busy;
@@ -689,7 +682,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
num_gd = 0;
/* figure how many sd is needed */
- if (sg_is_last(dst) || ctx->is_hash) {
+ if (sg_is_last(dst)) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -760,37 +753,27 @@ u32 crypto4xx_build_pd(struct crypto_asy
}
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ pd = &dev->pdr[pd_entry];
+ pd->sa_len = sa_len;
+
pd_uinfo = &dev->pdr_uinfo[pd_entry];
- pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
- if (iv_len || ctx->is_hash) {
- pd->sa = pd_uinfo->sa_pa;
- sa = pd_uinfo->sa_va;
- if (ctx->direction == DIR_INBOUND)
- memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
- else
- memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
+ if (iv_len)
+ memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
- memcpy((void *) sa + ctx->offset_to_sr_ptr,
- &pd_uinfo->sr_pa, 4);
+ sa = pd_uinfo->sa_va;
+ memcpy(sa, req_sa, sa_len * 4);
+
+ offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ *(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
- if (iv_len)
- crypto4xx_memcpy_to_le32(pd_uinfo->sr_va->save_iv,
- iv, iv_len);
- } else {
- if (ctx->direction == DIR_INBOUND) {
- pd->sa = ctx->sa_in_dma_addr;
- sa = ctx->sa_in;
- } else {
- pd->sa = ctx->sa_out_dma_addr;
- sa = ctx->sa_out;
- }
- }
- pd->sa_len = ctx->sa_len;
if (num_gd) {
+ dma_addr_t gd_dma;
+ struct scatterlist *sg;
+
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
@@ -799,27 +782,30 @@ u32 crypto4xx_build_pd(struct crypto_asy
pd->src = gd_dma;
/* enable gather */
sa->sa_command_0.bf.gather = 1;
- idx = 0;
- src = &src[0];
/* walk the sg, and setup gather array */
+
+ sg = src;
while (nbytes) {
- sg = &src[idx];
- addr = dma_map_page(dev->core_dev->device, sg_page(sg),
- sg->offset, sg->length, DMA_TO_DEVICE);
- gd->ptr = addr;
- gd->ctl_len.len = sg->length;
+ size_t len;
+
+ len = min(sg->length, nbytes);
+ gd->ptr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
+ gd->ctl_len.len = len;
gd->ctl_len.done = 0;
gd->ctl_len.ready = 1;
- if (sg->length >= nbytes)
+ if (len >= nbytes)
break;
+
nbytes -= sg->length;
gd_idx = get_next_gd(gd_idx);
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
- idx++;
+ sg = sg_next(sg);
}
} else {
pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
- src->offset, src->length, DMA_TO_DEVICE);
+ src->offset, min(nbytes, src->length),
+ DMA_TO_DEVICE);
/*
* Disable gather in sa command
*/
@@ -830,25 +816,24 @@ u32 crypto4xx_build_pd(struct crypto_asy
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
- if (ctx->is_hash || sg_is_last(dst)) {
+ if (sg_is_last(dst)) {
/*
* we know application give us dst a whole piece of memory
* no need to use scatter ring.
- * In case of is_hash, the icv is always at end of src data.
*/
pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
- if (ctx->is_hash)
- pd->dest = virt_to_phys((void *)dst);
- else
- pd->dest = (u32)dma_map_page(dev->core_dev->device,
- sg_page(dst), dst->offset,
- dst->length, DMA_TO_DEVICE);
+ pd->dest = (u32)dma_map_page(dev->core_dev->device,
+ sg_page(dst), dst->offset,
+ min(datalen, dst->length),
+ DMA_TO_DEVICE);
} else {
+ dma_addr_t sd_dma;
struct ce_sd *sd = NULL;
+
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
@@ -862,7 +847,6 @@ u32 crypto4xx_build_pd(struct crypto_asy
sd->ctl.done = 0;
sd->ctl.rdy = 1;
/* sd->ptr should be setup by sd_init routine*/
- idx = 0;
if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
nbytes -= PPC4XX_SD_BUFFER_SIZE;
else
@@ -873,19 +857,23 @@ u32 crypto4xx_build_pd(struct crypto_asy
/* setup scatter descriptor */
sd->ctl.done = 0;
sd->ctl.rdy = 1;
- if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
nbytes -= PPC4XX_SD_BUFFER_SIZE;
- else
+ } else {
/*
* SD entry can hold PPC4XX_SD_BUFFER_SIZE,
* which is more than nbytes, so done.
*/
nbytes = 0;
+ }
}
}
sa->sa_command_1.bf.hash_crypto_offset = 0;
- pd->pd_ctl.w = ctx->pd_ctl;
+ pd->pd_ctl.w = 0;
+ pd->pd_ctl.bf.hash_final =
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH);
+ pd->pd_ctl.bf.host_ready = 1;
pd->pd_ctl_len.w = 0x00400000 | datalen;
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -71,7 +71,6 @@ struct pd_uinfo {
u32 num_sd; /* number of scatter discriptors
used by this packet */
struct dynamic_sa_ctl *sa_va; /* shadow sa */
- u32 sa_pa;
struct sa_state_record *sr_va; /* state record for shadow sa */
u32 sr_pa;
struct scatterlist *dest_va;
@@ -129,11 +128,6 @@ struct crypto4xx_ctx {
struct sa_state_record *state_record;
dma_addr_t state_record_dma_addr;
u32 sa_len;
- u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
- u32 direction;
- u32 save_iv;
- u32 pd_ctl;
- u32 is_hash;
};
struct crypto4xx_alg_common {
@@ -170,8 +164,10 @@ int crypto4xx_build_pd(struct crypto_asy
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
- unsigned int datalen,
- void *iv, u32 iv_len);
+ const unsigned int datalen,
+ const __le32 *iv, const u32 iv_len,
+ const struct dynamic_sa_ctl *sa,
+ const unsigned int sa_len);
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
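
Stripped of the diff markers, the new gather-array setup loop from the hunk
above reads as follows (all identifiers are the driver's own locals from
crypto4xx_build_pd(); this is just the hunk re-assembled so the chained
scatterlist walk is easier to follow):

	sg = src;
	while (nbytes) {
		size_t len;

		len = min(sg->length, nbytes);
		/* map only the bytes of this element that the request uses */
		gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
		gd->ctl_len.len = len;
		gd->ctl_len.done = 0;
		gd->ctl_len.ready = 1;
		if (len >= nbytes)
			break;

		nbytes -= sg->length;
		gd_idx = get_next_gd(gd_idx);
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		sg = sg_next(sg);	/* follows chained scatterlists */
	}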

View File

@ -1,62 +0,0 @@
From 64e1062b2371cb8d6126d4e970832365a1a84562 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:12 +0200
Subject: [PATCH 20/25] crypto: crypto4xx - fix various warnings
crypto4xx_core.c:179:6: warning: symbol 'crypto4xx_free_state_record'
was not declared. Should it be static?
crypto4xx_core.c:331:5: warning: symbol 'crypto4xx_get_n_gd'
was not declared. Should it be static?
crypto4xx_core.c:652:6: warning: symbol 'crypto4xx_return_pd'
was not declared. Should it be static?
crypto4xx_return_pd() is not used by anything. Therefore it is removed.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 16 +++-------------
1 file changed, 3 insertions(+), 13 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -176,7 +176,7 @@ u32 crypto4xx_alloc_state_record(struct
return 0;
}
-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
+static void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
if (ctx->state_record != NULL)
dma_free_coherent(ctx->dev->core_dev->device,
@@ -322,10 +322,11 @@ static inline void crypto4xx_destroy_gdr
* when this function is called.
* preemption or interrupt must be disabled
*/
-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
u32 retval;
u32 tmp;
+
if (n >= PPC4XX_NUM_GD)
return ERING_WAS_FULL;
@@ -621,17 +622,6 @@ static void crypto4xx_stop_all(struct cr
kfree(core_dev);
}
-void crypto4xx_return_pd(struct crypto4xx_device *dev,
- u32 pd_entry, struct ce_pd *pd,
- struct pd_uinfo *pd_uinfo)
-{
- /* irq should be already disabled */
- dev->pdr_head = pd_entry;
- pd->pd_ctl.w = 0;
- pd->pd_ctl_len.w = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
-}
-
static u32 get_next_gd(u32 current)
{
if (current != PPC4XX_LAST_GD)

View File

@ -1,112 +0,0 @@
From 4b5b79998af61db8b0506fba6c0f33b57ea457bd Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:13 +0200
Subject: [PATCH 21/25] crypto: crypto4xx - fix stalls under heavy load
If the crypto4xx device is continuously loaded by dm-crypt
and ipsec work, it will start to work intermittently after a
few (between 20 and 30) seconds, hurting throughput and latency.
This patch contains various stability improvements in order
to fix this issue. So far, the hardware has survived more
than a day without suffering any stalls under the continuous
load.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 33 ++++++++++++++++++---------------
drivers/crypto/amcc/crypto4xx_reg_def.h | 3 +++
2 files changed, 21 insertions(+), 15 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -280,17 +280,20 @@ static u32 crypto4xx_get_pd_from_pdr_nol
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ u32 tail;
unsigned long flags;
spin_lock_irqsave(&dev->core_dev->lock, flags);
+ pd_uinfo->state = PD_ENTRY_FREE;
+
if (dev->pdr_tail != PPC4XX_LAST_PD)
dev->pdr_tail++;
else
dev->pdr_tail = 0;
- pd_uinfo->state = PD_ENTRY_FREE;
+ tail = dev->pdr_tail;
spin_unlock_irqrestore(&dev->core_dev->lock, flags);
- return 0;
+ return tail;
}
/**
@@ -859,16 +862,16 @@ int crypto4xx_build_pd(struct crypto_asy
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
- pd->pd_ctl.w = 0;
- pd->pd_ctl.bf.hash_final =
- (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH);
- pd->pd_ctl.bf.host_ready = 1;
+ pd->pd_ctl.w = PD_CTL_HOST_READY |
+ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ PD_CTL_HASH_FINAL : 0);
pd->pd_ctl_len.w = 0x00400000 | datalen;
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
wmb();
/* write any value to push engine to read a pd */
+ writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
return is_busy ? -EBUSY : -EINPROGRESS;
}
@@ -969,23 +972,23 @@ static void crypto4xx_bh_tasklet_cb(unsi
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
struct pd_uinfo *pd_uinfo;
struct ce_pd *pd;
- u32 tail;
+ u32 tail = core_dev->dev->pdr_tail;
+ u32 head = core_dev->dev->pdr_head;
- while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
- tail = core_dev->dev->pdr_tail;
+ do {
pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
pd = &core_dev->dev->pdr[tail];
if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
- pd->pd_ctl.bf.pe_done &&
- !pd->pd_ctl.bf.host_ready) {
- pd->pd_ctl.bf.pe_done = 0;
+ ((READ_ONCE(pd->pd_ctl.w) &
+ (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
+ PD_CTL_PE_DONE)) {
crypto4xx_pd_done(core_dev->dev, tail);
- crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+ tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
} else {
/* if tail not done, break */
break;
}
- }
+ } while (head != tail);
}
/**
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -261,6 +261,9 @@ union ce_pd_ctl {
} bf;
u32 w;
} __attribute__((packed));
+#define PD_CTL_HASH_FINAL BIT(4)
+#define PD_CTL_PE_DONE BIT(1)
+#define PD_CTL_HOST_READY BIT(0)
union ce_pd_ctl_len {
struct {
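
The crux of the tasklet change above is that completion is now decided from a
single READ_ONCE() snapshot of pd_ctl instead of two separate bitfield reads.
A kernel-context sketch of that predicate (the helper name is made up; the
PD_CTL_* flags are the ones added at the end of the hunk):

	static bool crypto4xx_pd_is_done(struct ce_pd *pd)
	{
		u32 pd_ctl = READ_ONCE(pd->pd_ctl.w);

		/* done once the engine set PE_DONE and cleared HOST_READY */
		return (pd_ctl & (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
			PD_CTL_PE_DONE;
	}

The tasklet combines this with the PD_ENTRY_INUSE check on pd_uinfo->state and
keeps advancing pdr_tail until it catches up with pdr_head.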

View File

@ -1,209 +0,0 @@
From 2f77690dcb96e525bc6b57bce4a0eaecaa2878d1 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:14 +0200
Subject: [PATCH 22/25] crypto: crypto4xx - simplify sa and state context
acquisition
Thanks to the big overhaul of crypto4xx_build_pd(), the request-local
sa_in, sa_out and state_record allocation can be simplified.
There's no need to set up any dma coherent memory anymore, and
much of the support code can be removed.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 27 +++++--------------
drivers/crypto/amcc/crypto4xx_core.c | 50 ++++++------------------------------
drivers/crypto/amcc/crypto4xx_core.h | 6 +----
3 files changed, 15 insertions(+), 68 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -122,20 +122,13 @@ static int crypto4xx_setkey_aes(struct c
}
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- rc = crypto4xx_alloc_state_record(ctx);
- if (rc) {
- crypto4xx_free_sa(ctx);
- return rc;
- }
- }
/* Setup SA */
sa = ctx->sa_in;
@@ -204,8 +197,8 @@ int crypto4xx_setkey_rfc3686(struct cryp
if (rc)
return rc;
- crypto4xx_memcpy_to_le32(ctx->state_record->save_iv,
- key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
+ CTR_RFC3686_NONCE_SIZE]);
return 0;
}
@@ -214,7 +207,7 @@ int crypto4xx_rfc3686_encrypt(struct abl
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
__le32 iv[AES_IV_SIZE / 4] = {
- ctx->state_record->save_iv[0],
+ ctx->iv_nonce,
cpu_to_le32p((u32 *) req->info),
cpu_to_le32p((u32 *) (req->info + 4)),
cpu_to_le32(1) };
@@ -228,7 +221,7 @@ int crypto4xx_rfc3686_decrypt(struct abl
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
__le32 iv[AES_IV_SIZE / 4] = {
- ctx->state_record->save_iv[0],
+ ctx->iv_nonce,
cpu_to_le32p((u32 *) req->info),
cpu_to_le32p((u32 *) (req->info + 4)),
cpu_to_le32(1) };
@@ -255,21 +248,13 @@ static int crypto4xx_hash_alg_init(struc
ctx->dev = my_alg->dev;
/* Create SA */
- if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ if (ctx->sa_in || ctx->sa_out)
crypto4xx_free_sa(ctx);
rc = crypto4xx_alloc_sa(ctx, sa_len);
if (rc)
return rc;
- if (ctx->state_record_dma_addr == 0) {
- crypto4xx_alloc_state_record(ctx);
- if (!ctx->state_record_dma_addr) {
- crypto4xx_free_sa(ctx);
- return -ENOMEM;
- }
- }
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -130,21 +130,17 @@ static void crypto4xx_hw_init(struct cry
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
- ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_in_dma_addr, GFP_ATOMIC);
+ ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
- ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
- &ctx->sa_out_dma_addr, GFP_ATOMIC);
+ ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
- dma_free_coherent(ctx->dev->core_dev->device, size * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
return -ENOMEM;
}
- memset(ctx->sa_in, 0, size * 4);
- memset(ctx->sa_out, 0, size * 4);
ctx->sa_len = size;
return 0;
@@ -152,40 +148,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
- if (ctx->sa_in != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_in, ctx->sa_in_dma_addr);
- if (ctx->sa_out != NULL)
- dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
- ctx->sa_out, ctx->sa_out_dma_addr);
-
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
+ kfree(ctx->sa_in);
+ ctx->sa_in = NULL;
+ kfree(ctx->sa_out);
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
-{
- ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- &ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
- return -ENOMEM;
- memset(ctx->state_record, 0, sizeof(struct sa_state_record));
-
- return 0;
-}
-
-static void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
-{
- if (ctx->state_record != NULL)
- dma_free_coherent(ctx->dev->core_dev->device,
- sizeof(struct sa_state_record),
- ctx->state_record,
- ctx->state_record_dma_addr);
- ctx->state_record_dma_addr = 0;
-}
-
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -888,8 +857,6 @@ static int crypto4xx_alg_init(struct cry
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
- ctx->sa_in_dma_addr = 0;
- ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
@@ -910,7 +877,6 @@ static void crypto4xx_alg_exit(struct cr
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
crypto4xx_free_sa(ctx);
- crypto4xx_free_state_record(ctx);
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -122,11 +122,8 @@ struct crypto4xx_core_device {
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
struct dynamic_sa_ctl *sa_in;
- dma_addr_t sa_in_dma_addr;
struct dynamic_sa_ctl *sa_out;
- dma_addr_t sa_out_dma_addr;
- struct sa_state_record *state_record;
- dma_addr_t state_record_dma_addr;
+ __le32 iv_nonce;
u32 sa_len;
};
@@ -159,7 +156,6 @@ static inline struct crypto4xx_alg *cryp
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
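
Because crypto4xx_build_pd() now copies the security association into the
DMA-coherent shadow slot of each packet descriptor (see the earlier overhaul
patch), the per-tfm sa_in/sa_out buffers are only ever touched by the CPU. A
short sketch of the resulting split, mirroring the hunks above (error handling
omitted; sa_va is the pd_uinfo member set up in crypto4xx_build_pdr()):

	/* per-tfm copies: ordinary kernel memory is enough now */
	ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
	ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);

	/* per-request: the engine only ever reads the coherent shadow copy */
	memcpy(pd_uinfo->sa_va, req_sa, sa_len * 4);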

View File

@ -1,617 +0,0 @@
From a0aae821ba3d35a49d4d0143dfb0c07eee22130e Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:15 +0200
Subject: [PATCH 23/25] crypto: crypto4xx - prepare for AEAD support
This patch enhances existing interfaces and
functions to support AEAD ciphers in the next
patches.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 19 +--
drivers/crypto/amcc/crypto4xx_core.c | 217 +++++++++++++++++++++++++++--------
drivers/crypto/amcc/crypto4xx_core.h | 22 ++--
drivers/crypto/amcc/crypto4xx_sa.h | 41 +++++++
4 files changed, 226 insertions(+), 73 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -26,6 +26,7 @@
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
@@ -83,7 +84,7 @@ int crypto4xx_encrypt(struct ablkcipher_
crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len);
+ req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
@@ -97,7 +98,7 @@ int crypto4xx_decrypt(struct ablkcipher_
crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len);
+ req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
}
/**
@@ -214,7 +215,7 @@ int crypto4xx_rfc3686_encrypt(struct abl
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->nbytes, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len);
+ ctx->sa_out, ctx->sa_len, 0);
}
int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
@@ -228,7 +229,7 @@ int crypto4xx_rfc3686_decrypt(struct abl
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->nbytes, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len);
+ ctx->sa_out, ctx->sa_len, 0);
}
/**
@@ -240,11 +241,13 @@ static int crypto4xx_hash_alg_init(struc
unsigned char hm)
{
struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_alg *my_alg;
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
struct dynamic_sa_hash160 *sa;
int rc;
+ my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
+ alg.u.hash);
ctx->dev = my_alg->dev;
/* Create SA */
@@ -301,7 +304,7 @@ int crypto4xx_hash_update(struct ahash_r
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len);
+ ctx->sa_len, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -320,7 +323,7 @@ int crypto4xx_hash_digest(struct ahash_r
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len);
+ ctx->sa_len, 0);
}
/**
@@ -331,5 +334,3 @@ int crypto4xx_sha1_alg_init(struct crypt
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
-
-
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -35,10 +35,12 @@
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
@@ -514,7 +516,7 @@ static void crypto4xx_ret_sg_desc(struct
}
}
-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
@@ -548,11 +550,9 @@ static u32 crypto4xx_ablkcipher_done(str
if (pd_uinfo->state & PD_ENTRY_BUSY)
ablkcipher_request_complete(ablk_req, -EINPROGRESS);
ablkcipher_request_complete(ablk_req, 0);
-
- return 0;
}
-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
+static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo)
{
struct crypto4xx_ctx *ctx;
@@ -568,20 +568,88 @@ static u32 crypto4xx_ahash_done(struct c
if (pd_uinfo->state & PD_ENTRY_BUSY)
ahash_request_complete(ahash_req, -EINPROGRESS);
ahash_request_complete(ahash_req, 0);
+}
- return 0;
+static void crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct aead_request *aead_req;
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *dst = pd_uinfo->dest_va;
+ int err = 0;
+
+ aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+ base);
+ ctx = crypto_tfm_ctx(aead_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ dst);
+ } else {
+ __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ DMA_FROM_DEVICE);
+ }
+
+ if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ /* append icv at the end */
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
+
+ crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ cp_len);
+
+ scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ cp_len, 1);
+ }
+
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ if (pd->pd_ctl.bf.status & 0x1) {
+ /* authentication error */
+ err = -EBADMSG;
+ } else {
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
+ }
+ err = -EINVAL;
+ }
+ }
+
+ if (pd_uinfo->state & PD_ENTRY_BUSY)
+ aead_request_complete(aead_req, -EINPROGRESS);
+
+ aead_request_complete(aead_req, err);
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
struct ce_pd *pd = &dev->pdr[idx];
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
- if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
- CRYPTO_ALG_TYPE_ABLKCIPHER)
- return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto4xx_aead_done(dev, pd_uinfo, pd);
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto4xx_ahash_done(dev, pd_uinfo);
+ break;
+ }
}
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
@@ -617,8 +685,10 @@ int crypto4xx_build_pd(struct crypto_asy
const unsigned int datalen,
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
- const unsigned int sa_len)
+ const unsigned int sa_len,
+ const unsigned int assoclen)
{
+ struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
struct dynamic_sa_ctl *sa;
struct ce_gd *gd;
@@ -632,18 +702,25 @@ int crypto4xx_build_pd(struct crypto_asy
unsigned int nbytes = datalen;
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
+ int tmp;
bool is_busy;
- /* figure how many gd is needed */
- num_gd = sg_nents_for_len(src, datalen);
- if ((int)num_gd < 0) {
+ /* figure how many gd are needed */
+ tmp = sg_nents_for_len(src, assoclen + datalen);
+ if (tmp < 0) {
dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
- return -EINVAL;
+ return tmp;
}
- if (num_gd == 1)
- num_gd = 0;
+ if (tmp == 1)
+ tmp = 0;
+ num_gd = tmp;
- /* figure how many sd is needed */
+ if (assoclen) {
+ nbytes += assoclen;
+ dst = scatterwalk_ffwd(_dst, dst, assoclen);
+ }
+
+ /* figure how many sd are needed */
if (sg_is_last(dst)) {
num_sd = 0;
} else {
@@ -729,6 +806,7 @@ int crypto4xx_build_pd(struct crypto_asy
sa = pd_uinfo->sa_va;
memcpy(sa, req_sa, sa_len * 4);
+ sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
@@ -835,7 +913,7 @@ int crypto4xx_build_pd(struct crypto_asy
((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
PD_CTL_HASH_FINAL : 0);
- pd->pd_ctl_len.w = 0x00400000 | datalen;
+ pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
wmb();
@@ -848,40 +926,68 @@ int crypto4xx_build_pd(struct crypto_asy
/**
* Algorithm Registration Functions
*/
-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
+static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
+ struct crypto4xx_ctx *ctx)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
ctx->dev = amcc_alg->dev;
ctx->sa_in = NULL;
ctx->sa_out = NULL;
ctx->sa_len = 0;
+}
- switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- default:
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
- break;
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct crypto4xx_ctx));
- break;
- }
+static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
+static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
-
crypto4xx_free_sa(ctx);
}
-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
- struct crypto4xx_alg_common *crypto_alg,
- int array_size)
+static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+{
+ crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+}
+
+static int crypto4xx_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto4xx_alg *amcc_alg;
+
+ ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.aead))
+ return PTR_ERR(ctx->sw_cipher.aead);
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
+ crypto4xx_ctx_init(amcc_alg, ctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+ max(sizeof(struct crypto4xx_ctx), 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ return 0;
+}
+
+static void crypto4xx_aead_exit(struct crypto_aead *tfm)
+{
+ struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto4xx_common_exit(ctx);
+ crypto_free_aead(ctx->sw_cipher.aead);
+}
+
+static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+ struct crypto4xx_alg_common *crypto_alg,
+ int array_size)
{
struct crypto4xx_alg *alg;
int i;
@@ -896,6 +1002,10 @@ int crypto4xx_register_alg(struct crypto
alg->dev = sec_dev;
switch (alg->alg.type) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ rc = crypto_register_aead(&alg->alg.u.aead);
+ break;
+
case CRYPTO_ALG_TYPE_AHASH:
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
@@ -925,6 +1035,10 @@ static void crypto4xx_unregister_alg(str
crypto_unregister_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&alg->alg.u.aead);
+ break;
+
default:
crypto_unregister_alg(&alg->alg.u.cipher);
}
@@ -978,7 +1092,7 @@ static irqreturn_t crypto4xx_ce_interrup
/**
* Supported Crypto Algorithms
*/
-struct crypto4xx_alg_common crypto4xx_alg[] = {
+static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
.cra_name = "cbc(aes)",
@@ -990,8 +1104,8 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1014,8 +1128,8 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1038,8 +1152,8 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1064,8 +1178,8 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1087,8 +1201,8 @@ struct crypto4xx_alg_common crypto4xx_al
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
+ .cra_init = crypto4xx_ablk_init,
+ .cra_exit = crypto4xx_ablk_exit,
.cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
@@ -1154,6 +1268,7 @@ static int crypto4xx_probe(struct platfo
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
+ ratelimit_default_init(&core_dev->dev->aead_ratelimit);
rc = crypto4xx_build_pdr(core_dev->dev);
if (rc)
goto err_build_pdr;
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,7 +22,9 @@
#ifndef __CRYPTO4XX_CORE_H__
#define __CRYPTO4XX_CORE_H__
+#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -106,6 +108,7 @@ struct crypto4xx_device {
struct pd_uinfo *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
+ struct ratelimit_state aead_ratelimit;
};
struct crypto4xx_core_device {
@@ -125,6 +128,9 @@ struct crypto4xx_ctx {
struct dynamic_sa_ctl *sa_out;
__le32 iv_nonce;
u32 sa_len;
+ union {
+ struct crypto_aead *aead;
+ } sw_cipher;
};
struct crypto4xx_alg_common {
@@ -132,6 +138,7 @@ struct crypto4xx_alg_common {
union {
struct crypto_alg cipher;
struct ahash_alg hash;
+ struct aead_alg aead;
} u;
};
@@ -141,18 +148,6 @@ struct crypto4xx_alg {
struct crypto4xx_device *dev;
};
-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
- struct crypto_alg *x)
-{
- switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- return container_of(__crypto_ahash_alg(x),
- struct crypto4xx_alg, alg.u.hash);
- }
-
- return container_of(x, struct crypto4xx_alg, alg.u.cipher);
-}
-
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
@@ -163,7 +158,8 @@ int crypto4xx_build_pd(struct crypto_asy
const unsigned int datalen,
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
- const unsigned int sa_len);
+ const unsigned int sa_len,
+ const unsigned int assoclen);
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -55,6 +55,8 @@ union dynamic_sa_contents {
#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
@@ -65,6 +67,8 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
@@ -234,6 +238,36 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ __le32 key[4];
+ __le32 inner_digest[4];
+ __le32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __packed;
+
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e000442
+#define SA_AES_GCM_CONTENTS 0x3e000402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
@@ -274,4 +308,11 @@ static inline __le32 *get_dynamic_sa_key
return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
}
+static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
+{
+ return (__le32 *) ((unsigned long)cts +
+ sizeof(struct dynamic_sa_ctl) +
+ cts->sa_contents.bf.key_size * 4);
+}
+
#endif

View File

@ -1,256 +0,0 @@
From 65ea8b678fcf385ac18864743bae66c0643e6842 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:16 +0200
Subject: [PATCH 24/25] crypto: crypto4xx - add aes-ccm support
This patch adds aes-ccm support.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 185 +++++++++++++++++++++++++++++++++++
drivers/crypto/amcc/crypto4xx_core.c | 23 +++++
drivers/crypto/amcc/crypto4xx_core.h | 8 ++
3 files changed, 216 insertions(+)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -232,6 +232,191 @@ int crypto4xx_rfc3686_decrypt(struct abl
ctx->sa_out, ctx->sa_len, 0);
}
+static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ bool is_ccm, bool decrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ /* authsize has to be a multiple of 4 */
+ if (aead->authsize & 3)
+ return true;
+
+ /*
+ * hardware does not handle cases where cryptlen
+ * is less than a block
+ */
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return true;
+
+ /* assoc len needs to be a multiple of 4 */
+ if (req->assoclen & 0x3)
+ return true;
+
+ /* CCM supports only counter field length of 2 and 4 bytes */
+ if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ return true;
+
+ /* CCM - fix CBC MAC mismatch in special case */
+ if (is_ccm && decrypt && !req->assoclen)
+ return true;
+
+ return false;
+}
+
+static int crypto4xx_aead_fallback(struct aead_request *req,
+ struct crypto4xx_ctx *ctx, bool do_decrypt)
+{
+ char aead_req_data[sizeof(struct aead_request) +
+ crypto_aead_reqsize(ctx->sw_cipher.aead)]
+ __aligned(__alignof__(struct aead_request));
+
+ struct aead_request *subreq = (void *) aead_req_data;
+
+ memset(subreq, 0, sizeof(aead_req_data));
+
+ aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+ return do_decrypt ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+}
+
+static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(ctx->sw_cipher.aead,
+ crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
+ crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_aead_set_flags(cipher,
+ crypto_aead_get_flags(ctx->sw_cipher.aead) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+/**
+ * AES-CCM Functions
+ */
+
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ return 0;
+}
+
+static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int len = req->cryptlen;
+ __le32 iv[16];
+ u32 tmp_sa[ctx->sa_len * 4];
+ struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
+
+ if (crypto4xx_aead_need_fallback(req, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(aead);
+
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
+
+ if (req->iv[0] == 1) {
+ /* CRYPTO_MODE_AES_ICM */
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ }
+
+ iv[3] = cpu_to_le32(0);
+ crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ sa, ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, false);
+}
+
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_ccm(req, true);
+}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
+ unsigned int authsize)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
+}
+
/**
* HASH SHA1 Functions
*/
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1215,6 +1215,29 @@ static struct crypto4xx_alg_common crypt
}
}
} },
+
+ /* AEAD */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
};
/**
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -222,4 +222,12 @@ static inline void crypto4xx_memcpy_to_l
{
crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
}
+
+int crypto4xx_setauthsize_aead(struct crypto_aead *ciper,
+ unsigned int authsize);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+
#endif

View File

@ -1,220 +0,0 @@
From 59231368d3a959fc30c5142c406a045f49130daa Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 4 Oct 2017 01:00:17 +0200
Subject: [PATCH 25/25] crypto: crypto4xx - add aes-gcm support
This patch adds aes-gcm support to crypto4xx.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 139 +++++++++++++++++++++++++++++++++++
drivers/crypto/amcc/crypto4xx_core.c | 22 ++++++
drivers/crypto/amcc/crypto4xx_core.h | 4 +
3 files changed, 165 insertions(+)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -28,6 +28,7 @@
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
@@ -418,6 +419,144 @@ int crypto4xx_setauthsize_aead(struct cr
}
/**
+ * AES-GCM Functions
+ */
+
+static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_cipher *aes_tfm = NULL;
+ uint8_t src[16] = { 0 };
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ pr_warn("could not load aes cipher driver: %d\n", rc);
+ return rc;
+ }
+
+ rc = crypto_cipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ pr_err("setkey() failed: %d\n", rc);
+ goto out;
+ }
+
+ crypto_cipher_encrypt_one(aes_tfm, src, src);
+ crypto4xx_memcpy_to_le32(hash_start, src, 16);
+out:
+ crypto_free_cipher(aes_tfm);
+ return rc;
+}
+
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ if (ctx->sa_in || ctx->sa_out)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
+ if (rc)
+ return rc;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON, SA_MC_DISABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
+ key, keylen);
+
+ rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
+ key, keylen);
+ if (rc) {
+ pr_err("GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
+ bool decrypt)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ unsigned int len = req->cryptlen;
+ __le32 iv[4];
+
+ if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
+ crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
+ iv[3] = cpu_to_le32(1);
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, iv, sizeof(iv),
+ decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, req->assoclen);
+}
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, false);
+}
+
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ return crypto4xx_crypt_aes_gcm(req, true);
+}
+
+/**
* HASH SHA1 Functions
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -38,6 +38,7 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
@@ -1232,6 +1233,27 @@ static struct crypto4xx_alg_common crypt
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ } },
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aead,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .init = crypto4xx_aead_init,
+ .exit = crypto4xx_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -229,5 +229,9 @@ int crypto4xx_setkey_aes_ccm(struct cryp
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
#endif

View File

@ -1,71 +0,0 @@
From 4baa099377d73ea99c7802a9685815b32e8bf119 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 21 Dec 2017 15:08:18 +0100
Subject: [PATCH 1/6] crypto: crypto4xx - shuffle iomap in front of request_irq
The ce_base null pointer check in the driver's interrupt handler
routine "crypto4xx_ce_interrupt_handler()" can be avoided by
simply performing the iomap before the IRQ registration.
This way, ce_base is always valid in the handler and a branch
in a critical path is avoided.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/crypto/amcc/crypto4xx_core.c | 21 +++++++++------------
1 file changed, 9 insertions(+), 12 deletions(-)
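The ordering can be illustrated with a minimal probe sketch for a hypothetical driver (my_probe, my_isr and my_base are placeholder names, not the crypto4xx code): the register window is mapped first, so the handler can always dereference it, and the error path unwinds in the reverse order of setup.

	#include <linux/interrupt.h>
	#include <linux/io.h>
	#include <linux/irqdomain.h>
	#include <linux/of_address.h>
	#include <linux/of_irq.h>
	#include <linux/platform_device.h>

	static void __iomem *my_base;

	static irqreturn_t my_isr(int irq, void *data)
	{
		/* my_base is guaranteed to be mapped because iomap ran first */
		return IRQ_HANDLED;
	}

	static int my_probe(struct platform_device *ofdev)
	{
		int irq, rc;

		/* 1. map the register window */
		my_base = of_iomap(ofdev->dev.of_node, 0);
		if (!my_base)
			return -ENODEV;

		/* 2. only then register the interrupt handler */
		irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
		rc = request_irq(irq, my_isr, 0, KBUILD_MODNAME, &ofdev->dev);
		if (rc) {
			/* unwind in reverse order of setup */
			irq_dispose_mapping(irq);
			iounmap(my_base);
			return rc;
		}

		return 0;
	}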
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1080,9 +1080,6 @@ static irqreturn_t crypto4xx_ce_interrup
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
writel(PPC4XX_INTERRUPT_CLR,
core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
@@ -1330,13 +1327,6 @@ static int crypto4xx_probe(struct platfo
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1344,6 +1334,13 @@ static int crypto4xx_probe(struct platfo
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
+ core_dev->dev->name, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1357,11 +1354,11 @@ static int crypto4xx_probe(struct platfo
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);

View File

@ -1,150 +0,0 @@
From 1e932b627e79aa2c70e2c7278e4ac930303faa3f Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 21 Dec 2017 15:09:18 +0100
Subject: [PATCH 2/6] crypto: crypto4xx - support Revision B parts
This patch adds support for the crypto4xx RevB cores
found in the 460EX, 460SX and later SoCs (such as the APM821xx).
Without this patch, the crypto4xx driver is not able to
process any offloaded requests and will simply hang
indefinitely.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/crypto/amcc/crypto4xx_core.c | 48 +++++++++++++++++++++++++++++----
drivers/crypto/amcc/crypto4xx_core.h | 1 +
drivers/crypto/amcc/crypto4xx_reg_def.h | 4 ++-
3 files changed, 47 insertions(+), 6 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct cry
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -1075,18 +1082,29 @@ static void crypto4xx_bh_tasklet_cb(unsi
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1268,6 +1286,8 @@ static int crypto4xx_probe(struct platfo
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1284,6 +1304,7 @@ static int crypto4xx_probe(struct platfo
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1306,7 +1327,22 @@ static int crypto4xx_probe(struct platfo
if (!core_dev->dev)
goto err_alloc_dev;
+ /*
+ * Older version of 460EX/GT have a hardware bug.
+ * Hence they do not support H/W based security intr coalescing
+ */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1336,7 +1372,9 @@ static int crypto4xx_probe(struct platfo
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
core_dev->dev->name, dev);
if (rc)
goto err_request_irq;
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -109,6 +109,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithm supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all follow define are ad hoc

View File

@ -1,37 +0,0 @@
From 00179ef6e3c4e5db6258cd6e273e4063b8437d18 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 21 Dec 2017 15:10:18 +0100
Subject: [PATCH 3/6] crypto: crypto4xx - fix missing irq devname
crypto4xx_device's name variable is never set to anything.
The common devname for request_irq() seems to be the module
name. This fixes the seemingly anonymous interrupt
entry in /proc/interrupts for crypto4xx.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/crypto/amcc/crypto4xx_core.c | 2 +-
drivers/crypto/amcc/crypto4xx_core.h | 1 -
2 files changed, 1 insertion(+), 2 deletions(-)
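For reference, the devname is the fourth argument of request_irq(); whatever string is passed there becomes the name column in /proc/interrupts. After this change the call is, in spirit (variable names as in the hunk below):

	/* KBUILD_MODNAME expands to the module name, i.e. "crypto4xx" here */
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);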
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1375,7 +1375,7 @@ static int crypto4xx_probe(struct platfo
rc = request_irq(core_dev->irq, is_revb ?
crypto4xx_ce_interrupt_handler_revb :
crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
+ KBUILD_MODNAME, dev);
if (rc)
goto err_request_irq;
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -82,7 +82,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;

View File

@ -1,47 +0,0 @@
From c3621f23fed7d6fff33083ae538004ea59c01d8f Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 21 Dec 2017 15:11:18 +0100
Subject: [PATCH 4/6] crypto: crypto4xx - kill MODULE_NAME
KBUILD_MODNAME provides the same value.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/crypto/amcc/crypto4xx_core.c | 2 +-
drivers/crypto/amcc/crypto4xx_core.h | 2 --
drivers/crypto/amcc/crypto4xx_trng.c | 2 +-
3 files changed, 2 insertions(+), 4 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1437,7 +1437,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match)
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -94,7 +94,7 @@ void ppc4xx_trng_probe(struct crypto4xx_
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;

View File

@ -1,146 +0,0 @@
From 5b3856d1d98e6f6a58b70c1c0d7da3fb5f042e9c Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 21 Dec 2017 16:00:01 +0100
Subject: [PATCH 5/6] crypto: crypto4xx - perform aead icv check in the driver
The ccm-aes-ppc4xx driver now fails one of testmgr's expected-failure
test cases as follows:
alg: aead: decryption failed on test 10 for ccm-aes-ppc4xx: ret was 0, expected -EBADMSG
Upon closer inspection, it turned out that the hardware's
crypto flags that would indicate an authentication failure
are not set by the hardware. The original vendor source from
which this was ported does not have any special code or notes
about why this happens or whether there are any workarounds.
Hence, this patch converts the aead_done callback handler to
perform the ICV check in the driver. This fixes the false
negative, and ccm-aes-ppc4xx passes the selftests once again.
|name : ccm(aes)
|driver : ccm-aes-ppc4xx
|module : crypto4xx
|priority : 300
|refcnt : 1
|selftest : passed
|internal : no
|type : aead
|async : yes
|blocksize : 1
|ivsize : 16
|maxauthsize : 16
|geniv : <none>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/crypto/amcc/crypto4xx_alg.c | 6 +---
drivers/crypto/amcc/crypto4xx_core.c | 54 ++++++++++++++++++------------------
2 files changed, 28 insertions(+), 32 deletions(-)
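The check that moves into the driver boils down to reading the transmitted ICV from the tail of the source scatterlist and comparing it, in constant time, against the digest the engine saved. A condensed sketch (icv, authsize and saved_digest are illustrative names standing in for the values used in the hunks below):

	/* copy the received ICV from the end of the ciphertext ... */
	scatterwalk_map_and_copy(icv, req->src,
				 req->assoclen + req->cryptlen - authsize,
				 authsize, 0);
	/* ... and compare it with the digest computed by the engine */
	if (crypto_memneq(icv, saved_digest, authsize))
		err = -EBADMSG;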
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -257,10 +257,6 @@ static inline bool crypto4xx_aead_need_f
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -331,7 +327,7 @@ int crypto4xx_setkey_aes_ccm(struct cryp
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -582,15 +582,14 @@ static void crypto4xx_aead_done(struct c
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -602,38 +601,39 @@ static void crypto4xx_aead_done(struct c
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)

View File

@ -1,39 +0,0 @@
From 75d68369b544acc5d14c18a827654dfff248d09d Mon Sep 17 00:00:00 2001
From: Himanshu Jha <himanshujha199640@gmail.com>
Date: Sun, 31 Dec 2017 17:54:23 +0530
Subject: [PATCH 1/8] crypto: Use zeroing memory allocator instead of
allocator/memset
Use dma_zalloc_coherent for allocating zeroed memory and
remove the now-unnecessary memset call.
Done using Coccinelle.
Generated-by: scripts/coccinelle/api/alloc/kzalloc-simple.cocci
0-day tested with no failures.
Signed-off-by: Himanshu Jha <himanshujha199640@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
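The transformation is mechanical; a hedged before/after sketch with placeholder names (dev, size, handle, buf), using the dma_zalloc_coherent() helper available in kernels of this era:

	/* before: allocate, then clear by hand */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* after: a single call that returns already-zeroed memory */
	buf = dma_zalloc_coherent(dev, size, &handle, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;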
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -282,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struc
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}

View File

@ -1,159 +0,0 @@
From a8d79d7bfb14f471914017103ee2329a74e5e89d Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:51 +0200
Subject: crypto: crypto4xx - performance optimizations
This patch provides a cheap 2 MiB/s+ (~6%) performance
improvement over the current code. This is because the compiler
can now optimize several endian-swap memcpy operations.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 32 +++++++++++++++++++-------------
drivers/crypto/amcc/crypto4xx_core.c | 22 +++++++++++-----------
drivers/crypto/amcc/crypto4xx_core.h | 6 ++++--
3 files changed, 34 insertions(+), 26 deletions(-)
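The idea is that once the IV length is a compile-time constant at each call site, the compiler can unroll the byte-swapping copy into a handful of straight-line loads and stores. A simplified stand-in for the driver's helper (handles lengths that are multiples of four only; names are illustrative):

	#include <linux/types.h>
	#include <linux/swab.h>
	#include <asm/unaligned.h>

	static inline void swab32_copy(u32 *dst, const u8 *src, size_t len)
	{
		size_t i;

		for (i = 0; i < len / 4; i++)
			dst[i] = swab32(get_unaligned((const u32 *)(src + 4 * i)));
	}

	static void load_aes_iv(u32 *iv, const u8 *req_iv)
	{
		/*
		 * With the constant 16 the loop above can be fully unrolled
		 * and inlined; with a runtime length the compiler has to
		 * keep the generic loop.
		 */
		swab32_copy(iv, req_iv, 16);
	}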
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -74,32 +74,38 @@ static void set_dynamic_sa_command_1(str
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
-int crypto4xx_encrypt(struct ablkcipher_request *req)
+static inline int crypto4xx_crypt(struct ablkcipher_request *req,
+ const unsigned int ivlen, bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int ivlen = crypto_ablkcipher_ivsize(
- crypto_ablkcipher_reqtfm(req));
__le32 iv[ivlen];
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
+ req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+ ctx->sa_len, 0);
}
-int crypto4xx_decrypt(struct ablkcipher_request *req)
+int crypto4xx_encrypt_noiv(struct ablkcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int ivlen = crypto_ablkcipher_ivsize(
- crypto_ablkcipher_reqtfm(req));
- __le32 iv[ivlen];
+ return crypto4xx_crypt(req, 0, false);
+}
- if (ivlen)
- crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+int crypto4xx_encrypt_iv(struct ablkcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false);
+}
- return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
+int crypto4xx_decrypt_noiv(struct ablkcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, true);
+}
+
+int crypto4xx_decrypt_iv(struct ablkcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, true);
}
/**
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -585,7 +585,7 @@ static void crypto4xx_aead_done(struct c
struct scatterlist *dst = pd_uinfo->dest_va;
size_t cp_len = crypto_aead_authsize(
crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
+ u32 icv[AES_BLOCK_SIZE];
int err = 0;
if (pd_uinfo->using_sd) {
@@ -600,7 +600,7 @@ static void crypto4xx_aead_done(struct c
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
- cp_len);
+ sizeof(icv));
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
@@ -610,7 +610,7 @@ static void crypto4xx_aead_done(struct c
aead_req->assoclen + aead_req->cryptlen -
cp_len, cp_len, 0);
- crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+ crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
err = -EBADMSG;
@@ -1127,8 +1127,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
}
}
}},
@@ -1151,8 +1151,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
}
}
} },
@@ -1200,8 +1200,8 @@ static struct crypto4xx_alg_common crypt
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
+ .encrypt = crypto4xx_encrypt_noiv,
+ .decrypt = crypto4xx_decrypt_noiv,
}
}
} },
@@ -1224,8 +1224,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt,
- .decrypt = crypto4xx_decrypt,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
}
}
} },
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -168,8 +168,10 @@ int crypto4xx_setkey_aes_ofb(struct cryp
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_encrypt(struct ablkcipher_request *req);
-int crypto4xx_decrypt(struct ablkcipher_request *req);
+int crypto4xx_encrypt_iv(struct ablkcipher_request *req);
+int crypto4xx_decrypt_iv(struct ablkcipher_request *req);
+int crypto4xx_encrypt_noiv(struct ablkcipher_request *req);
+int crypto4xx_decrypt_noiv(struct ablkcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);

View File

@ -1,578 +0,0 @@
From ce05ffe10457bda487fa049016a6ba79934bdece Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:52 +0200
Subject: [PATCH 3/8] crypto: crypto4xx - convert to skcipher
The ablkcipher APIs have been effectively deprecated since [1].
This patch converts the crypto4xx driver to the new skcipher APIs.
[1] <https://www.spinics.net/lists/linux-crypto/msg18133.html>
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 60 ++++---
drivers/crypto/amcc/crypto4xx_core.c | 255 +++++++++++++--------------
drivers/crypto/amcc/crypto4xx_core.h | 25 +--
3 files changed, 163 insertions(+), 177 deletions(-)
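For orientation, the replacement API describes an algorithm with struct skcipher_alg and registers it with crypto_register_skcipher(). A minimal, hypothetical skeleton (the my_* names and stub bodies are placeholders, not the crypto4xx implementations):

	#include <crypto/aes.h>
	#include <crypto/internal/skcipher.h>
	#include <linux/module.h>

	struct my_ctx {
		u8 key[AES_MAX_KEY_SIZE];	/* placeholder per-tfm context */
	};

	static int my_setkey(struct crypto_skcipher *tfm, const u8 *key,
			     unsigned int keylen)
	{
		return 0;			/* stub */
	}

	static int my_encrypt(struct skcipher_request *req)
	{
		return -EOPNOTSUPP;		/* stub */
	}

	static int my_decrypt(struct skcipher_request *req)
	{
		return -EOPNOTSUPP;		/* stub */
	}

	static struct skcipher_alg my_alg = {
		.base = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-example",
			.cra_priority		= 300,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct my_ctx),
			.cra_module		= THIS_MODULE,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= my_setkey,
		.encrypt	= my_encrypt,
		.decrypt	= my_decrypt,
	};

	/* crypto_register_skcipher(&my_alg) / crypto_unregister_skcipher(&my_alg)
	 * replace the old crypto_register_alg() / crypto_unregister_alg() pair.
	 */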
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -31,6 +31,7 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/ctr.h>
+#include <crypto/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
@@ -74,36 +75,37 @@ static void set_dynamic_sa_command_1(str
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
-static inline int crypto4xx_crypt(struct ablkcipher_request *req,
+static inline int crypto4xx_crypt(struct skcipher_request *req,
const unsigned int ivlen, bool decrypt)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[ivlen];
if (ivlen)
- crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
+ req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
ctx->sa_len, 0);
}
-int crypto4xx_encrypt_noiv(struct ablkcipher_request *req)
+int crypto4xx_encrypt_noiv(struct skcipher_request *req)
{
return crypto4xx_crypt(req, 0, false);
}
-int crypto4xx_encrypt_iv(struct ablkcipher_request *req)
+int crypto4xx_encrypt_iv(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, false);
}
-int crypto4xx_decrypt_noiv(struct ablkcipher_request *req)
+int crypto4xx_decrypt_noiv(struct skcipher_request *req)
{
return crypto4xx_crypt(req, 0, true);
}
-int crypto4xx_decrypt_iv(struct ablkcipher_request *req)
+int crypto4xx_decrypt_iv(struct skcipher_request *req)
{
return crypto4xx_crypt(req, AES_IV_SIZE, true);
}
@@ -111,20 +113,19 @@ int crypto4xx_decrypt_iv(struct ablkciph
/**
* AES Functions
*/
-static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
+static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
const u8 *key,
unsigned int keylen,
unsigned char cm,
u8 fb)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
struct dynamic_sa_ctl *sa;
int rc;
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
- crypto_ablkcipher_set_flags(cipher,
+ crypto_skcipher_set_flags(cipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -165,39 +166,38 @@ static int crypto4xx_setkey_aes(struct c
return 0;
}
-int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CBC,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
CRYPTO_FEEDBACK_MODE_128BIT_CFB);
}
-int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
CRYPTO_FEEDBACK_MODE_64BIT_OFB);
}
-int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
int rc;
rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
@@ -211,31 +211,33 @@ int crypto4xx_setkey_rfc3686(struct cryp
return 0;
}
-int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
- cpu_to_le32p((u32 *) req->info),
- cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE,
+ req->cryptlen, iv, AES_IV_SIZE,
ctx->sa_out, ctx->sa_len, 0);
}
-int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
{
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE / 4] = {
ctx->iv_nonce,
- cpu_to_le32p((u32 *) req->info),
- cpu_to_le32p((u32 *) (req->info + 4)),
+ cpu_to_le32p((u32 *) req->iv),
+ cpu_to_le32p((u32 *) (req->iv + 4)),
cpu_to_le32(1) };
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, iv, AES_IV_SIZE,
+ req->cryptlen, iv, AES_IV_SIZE,
ctx->sa_out, ctx->sa_len, 0);
}
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -41,6 +41,7 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
@@ -522,21 +523,19 @@ static void crypto4xx_ret_sg_desc(struct
}
}
-static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct crypto4xx_ctx *ctx;
- struct ablkcipher_request *ablk_req;
+ struct skcipher_request *req;
struct scatterlist *dst;
dma_addr_t addr;
- ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ablk_req->base.tfm);
+ req = skcipher_request_cast(pd_uinfo->async_req);
if (pd_uinfo->using_sd) {
- crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
- ablk_req->dst);
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
addr = dma_map_page(dev->core_dev->device, sg_page(dst),
@@ -554,8 +553,8 @@ static void crypto4xx_ablkcipher_done(st
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
- ablkcipher_request_complete(ablk_req, -EINPROGRESS);
- ablkcipher_request_complete(ablk_req, 0);
+ skcipher_request_complete(req, -EINPROGRESS);
+ skcipher_request_complete(req, 0);
}
static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
@@ -646,8 +645,8 @@ static void crypto4xx_pd_done(struct cry
struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ crypto4xx_cipher_done(dev, pd_uinfo, pd);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto4xx_aead_done(dev, pd_uinfo, pd);
@@ -941,15 +940,14 @@ static void crypto4xx_ctx_init(struct cr
ctx->sa_len = 0;
}
-static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
+static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
- struct crypto_alg *alg = tfm->__crt_alg;
+ struct skcipher_alg *alg = crypto_skcipher_alg(sk);
struct crypto4xx_alg *amcc_alg;
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
crypto4xx_ctx_init(amcc_alg, ctx);
- tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
return 0;
}
@@ -958,9 +956,11 @@ static void crypto4xx_common_exit(struct
crypto4xx_free_sa(ctx);
}
-static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
+static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
- crypto4xx_common_exit(crypto_tfm_ctx(tfm));
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+
+ crypto4xx_common_exit(ctx);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
@@ -1017,7 +1017,7 @@ static int crypto4xx_register_alg(struct
break;
default:
- rc = crypto_register_alg(&alg->alg.u.cipher);
+ rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
}
@@ -1046,7 +1046,7 @@ static void crypto4xx_unregister_alg(str
break;
default:
- crypto_unregister_alg(&alg->alg.u.cipher);
+ crypto_unregister_skcipher(&alg->alg.u.cipher);
}
kfree(alg);
}
@@ -1108,126 +1108,109 @@ static irqreturn_t crypto4xx_ce_interrup
*/
static struct crypto4xx_alg_common crypto4xx_alg[] = {
/* Crypto AES modes */
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
- }
- }
- }},
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE +
- CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = crypto4xx_setkey_rfc3686,
- .encrypt = crypto4xx_rfc3686_encrypt,
- .decrypt = crypto4xx_rfc3686_decrypt,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "cfb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt_noiv,
- .decrypt = crypto4xx_decrypt_noiv,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .setkey = crypto4xx_setkey_rfc3686,
+ .encrypt = crypto4xx_rfc3686_encrypt,
+ .decrypt = crypto4xx_rfc3686_decrypt,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_ablk_init,
- .cra_exit = crypto4xx_ablk_exit,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
- }
- }
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt_noiv,
+ .decrypt = crypto4xx_decrypt_noiv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ofb-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt_iv,
+ .decrypt = crypto4xx_decrypt_iv,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
} },
/* AEAD */
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -25,6 +25,7 @@
#include <linux/ratelimit.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -134,7 +135,7 @@ struct crypto4xx_ctx {
struct crypto4xx_alg_common {
u32 type;
union {
- struct crypto_alg cipher;
+ struct skcipher_alg cipher;
struct ahash_alg hash;
struct aead_alg aead;
} u;
@@ -158,22 +159,22 @@ int crypto4xx_build_pd(struct crypto_asy
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
const unsigned int assoclen);
-int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_encrypt_iv(struct ablkcipher_request *req);
-int crypto4xx_decrypt_iv(struct ablkcipher_request *req);
-int crypto4xx_encrypt_noiv(struct ablkcipher_request *req);
-int crypto4xx_decrypt_noiv(struct ablkcipher_request *req);
-int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
-int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+int crypto4xx_encrypt_iv(struct skcipher_request *req);
+int crypto4xx_decrypt_iv(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
int crypto4xx_hash_digest(struct ahash_request *req);
int crypto4xx_hash_final(struct ahash_request *req);


@ -1,61 +0,0 @@
From c4e90650ff0cbf123ec9cfc32026fa0fb2931658 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:53 +0200
Subject: [PATCH 4/8] crypto: crypto4xx - avoid VLA use
This patch fixes some of the -Wvla warnings.
crypto4xx_alg.c:83:19: warning: Variable length array is used.
crypto4xx_alg.c:273:56: warning: Variable length array is used.
crypto4xx_alg.c:380:32: warning: Variable length array is used.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -80,7 +80,7 @@ static inline int crypto4xx_crypt(struct
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
- __le32 iv[ivlen];
+ __le32 iv[AES_IV_SIZE];
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
@@ -271,13 +271,7 @@ static inline bool crypto4xx_aead_need_f
static int crypto4xx_aead_fallback(struct aead_request *req,
struct crypto4xx_ctx *ctx, bool do_decrypt)
{
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(ctx->sw_cipher.aead)]
- __aligned(__alignof__(struct aead_request));
-
- struct aead_request *subreq = (void *) aead_req_data;
-
- memset(subreq, 0, sizeof(aead_req_data));
+ struct aead_request *subreq = aead_request_ctx(req);
aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
aead_request_set_callback(subreq, req->base.flags,
@@ -378,7 +372,7 @@ static int crypto4xx_crypt_aes_ccm(struc
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int len = req->cryptlen;
__le32 iv[16];
- u32 tmp_sa[ctx->sa_len * 4];
+ u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
if (crypto4xx_aead_need_fallback(req, true, decrypt))
@@ -387,7 +381,7 @@ static int crypto4xx_crypt_aes_ccm(struc
if (decrypt)
len -= crypto_aead_authsize(aead);
- memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
+ memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
if (req->iv[0] == 1) {


@ -1,247 +0,0 @@
From 98e87e3d933b8e504ea41b8857c038d2cd06cddc Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:54 +0200
Subject: [PATCH 5/8] crypto: crypto4xx - add aes-ctr support
This patch adds support for the aes-ctr skcipher.
name : ctr(aes)
driver : ctr-aes-ppc4xx
module : crypto4xx
priority : 300
refcnt : 1
selftest : passed
internal : no
type : skcipher
async : yes
blocksize : 16
min keysize : 16
max keysize : 32
ivsize : 16
chunksize : 16
walksize : 16
The hardware uses only the last 32 bits as the counter, while the
kernel tests (aes_ctr_enc_tv_template[4], for example) expect the
whole IV to act as a counter. To make this work, the driver falls
back if the counter is going to overflow.
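For illustration, the wrap test is just an unsigned-overflow check on the
low 32-bit counter word. A minimal stand-alone sketch in plain C (the
function name and parameters are invented for this example, not driver code):

#include <stdbool.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16u

static bool ctr32_will_wrap(uint32_t counter, uint32_t cryptlen)
{
	/* number of AES blocks covered by the request, rounded up */
	uint32_t nblks = (cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE;

	/* unsigned wrap-around means the low 32-bit counter overflows */
	return counter + nblks < counter;
}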
The aead's crypto4xx_setup_fallback() function is renamed to
crypto4xx_aead_setup_fallback.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 91 ++++++++++++++++++++++++++--
drivers/crypto/amcc/crypto4xx_core.c | 37 +++++++++++
drivers/crypto/amcc/crypto4xx_core.h | 5 ++
3 files changed, 127 insertions(+), 6 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -241,6 +241,85 @@ int crypto4xx_rfc3686_decrypt(struct skc
ctx->sa_out, ctx->sa_len, 0);
}
+static int
+crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ size_t iv_len = crypto_skcipher_ivsize(cipher);
+ unsigned int counter = be32_to_cpup((__be32 *)(req->iv + iv_len - 4));
+ unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
+ AES_BLOCK_SIZE;
+
+ /*
+ * The hardware uses only the last 32-bits as the counter while the
+ * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
+ * the whole IV is a counter. So fallback if the counter is going to
+ * overflow.
+ */
+ if (counter + nblks < counter) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+ int ret;
+
+ skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ NULL, NULL);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq)
+ : crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return ret;
+ }
+
+ return encrypt ? crypto4xx_encrypt_iv(req)
+ : crypto4xx_decrypt_iv(req);
+}
+
+static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_skcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int rc;
+
+ crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
+ rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+ crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
+ crypto_skcipher_set_flags(cipher,
+ crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+ CRYPTO_TFM_RES_MASK);
+
+ return rc;
+}
+
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int rc;
+
+ rc = crypto4xx_sk_setup_fallback(ctx, cipher, key, keylen);
+ if (rc)
+ return rc;
+
+ return crypto4xx_setkey_aes(cipher, key, keylen,
+ CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+int crypto4xx_encrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, true);
+}
+
+int crypto4xx_decrypt_ctr(struct skcipher_request *req)
+{
+ return crypto4xx_ctr_crypt(req, false);
+}
+
static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
bool is_ccm, bool decrypt)
{
@@ -283,10 +362,10 @@ static int crypto4xx_aead_fallback(struc
crypto_aead_encrypt(subreq);
}
-static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
- struct crypto_aead *cipher,
- const u8 *key,
- unsigned int keylen)
+static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
+ struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
{
int rc;
@@ -314,7 +393,7 @@ int crypto4xx_setkey_aes_ccm(struct cryp
struct dynamic_sa_ctl *sa;
int rc = 0;
- rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
@@ -473,7 +552,7 @@ int crypto4xx_setkey_aes_gcm(struct cryp
return -EINVAL;
}
- rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
+ rc = crypto4xx_aead_setup_fallback(ctx, cipher, key, keylen);
if (rc)
return rc;
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -946,6 +946,19 @@ static int crypto4xx_sk_init(struct cryp
struct crypto4xx_alg *amcc_alg;
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
+ if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->sw_cipher.cipher =
+ crypto_alloc_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC);
+ if (IS_ERR(ctx->sw_cipher.cipher))
+ return PTR_ERR(ctx->sw_cipher.cipher);
+
+ crypto_skcipher_set_reqsize(sk,
+ sizeof(struct skcipher_request) + 32 +
+ crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
+ }
+
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
crypto4xx_ctx_init(amcc_alg, ctx);
return 0;
@@ -961,6 +974,8 @@ static void crypto4xx_sk_exit(struct cry
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);
crypto4xx_common_exit(ctx);
+ if (ctx->sw_cipher.cipher)
+ crypto_free_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
@@ -1150,6 +1165,28 @@ static struct crypto4xx_alg_common crypt
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
+ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ctr,
+ .encrypt = crypto4xx_encrypt_ctr,
+ .decrypt = crypto4xx_decrypt_ctr,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
.cra_name = "rfc3686(ctr(aes))",
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -128,6 +128,7 @@ struct crypto4xx_ctx {
__le32 iv_nonce;
u32 sa_len;
union {
+ struct crypto_skcipher *cipher;
struct crypto_aead *aead;
} sw_cipher;
};
@@ -163,12 +164,16 @@ int crypto4xx_setkey_aes_cbc(struct cryp
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
+int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+int crypto4xx_decrypt_ctr(struct skcipher_request *req);
int crypto4xx_encrypt_iv(struct skcipher_request *req);
int crypto4xx_decrypt_iv(struct skcipher_request *req);
int crypto4xx_encrypt_noiv(struct skcipher_request *req);


@ -1,102 +0,0 @@
From 584201f1895d915c1aa523bc86afdc126e94beca Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:56 +0200
Subject: [PATCH 7/8] crypto: crypto4xx - extend aead fallback checks
1020 bytes is the limit for associated data; any more and
it will no longer fit into hash_crypto_offset.
The hardware will not process aead requests whose plaintext
is shorter than AES_BLOCK_SIZE bytes. When decrypting aead
requests, the authsize has to be taken into account as well,
since it is part of the cryptlen. Otherwise the hardware
will think it has been misconfigured and will return:
aead return err status = 0x98
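Taken together, the limits above reduce to a small predicate; a
stand-alone sketch with the constants written out (illustrative only,
not the driver's actual helper):

#include <stdbool.h>

static bool aead_needs_fallback(unsigned int assoclen,
				unsigned int plaintext_len)
{
	/* associated data must be a multiple of 4 and at most 1020 bytes */
	if ((assoclen & 0x3) || assoclen > 1020)
		return true;

	/* the engine rejects plaintext shorter than one AES block (16 bytes) */
	return plaintext_len < 16;
}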
For rfc4543(gcm(aes)), the hardware has a dedicated GMAC
mode as part of the hash function set.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 30 +++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -321,6 +321,7 @@ int crypto4xx_decrypt_ctr(struct skciphe
}
static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
+ unsigned int len,
bool is_ccm, bool decrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -330,14 +331,14 @@ static inline bool crypto4xx_aead_need_f
return true;
/*
- * hardware does not handle cases where cryptlen
- * is less than a block
+ * hardware does not handle cases where plaintext
+ * is less than a block.
*/
- if (req->cryptlen < AES_BLOCK_SIZE)
+ if (len < AES_BLOCK_SIZE)
return true;
- /* assoc len needs to be a multiple of 4 */
- if (req->assoclen & 0x3)
+ /* assoc len needs to be a multiple of 4 and <= 1020 */
+ if (req->assoclen & 0x3 || req->assoclen > 1020)
return true;
/* CCM supports only counter field length of 2 and 4 bytes */
@@ -449,17 +450,17 @@ static int crypto4xx_crypt_aes_ccm(struc
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int len = req->cryptlen;
__le32 iv[16];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
-
- if (crypto4xx_aead_need_fallback(req, true, decrypt))
- return crypto4xx_aead_fallback(req, ctx, decrypt);
+ unsigned int len = req->cryptlen;
if (decrypt)
len -= crypto_aead_authsize(aead);
+ if (crypto4xx_aead_need_fallback(req, len, true, decrypt))
+ return crypto4xx_aead_fallback(req, ctx, decrypt);
+
memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, ctx->sa_len * 4);
sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
@@ -605,18 +606,19 @@ static inline int crypto4xx_crypt_aes_gc
bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
- unsigned int len = req->cryptlen;
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
__le32 iv[4];
+ unsigned int len = req->cryptlen;
+
+ if (decrypt)
+ len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
- if (crypto4xx_aead_need_fallback(req, false, decrypt))
+ if (crypto4xx_aead_need_fallback(req, len, false, decrypt))
return crypto4xx_aead_fallback(req, ctx, decrypt);
crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
iv[3] = cpu_to_le32(1);
- if (decrypt)
- len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
-
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
decrypt ? ctx->sa_in : ctx->sa_out,


@ -1,157 +0,0 @@
From 658c9d2b9f374c835d0348d852a3f002196628d0 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 19 Apr 2018 18:41:57 +0200
Subject: [PATCH 8/8] crypto: crypto4xx - put temporary dst sg into request ctx
This patch fixes a crash that happens when testing rfc4543(gcm(aes)):
Unable to handle kernel paging request for data at address 0xf59b3420
Faulting instruction address: 0xc0012994
Oops: Kernel access of bad area, sig: 11 [#1]
BE PowerPC 44x Platform
Modules linked in: tcrypt(+) crypto4xx [...]
CPU: 0 PID: 0 Comm: swapper Tainted: G O 4.17.0-rc1+ #23
NIP: c0012994 LR: d3077934 CTR: 06026d49
REGS: cfff7e30 TRAP: 0300 Tainted: G O (4.17.0-rc1+)
MSR: 00029000 <CE,EE,ME> CR: 44744822 XER: 00000000
DEAR: f59b3420 ESR: 00000000
NIP [c0012994] __dma_sync+0x58/0x10c
LR [d3077934] crypto4xx_bh_tasklet_cb+0x188/0x3c8 [crypto4xx]
__dma_sync was fed the temporary _dst that crypto4xx_build_pd()
had on its function stack. This clearly never worked.
This patch therefore overhauls the code from the original driver
and puts the temporary dst sg list into the aead's request context.
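The pattern behind the fix, sketched with invented names (the point is
that scratch space reserved via the request context outlives the
asynchronous completion, unlike a stack variable):

#include <crypto/internal/aead.h>
#include <linux/scatterlist.h>

/* per-request scratch space, allocated together with each aead_request */
struct example_aead_reqctx {
	struct scatterlist dst[2];
};

static int example_aead_init(struct crypto_aead *tfm)
{
	/* reserve room for the scratch scatterlist in every request */
	crypto_aead_set_reqsize(tfm, sizeof(struct example_aead_reqctx));
	return 0;
}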
Fixes: a0aae821ba3d3 ("crypto: crypto4xx - prepare for AEAD support")
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 15 ++++++++-------
drivers/crypto/amcc/crypto4xx_core.c | 10 +++++-----
drivers/crypto/amcc/crypto4xx_core.h | 7 ++++++-
3 files changed, 19 insertions(+), 13 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -87,7 +87,7 @@ static inline int crypto4xx_crypt(struct
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
- ctx->sa_len, 0);
+ ctx->sa_len, 0, NULL);
}
int crypto4xx_encrypt_noiv(struct skcipher_request *req)
@@ -223,7 +223,7 @@ int crypto4xx_rfc3686_encrypt(struct skc
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len, 0);
+ ctx->sa_out, ctx->sa_len, 0, NULL);
}
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req)
@@ -238,7 +238,7 @@ int crypto4xx_rfc3686_decrypt(struct skc
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
req->cryptlen, iv, AES_IV_SIZE,
- ctx->sa_out, ctx->sa_len, 0);
+ ctx->sa_out, ctx->sa_len, 0, NULL);
}
static int
@@ -449,6 +449,7 @@ int crypto4xx_setkey_aes_ccm(struct cryp
static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto4xx_aead_reqctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
__le32 iv[16];
u32 tmp_sa[SA_AES128_CCM_LEN + 4];
@@ -474,7 +475,7 @@ static int crypto4xx_crypt_aes_ccm(struc
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
- sa, ctx->sa_len, req->assoclen);
+ sa, ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
@@ -622,7 +623,7 @@ static inline int crypto4xx_crypt_aes_gc
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
len, iv, sizeof(iv),
decrypt ? ctx->sa_in : ctx->sa_out,
- ctx->sa_len, req->assoclen);
+ ctx->sa_len, req->assoclen, rctx->dst);
}
int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
@@ -707,7 +708,7 @@ int crypto4xx_hash_update(struct ahash_r
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0);
+ ctx->sa_len, 0, NULL);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -726,7 +727,7 @@ int crypto4xx_hash_digest(struct ahash_r
return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
req->nbytes, NULL, 0, ctx->sa_in,
- ctx->sa_len, 0);
+ ctx->sa_len, 0, NULL);
}
/**
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -691,9 +691,9 @@ int crypto4xx_build_pd(struct crypto_asy
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *req_sa,
const unsigned int sa_len,
- const unsigned int assoclen)
+ const unsigned int assoclen,
+ struct scatterlist *_dst)
{
- struct scatterlist _dst[2];
struct crypto4xx_device *dev = ctx->dev;
struct dynamic_sa_ctl *sa;
struct ce_gd *gd;
@@ -992,9 +992,9 @@ static int crypto4xx_aead_init(struct cr
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
crypto4xx_ctx_init(amcc_alg, ctx);
- crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
- max(sizeof(struct crypto4xx_ctx), 32 +
- crypto_aead_reqsize(ctx->sw_cipher.aead)));
+ crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
+ crypto_aead_reqsize(ctx->sw_cipher.aead),
+ sizeof(struct crypto4xx_aead_reqctx)));
return 0;
}
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -133,6 +133,10 @@ struct crypto4xx_ctx {
} sw_cipher;
};
+struct crypto4xx_aead_reqctx {
+ struct scatterlist dst[2];
+};
+
struct crypto4xx_alg_common {
u32 type;
union {
@@ -159,7 +163,8 @@ int crypto4xx_build_pd(struct crypto_asy
const __le32 *iv, const u32 iv_len,
const struct dynamic_sa_ctl *sa,
const unsigned int sa_len,
- const unsigned int assoclen);
+ const unsigned int assoclen,
+ struct scatterlist *dst_tmp);
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,


@ -1,447 +0,0 @@
From 6396bb221514d2876fd6dc0aa2a1f240d99b37bb Mon Sep 17 00:00:00 2001
From: Kees Cook <keescook@chromium.org>
Date: Tue, 12 Jun 2018 14:03:40 -0700
Subject: [PATCH 01/15] treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:
kzalloc(a * b, gfp)
with:
kcalloc(a, b, gfp)
as well as handling cases of:
kzalloc(a * b * c, gfp)
with:
kzalloc(array3_size(a, b, c), gfp)
as it's slightly less ugly than:
kzalloc_array(array_size(a, b), c, gfp)
This does, however, attempt to ignore constant size factors like:
kzalloc(4 * 1024, gfp)
though any constants defined via macros get caught up in the conversion.
Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.
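A before/after illustration of the 2-factor case, using a hypothetical
element type rather than anything from this tree:

#include <linux/slab.h>

struct foo {
	int value;
};

static struct foo *alloc_foo_array(size_t count)
{
	/*
	 * Before: kzalloc(count * sizeof(struct foo), GFP_KERNEL);
	 *         the open-coded multiplication can overflow unchecked.
	 * After:  kcalloc() returns the same zeroed allocation, but fails
	 *         cleanly if count * sizeof(struct foo) would overflow.
	 */
	return kcalloc(count, sizeof(struct foo), GFP_KERNEL);
}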
The Coccinelle script used for this was:
// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@
(
kzalloc(
- (sizeof(TYPE)) * E
+ sizeof(TYPE) * E
, ...)
|
kzalloc(
- (sizeof(THING)) * E
+ sizeof(THING) * E
, ...)
)
// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@
(
kzalloc(
- sizeof(u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * (COUNT)
+ COUNT
, ...)
|
kzalloc(
- sizeof(u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(__u8) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(char) * COUNT
+ COUNT
, ...)
|
kzalloc(
- sizeof(unsigned char) * COUNT
+ COUNT
, ...)
)
// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@
(
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_ID)
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_ID
+ COUNT_ID, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (COUNT_CONST)
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * COUNT_CONST
+ COUNT_CONST, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_ID)
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_ID
+ COUNT_ID, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (COUNT_CONST)
+ COUNT_CONST, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * COUNT_CONST
+ COUNT_CONST, sizeof(THING)
, ...)
)
// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@
- kzalloc
+ kcalloc
(
- SIZE * COUNT
+ COUNT, SIZE
, ...)
// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@
(
kzalloc(
- sizeof(TYPE) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(TYPE) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(TYPE))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * (COUNT) * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * (STRIDE)
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
|
kzalloc(
- sizeof(THING) * COUNT * STRIDE
+ array3_size(COUNT, STRIDE, sizeof(THING))
, ...)
)
// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@
(
kzalloc(
- sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(THING1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(THING1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * COUNT
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
|
kzalloc(
- sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+ array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
, ...)
)
// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@
(
kzalloc(
- (COUNT) * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * STRIDE * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- (COUNT) * (STRIDE) * (SIZE)
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
|
kzalloc(
- COUNT * STRIDE * SIZE
+ array3_size(COUNT, STRIDE, SIZE)
, ...)
)
// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@
(
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(
- (E1) * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * E3
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- (E1) * (E2) * (E3)
+ array3_size(E1, E2, E3)
, ...)
|
kzalloc(
- E1 * E2 * E3
+ array3_size(E1, E2, E3)
, ...)
)
// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@
(
kzalloc(sizeof(THING) * C2, ...)
|
kzalloc(sizeof(TYPE) * C2, ...)
|
kzalloc(C1 * C2 * C3, ...)
|
kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * (E2)
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(TYPE) * E2
+ E2, sizeof(TYPE)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * (E2)
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- sizeof(THING) * E2
+ E2, sizeof(THING)
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * E2
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- (E1) * (E2)
+ E1, E2
, ...)
|
- kzalloc
+ kcalloc
(
- E1 * E2
+ E1, E2
, ...)
)
Signed-off-by: Kees Cook <keescook@chromium.org>
---
drivers/crypto/amcc/crypto4xx_core.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -141,11 +141,11 @@ static void crypto4xx_hw_init(struct cry
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
- ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
+ ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
if (ctx->sa_in == NULL)
return -ENOMEM;
- ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
+ ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
if (ctx->sa_out == NULL) {
kfree(ctx->sa_in);
ctx->sa_in = NULL;
@@ -180,8 +180,8 @@ static u32 crypto4xx_build_pdr(struct cr
if (!dev->pdr)
return -ENOMEM;
- dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
- GFP_KERNEL);
+ dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
+ GFP_KERNEL);
if (!dev->pdr_uinfo) {
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,


@ -1,84 +0,0 @@
From 2c95e6d97892235b5b98cd4805e47fac87c2226f Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@google.com>
Date: Sat, 30 Jun 2018 15:16:15 -0700
Subject: [PATCH 02/15] crypto: skcipher - remove useless setting of type flags
Some skcipher algorithms set .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER. But
this is redundant with the C structure type ('struct skcipher_alg'), and
crypto_register_skcipher() already sets the type flag automatically,
clearing any type flag that was already there. Apparently the useless
assignment has just been copy+pasted around.
So, remove the useless assignment from all the skcipher algorithms.
This patch shouldn't change any actual behavior.
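A sketch of what this means for a driver's algorithm table (hypothetical
entry, handlers omitted); the type bits come from the registration
helper, not from .cra_flags:

#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static struct skcipher_alg example_alg = {
	.base = {
		.cra_name		= "ecb(example)",
		.cra_driver_name	= "ecb-example-driver",
		.cra_priority		= 100,
		/* no CRYPTO_ALG_TYPE_SKCIPHER needed here */
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_blocksize		= 16,
		.cra_module		= THIS_MODULE,
	},
	/* .setkey/.encrypt/.decrypt/.init/.exit omitted in this sketch */
};

/* crypto_register_skcipher(&example_alg) sets CRYPTO_ALG_TYPE_SKCIPHER itself */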
Signed-off-by: Eric Biggers <ebiggers@google.com>
Acked-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 18 ++++++------------
1 file changed, 6 insertions(+), 12 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1128,8 +1128,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1149,8 +1148,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "cfb(aes)",
.cra_driver_name = "cfb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1170,8 +1168,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK |
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -1192,8 +1189,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1213,8 +1209,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
@@ -1233,8 +1228,7 @@ static struct crypto4xx_alg_common crypt
.cra_name = "ofb(aes)",
.cra_driver_name = "ofb-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),


@ -1,34 +0,0 @@
From 1ad0f1603a6b2afb62a1c065409aaa4e43ca7627 Mon Sep 17 00:00:00 2001
From: Eric Biggers <ebiggers@google.com>
Date: Wed, 14 Nov 2018 12:19:39 -0800
Subject: [PATCH 03/15] crypto: drop mask=CRYPTO_ALG_ASYNC from 'cipher' tfm
allocations
'cipher' algorithms (single block ciphers) are always synchronous, so
passing CRYPTO_ALG_ASYNC in the mask to crypto_alloc_cipher() has no
effect. Many users therefore already don't pass it, but some still do.
This inconsistency can cause confusion, especially since the way the
'mask' argument works is somewhat counterintuitive.
Thus, just remove the unneeded CRYPTO_ALG_ASYNC flags.
This patch shouldn't change any actual behavior.
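A minimal usage sketch of the allocation in question (wrapper name
invented; "aes" is the only real identifier):

#include <linux/crypto.h>

static struct crypto_cipher *get_aes_cipher(void)
{
	/*
	 * Single-block "cipher" tfms are always synchronous, so passing
	 * CRYPTO_ALG_ASYNC in the mask changes nothing; 0 is sufficient.
	 */
	return crypto_alloc_cipher("aes", 0, 0);
}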
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -520,8 +520,7 @@ static int crypto4xx_compute_gcm_hash_ke
uint8_t src[16] = { 0 };
int rc = 0;
- aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(aes_tfm)) {
rc = PTR_ERR(aes_tfm);
pr_warn("could not load aes cipher driver: %d\n", rc);


@ -1,30 +0,0 @@
From 67d8208fba1324fa0198f9fc58a9edbe09596947 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Sun, 16 Dec 2018 18:19:46 +0100
Subject: [PATCH 04/15] crypto4xx_core: don't abuse __dma_sync_page
This function is internal to the DMA API implementation. Instead use
the DMA API to properly unmap. Note that the DMA API usage in this
driver is a disaster and urgently needs some work - it is missing all
the unmaps, seems to do a secondary map where it looks like it should
do an unmap in one place to work around cache coherency and the
directions passed in seem to be partially wrong.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -592,7 +592,7 @@ static void crypto4xx_aead_done(struct c
pd->pd_ctl_len.bf.pkt_len,
dst);
} else {
- __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
DMA_FROM_DEVICE);
}


@ -1,40 +0,0 @@
From 750afb08ca71310fcf0c4e2cb1565c63b8235b60 Mon Sep 17 00:00:00 2001
From: Luis Chamberlain <mcgrof@kernel.org>
Date: Fri, 4 Jan 2019 09:23:09 +0100
Subject: [PATCH 05/15] cross-tree: phase out dma_zalloc_coherent()
dma_alloc_coherent() already zeroes out the memory it returns, so
using dma_zalloc_coherent() is superfluous. Phase it out.
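The substitution in a nutshell, shown on a hypothetical descriptor-ring
allocation (dma_alloc_coherent() already returns zeroed memory, so
behavior is unchanged):

#include <linux/dma-mapping.h>

static void *alloc_desc_ring(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	/* was: dma_zalloc_coherent(dev, size, handle, GFP_KERNEL); */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}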
This change was generated with the following Coccinelle SmPL patch:
@ replace_dma_zalloc_coherent @
expression dev, size, data, handle, flags;
@@
-dma_zalloc_coherent(dev, size, handle, flags)
+dma_alloc_coherent(dev, size, handle, flags)
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Luis Chamberlain <mcgrof@kernel.org>
[hch: re-ran the script on the latest tree]
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/crypto/amcc/crypto4xx_core.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struc
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_alloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;


@ -1,199 +0,0 @@
From d072bfa4885354fff86aa1fb1dbc4f1533c9e0bf Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Sun, 23 Dec 2018 02:16:13 +0100
Subject: [PATCH 06/15] crypto: crypto4xx - add prng crypto support
This patch adds support for crypto4xx's ANSI X9.17 Annex C compliant
pseudo-random number generator, which provides the Packet Engine with
a pseudo-random source for generating Initialization Vectors (IVs) for
AES algorithms and covers other pseudo-random number requirements.
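A hedged sketch of how a consumer reaches such a provider through the
generic RNG API ("stdrng" selects the highest-priority registered
provider, which is not necessarily crypto4xx_rng; the helper name is
invented):

#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/types.h>

static int fill_with_prng(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, len);
	crypto_free_rng(rng);
	return ret;
}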
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 87 +++++++++++++++++++++++++
drivers/crypto/amcc/crypto4xx_core.h | 4 ++
drivers/crypto/amcc/crypto4xx_reg_def.h | 1 +
3 files changed, 92 insertions(+)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,9 +40,11 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
+#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
@@ -1031,6 +1033,10 @@ static int crypto4xx_register_alg(struct
rc = crypto_register_ahash(&alg->alg.u.hash);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ rc = crypto_register_rng(&alg->alg.u.rng);
+ break;
+
default:
rc = crypto_register_skcipher(&alg->alg.u.cipher);
break;
@@ -1060,6 +1066,10 @@ static void crypto4xx_unregister_alg(str
crypto_unregister_aead(&alg->alg.u.aead);
break;
+ case CRYPTO_ALG_TYPE_RNG:
+ crypto_unregister_rng(&alg->alg.u.rng);
+ break;
+
default:
crypto_unregister_skcipher(&alg->alg.u.cipher);
}
@@ -1118,6 +1128,69 @@ static irqreturn_t crypto4xx_ce_interrup
PPC4XX_TMO_ERR_INT);
}
+static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
+ u8 *data, unsigned int max)
+{
+ unsigned int i, curr = 0;
+ u32 val[2];
+
+ do {
+ /* trigger PRN generation */
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN,
+ dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
+ for (i = 0; i < 1024; i++) {
+ /* usually 19 iterations are enough */
+ if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
+ CRYPTO4XX_PRNG_STAT_BUSY))
+ continue;
+
+ val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
+ val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
+ break;
+ }
+ if (i == 1024)
+ return -ETIMEDOUT;
+
+ if ((max - curr) >= 8) {
+ memcpy(data, &val, 8);
+ data += 8;
+ curr += 8;
+ } else {
+ /* copy only remaining bytes */
+ memcpy(data, &val, max - curr);
+ break;
+ }
+ } while (curr < max);
+
+ return curr;
+}
+
+static int crypto4xx_prng_generate(struct crypto_rng *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dstn, unsigned int dlen)
+{
+ struct rng_alg *alg = crypto_rng_alg(tfm);
+ struct crypto4xx_alg *amcc_alg;
+ struct crypto4xx_device *dev;
+ int ret;
+
+ amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
+ dev = amcc_alg->dev;
+
+ mutex_lock(&dev->core_dev->rng_lock);
+ ret = ppc4xx_prng_data_read(dev, dstn, dlen);
+ mutex_unlock(&dev->core_dev->rng_lock);
+ return ret;
+}
+
+
+static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ return 0;
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1287,6 +1360,18 @@ static struct crypto4xx_alg_common crypt
.cra_module = THIS_MODULE,
},
} },
+ { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "crypto4xx_rng",
+ .cra_priority = 300,
+ .cra_ctxsize = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .generate = crypto4xx_prng_generate,
+ .seed = crypto4xx_prng_seed,
+ .seedsize = 0,
+ } },
};
/**
@@ -1356,6 +1441,7 @@ static int crypto4xx_probe(struct platfo
core_dev->dev->core_dev = core_dev;
core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
+ mutex_init(&core_dev->rng_lock);
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
ratelimit_default_init(&core_dev->dev->aead_ratelimit);
@@ -1435,6 +1521,7 @@ static int crypto4xx_remove(struct platf
tasklet_kill(&core_dev->tasklet);
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
+ mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -23,8 +23,10 @@
#define __CRYPTO4XX_CORE_H__
#include <linux/ratelimit.h>
+#include <linux/mutex.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
@@ -119,6 +121,7 @@ struct crypto4xx_core_device {
u32 irq;
struct tasklet_struct tasklet;
spinlock_t lock;
+ struct mutex rng_lock;
};
struct crypto4xx_ctx {
@@ -143,6 +146,7 @@ struct crypto4xx_alg_common {
struct skcipher_alg cipher;
struct ahash_alg hash;
struct aead_alg aead;
+ struct rng_alg rng;
} u;
};
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -100,6 +100,7 @@
#define CRYPTO4XX_ENDIAN_CFG 0x000600d8
#define CRYPTO4XX_PRNG_STAT 0x00070000
+#define CRYPTO4XX_PRNG_STAT_BUSY 0x1
#define CRYPTO4XX_PRNG_CTRL 0x00070004
#define CRYPTO4XX_PRNG_SEED_L 0x00070008
#define CRYPTO4XX_PRNG_SEED_H 0x0007000c


@ -1,60 +0,0 @@
From bfa2ba7d9e6b20aca82b99e6842fe18842ae3a0f Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Fri, 17 May 2019 23:15:57 +0200
Subject: [PATCH 13/15] crypto: crypto4xx - fix AES CTR blocksize value
This patch fixes an issue with crypto4xx's ctr(aes) that was
discovered by libkcapi's kcapi-enc-test.sh test.
Some of the ctr(aes) encryption tests were failing on the
non-power-of-two cases:
kcapi-enc - Error: encryption failed with error 0
kcapi-enc - Error: decryption failed with error 0
[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits):
original file (1d100e..cc96184c) and generated file (e3b0c442..1b7852b855)
[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits)
(openssl generated CT): original file (e3b0..5) and generated file (3..8e)
[PASSED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (128 bits)
(openssl generated PT)
[FAILED: 32-bit - 5.1.0-rc1+] 15 bytes: STDIN / STDOUT enc test (password):
original file (1d1..84c) and generated file (e3b..852b855)
But the 16, 32, 512, 65536 tests always worked.
Thankfully, this isn't a hidden hardware problem like before;
instead, this turned out to be a copy-and-paste issue.
With this patch, all the tests pass, and
kcapi-enc-test.sh gives crypto4xx a clean bill of health:
"Number of failures: 0" :).
Cc: stable@vger.kernel.org
Fixes: 98e87e3d933b ("crypto: crypto4xx - add aes-ctr support")
Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1244,7 +1244,7 @@ static struct crypto4xx_alg_common crypt
.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1264,7 +1264,7 @@ static struct crypto4xx_alg_common crypt
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},


@ -1,44 +0,0 @@
From 70c4997f34b6c6888b3ac157adec49e01d0df2d5 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Sat, 18 May 2019 23:28:11 +0200
Subject: [PATCH 14/15] crypto: crypto4xx - fix blocksize for cfb and ofb
While the hardware considers them to be block ciphers, the
reference implementation defines them as stream ciphers.
Do the right thing and set the blocksize to 1. This
was found by CONFIG_CRYPTO_MANAGER_EXTRA_TESTS.
This fixes the following issues:
skcipher: blocksize for ofb-aes-ppc4xx (16) doesn't match generic impl (1)
skcipher: blocksize for cfb-aes-ppc4xx (16) doesn't match generic impl (1)
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: stable@vger.kernel.org
Fixes: f2a13e7cba9e ("crypto: crypto4xx - enable AES RFC3686, ECB, CFB and OFB offloads")
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_core.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1223,7 +1223,7 @@ static struct crypto4xx_alg_common crypt
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},
@@ -1303,7 +1303,7 @@ static struct crypto4xx_alg_common crypt
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_blocksize = 1,
.cra_ctxsize = sizeof(struct crypto4xx_ctx),
.cra_module = THIS_MODULE,
},


@ -1,172 +0,0 @@
From 0f7a81374060828280fcfdfbaa162cb559017f9f Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Sat, 18 May 2019 23:28:12 +0200
Subject: [PATCH 15/15] crypto: crypto4xx - block ciphers should only accept
complete blocks
The hardware automatically zero-pads incomplete block cipher
blocks without raising any errors. This is a screw-up. This
was noticed by the CONFIG_CRYPTO_MANAGER_EXTRA_TESTS tests,
which send incomplete blocks and expect them to fail.
This fixes:
cbc-aes-ppc4xx encryption unexpectedly succeeded on test vector
"random: len=2409 klen=32"; expected_error=-22, cfg="random:
may_sleep use_digest src_divs=[96.90%@+2295, 2.34%@+4066,
0.32%@alignmask+12, 0.34%@+4087, 0.9%@alignmask+1787, 0.1%@+3767]
iv_offset=6"
ecb-aes-ppc4xx encryption unexpectedly succeeded on test vector
"random: len=1011 klen=32"; expected_error=-22, cfg="random:
may_sleep use_digest src_divs=[100.0%@alignmask+20]
dst_divs=[3.12%@+3001, 96.88%@+4070]"
Cc: Eric Biggers <ebiggers@kernel.org>
Cc: stable@vger.kernel.org [4.19, 5.0 and 5.1]
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
drivers/crypto/amcc/crypto4xx_alg.c | 36 +++++++++++++++++++---------
drivers/crypto/amcc/crypto4xx_core.c | 16 ++++++-------
drivers/crypto/amcc/crypto4xx_core.h | 10 ++++----
3 files changed, 39 insertions(+), 23 deletions(-)
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(str
}
static inline int crypto4xx_crypt(struct skcipher_request *req,
- const unsigned int ivlen, bool decrypt)
+ const unsigned int ivlen, bool decrypt,
+ bool check_blocksize)
{
struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
__le32 iv[AES_IV_SIZE];
+ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
+ return -EINVAL;
+
if (ivlen)
crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
@@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct
ctx->sa_len, 0, NULL);
}
-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, 0, false, true);
+}
+
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
+{
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
+}
+
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, false);
+ return crypto4xx_crypt(req, 0, true, true);
}
-int crypto4xx_encrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, false);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
}
-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, 0, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
}
-int crypto4xx_decrypt_iv(struct skcipher_request *req)
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
{
- return crypto4xx_crypt(req, AES_IV_SIZE, true);
+ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
}
/**
@@ -272,8 +286,8 @@ crypto4xx_ctr_crypt(struct skcipher_requ
return ret;
}
- return encrypt ? crypto4xx_encrypt_iv(req)
- : crypto4xx_decrypt_iv(req);
+ return encrypt ? crypto4xx_encrypt_iv_stream(req)
+ : crypto4xx_decrypt_iv_stream(req);
}
static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1211,8 +1211,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cbc,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_block,
+ .decrypt = crypto4xx_decrypt_iv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1231,8 +1231,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1291,8 +1291,8 @@ static struct crypto4xx_alg_common crypt
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = crypto4xx_setkey_aes_ecb,
- .encrypt = crypto4xx_encrypt_noiv,
- .decrypt = crypto4xx_decrypt_noiv,
+ .encrypt = crypto4xx_encrypt_noiv_block,
+ .decrypt = crypto4xx_decrypt_noiv_block,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
@@ -1311,8 +1311,8 @@ static struct crypto4xx_alg_common crypt
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_IV_SIZE,
.setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv,
- .decrypt = crypto4xx_decrypt_iv,
+ .encrypt = crypto4xx_encrypt_iv_stream,
+ .decrypt = crypto4xx_decrypt_iv_stream,
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -183,10 +183,12 @@ int crypto4xx_setkey_rfc3686(struct cryp
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
int crypto4xx_decrypt_ctr(struct skcipher_request *req);
-int crypto4xx_encrypt_iv(struct skcipher_request *req);
-int crypto4xx_decrypt_iv(struct skcipher_request *req);
-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
+int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
+int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
+int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);


@ -1,51 +0,0 @@
From 54e1b3004eb85f9317f6c4ceff2e097231c7f52a Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 20 Dec 2017 22:11:22 +0100
Subject: [PATCH 1/3] net: ibm: emac: replace custom rgmii_mode_name with
phy_modes
The common phylib defines the same names (in lower-case).
Since rgmii_mode_name() is used only in one place and
for a "notice-level" printk, I think it can be replaced.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/net/ethernet/ibm/emac/rgmii.c | 20 +-------------------
1 file changed, 1 insertion(+), 19 deletions(-)
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -59,24 +59,6 @@ static inline int rgmii_valid_mode(int p
phy_mode == PHY_MODE_RTBI;
}
-static inline const char *rgmii_mode_name(int mode)
-{
- switch (mode) {
- case PHY_MODE_RGMII:
- return "RGMII";
- case PHY_MODE_TBI:
- return "TBI";
- case PHY_MODE_GMII:
- return "GMII";
- case PHY_MODE_MII:
- return "MII";
- case PHY_MODE_RTBI:
- return "RTBI";
- default:
- BUG();
- }
-}
-
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
@@ -115,7 +97,7 @@ int rgmii_attach(struct platform_device
out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
- ofdev->dev.of_node, input, rgmii_mode_name(mode));
+ ofdev->dev.of_node, input, phy_modes(mode));
++dev->users;


@ -1,246 +0,0 @@
From 1477bea9e6931f6be96f45b9d277690a26d0cd97 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 20 Dec 2017 22:19:24 +0100
Subject: [PATCH 2/3] net: ibm: emac: replace custom PHY_MODE_* macros
The ibm_emac driver predates the shared PHY_INTERFACE_MODE_
enums provided by include/linux/phy.h by a few years.
And while the driver has been retrofitted to use the PHYLIB,
the old definitions have stuck around to this day.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/net/ethernet/ibm/emac/core.c | 20 ++++++++++----------
drivers/net/ethernet/ibm/emac/emac.h | 13 -------------
drivers/net/ethernet/ibm/emac/phy.c | 10 +++++-----
drivers/net/ethernet/ibm/emac/rgmii.c | 20 ++++++++++----------
drivers/net/ethernet/ibm/emac/zmii.c | 34 +++++++++++++++++-----------------
5 files changed, 42 insertions(+), 55 deletions(-)
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(st
static inline int emac_phy_supports_gige(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline int emac_phy_gpcs(int phy_mode)
{
- return phy_mode == PHY_MODE_SGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2870,7 +2870,7 @@ static int emac_init_config(struct emac_
/* PHY mode needs some decoding */
dev->phy_mode = of_get_phy_mode(np);
if (dev->phy_mode < 0)
- dev->phy_mode = PHY_MODE_NA;
+ dev->phy_mode = PHY_INTERFACE_MODE_NA;
/* Check EMAC version */
if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3173,7 +3173,7 @@ static int emac_probe(struct platform_de
printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
ndev->name, dev->cell_index, np, ndev->dev_addr);
- if (dev->phy_mode == PHY_MODE_SGMII)
+ if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
if (dev->phy.address >= 0)
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
} u1;
};
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII PHY_INTERFACE_MODE_SGMII
-
/* EMACx_MR0 */
#define EMAC_MR0_RXI 0x80000000
#define EMAC_MR0_TXI 0x40000000
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
- if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+ if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,25 +52,25 @@
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
- return phy_mode == PHY_MODE_GMII ||
- phy_mode == PHY_MODE_MII ||
- phy_mode == PHY_MODE_RGMII ||
- phy_mode == PHY_MODE_TBI ||
- phy_mode == PHY_MODE_RTBI;
+ return phy_mode == PHY_INTERFACE_MODE_GMII ||
+ phy_mode == PHY_INTERFACE_MODE_MII ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ phy_mode == PHY_INTERFACE_MODE_TBI ||
+ phy_mode == PHY_INTERFACE_MODE_RTBI;
}
static inline u32 rgmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII:
return RGMII_FER_RGMII(input);
- case PHY_MODE_TBI:
+ case PHY_INTERFACE_MODE_TBI:
return RGMII_FER_TBI(input);
- case PHY_MODE_GMII:
+ case PHY_INTERFACE_MODE_GMII:
return RGMII_FER_GMII(input);
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return RGMII_FER_MII(input);
- case PHY_MODE_RTBI:
+ case PHY_INTERFACE_MODE_RTBI:
return RGMII_FER_RTBI(input);
default:
BUG();
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
*/
static inline int zmii_valid_mode(int mode)
{
- return mode == PHY_MODE_MII ||
- mode == PHY_MODE_RMII ||
- mode == PHY_MODE_SMII ||
- mode == PHY_MODE_NA;
+ return mode == PHY_INTERFACE_MODE_MII ||
+ mode == PHY_INTERFACE_MODE_RMII ||
+ mode == PHY_INTERFACE_MODE_SMII ||
+ mode == PHY_INTERFACE_MODE_NA;
}
static inline const char *zmii_mode_name(int mode)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return "MII";
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return "RMII";
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return "SMII";
default:
BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name
static inline u32 zmii_mode_mask(int mode, int input)
{
switch (mode) {
- case PHY_MODE_MII:
+ case PHY_INTERFACE_MODE_MII:
return ZMII_FER_MII(input);
- case PHY_MODE_RMII:
+ case PHY_INTERFACE_MODE_RMII:
return ZMII_FER_RMII(input);
- case PHY_MODE_SMII:
+ case PHY_INTERFACE_MODE_SMII:
return ZMII_FER_SMII(input);
default:
return 0;
@@ -106,18 +106,18 @@ int zmii_attach(struct platform_device *
* Please, always specify PHY mode in your board port to avoid
* any surprises.
*/
- if (dev->mode == PHY_MODE_NA) {
- if (*mode == PHY_MODE_NA) {
+ if (dev->mode == PHY_INTERFACE_MODE_NA) {
+ if (*mode == PHY_INTERFACE_MODE_NA) {
u32 r = dev->fer_save;
ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
- dev->mode = PHY_MODE_MII;
+ dev->mode = PHY_INTERFACE_MODE_MII;
else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
- dev->mode = PHY_MODE_RMII;
+ dev->mode = PHY_INTERFACE_MODE_RMII;
else
- dev->mode = PHY_MODE_SMII;
+ dev->mode = PHY_INTERFACE_MODE_SMII;
} else
dev->mode = *mode;
@@ -126,7 +126,7 @@ int zmii_attach(struct platform_device *
zmii_mode_name(dev->mode));
} else {
/* All inputs must use the same mode */
- if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+ if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
printk(KERN_ERR
"%pOF: invalid mode %d specified for input %d\n",
ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_de
mutex_init(&dev->lock);
dev->ofdev = ofdev;
- dev->mode = PHY_MODE_NA;
+ dev->mode = PHY_INTERFACE_MODE_NA;
rc = -ENXIO;
if (of_address_to_resource(np, 0, &regs)) {

View File

@ -1,59 +0,0 @@
From 0a49f0d1958440f7928047433c983eece05a3723 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 14 Dec 2017 19:51:50 +0100
Subject: [PATCH 3/3] net: ibm: emac: support RGMII-[RX|TX]ID phymode
The RGMII spec allows compliance for devices that implement an internal
delay on TXC and/or RXC inside the transmitter. This patch adds the
necessary RGMII_[RX|TX]ID mode code to handle such PHYs with the
emac driver.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
v3: - replace PHY_MODE_* with PHY_INTERFACE_MODE_*
- replace rgmii_mode_name() with phy_modes[]
v2: - utilize phy_interface_mode_is_rgmii()
---
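For context: the phy_interface_mode_is_rgmii() helper mentioned in the v2 note is, roughly, the following inline from include/linux/phy.h. It matches all four RGMII variants (RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID), which is why the explicit PHY_INTERFACE_MODE_RGMII comparisons below can be folded into a single call:

	/* sketch of the helper, assuming the RGMII modes are consecutive enum members */
	static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
	{
		return mode >= PHY_INTERFACE_MODE_RGMII &&
		       mode <= PHY_INTERFACE_MODE_RGMII_TXID;
	}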
drivers/net/ethernet/ibm/emac/core.c | 4 ++--
drivers/net/ethernet/ibm/emac/rgmii.c | 7 +++++--
2 files changed, 7 insertions(+), 4 deletions(-)
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,8 +199,8 @@ static void __emac_set_multicast_list(st
static inline int emac_phy_supports_gige(int phy_mode)
{
- return phy_mode == PHY_INTERFACE_MODE_GMII ||
- phy_mode == PHY_INTERFACE_MODE_RGMII ||
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
phy_mode == PHY_INTERFACE_MODE_SGMII ||
phy_mode == PHY_INTERFACE_MODE_TBI ||
phy_mode == PHY_INTERFACE_MODE_RTBI;
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,9 +52,9 @@
/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
static inline int rgmii_valid_mode(int phy_mode)
{
- return phy_mode == PHY_INTERFACE_MODE_GMII ||
+ return phy_interface_mode_is_rgmii(phy_mode) ||
+ phy_mode == PHY_INTERFACE_MODE_GMII ||
phy_mode == PHY_INTERFACE_MODE_MII ||
- phy_mode == PHY_INTERFACE_MODE_RGMII ||
phy_mode == PHY_INTERFACE_MODE_TBI ||
phy_mode == PHY_INTERFACE_MODE_RTBI;
}
@@ -63,6 +63,9 @@ static inline u32 rgmii_mode_mask(int mo
{
switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
return RGMII_FER_RGMII(input);
case PHY_INTERFACE_MODE_TBI:
return RGMII_FER_TBI(input);

View File

@ -1,169 +0,0 @@
From cc809a441d8f2924f785eb863dfa6aef47a25b0b Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Tue, 12 Aug 2014 20:49:27 +0200
Subject: [PATCH 30/36] GPIO: add named gpio exports
Signed-off-by: John Crispin <blogic@openwrt.org>
---
drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++++++++++++++++++++
drivers/gpio/gpiolib.c | 11 +++++--
include/asm-generic/gpio.h | 5 +++
include/linux/gpio/consumer.h | 8 +++++
4 files changed, 90 insertions(+), 2 deletions(-)
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -23,6 +23,8 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
#include <linux/gpio/machine.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
#include "gpiolib.h"
@@ -513,3 +515,72 @@ void of_gpiochip_remove(struct gpio_chip
gpiochip_remove_pin_ranges(chip);
of_node_put(chip->of_node);
}
+
+#ifdef CONFIG_GPIO_SYSFS
+
+static struct of_device_id gpio_export_ids[] = {
+ { .compatible = "gpio-export" },
+ { /* sentinel */ }
+};
+
+static int of_gpio_export_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *cnp;
+ u32 val;
+ int nb = 0;
+
+ for_each_child_of_node(np, cnp) {
+ const char *name = NULL;
+ int gpio;
+ bool dmc;
+ int max_gpio = 1;
+ int i;
+
+ of_property_read_string(cnp, "gpio-export,name", &name);
+
+ if (!name)
+ max_gpio = of_gpio_count(cnp);
+
+ for (i = 0; i < max_gpio; i++) {
+ unsigned flags = 0;
+ enum of_gpio_flags of_flags;
+
+ gpio = of_get_gpio_flags(cnp, i, &of_flags);
+ if (!gpio_is_valid(gpio))
+ return gpio;
+
+ if (of_flags == OF_GPIO_ACTIVE_LOW)
+ flags |= GPIOF_ACTIVE_LOW;
+
+ if (!of_property_read_u32(cnp, "gpio-export,output", &val))
+ flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+ else
+ flags |= GPIOF_IN;
+
+ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np)))
+ continue;
+
+ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change");
+ gpio_export_with_name(gpio, dmc, name);
+ nb++;
+ }
+ }
+
+ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb);
+
+ return 0;
+}
+
+static struct platform_driver gpio_export_driver = {
+ .driver = {
+ .name = "gpio-export",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(gpio_export_ids),
+ },
+ .probe = of_gpio_export_probe,
+};
+
+module_platform_driver(gpio_export_driver);
+
+#endif
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -127,6 +127,12 @@ static inline int gpio_export(unsigned g
return gpiod_export(gpio_to_desc(gpio), direction_may_change);
}
+int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
+static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name)
+{
+ return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name);
+}
+
static inline int gpio_export_link(struct device *dev, const char *name,
unsigned gpio)
{
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -451,6 +451,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
+int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
@@ -458,6 +459,13 @@ void gpiod_unexport(struct gpio_desc *de
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+static inline int _gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change,
+ const char *name)
+{
+ return -ENOSYS;
+}
+
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -553,7 +553,7 @@ static struct class gpio_class = {
*
* Returns zero on success, else an error.
*/
-int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name)
{
struct gpio_chip *chip;
struct gpio_device *gdev;
@@ -615,6 +615,8 @@ int gpiod_export(struct gpio_desc *desc,
offset = gpio_chip_hwgpio(desc);
if (chip->names && chip->names[offset])
ioname = chip->names[offset];
+ if (name)
+ ioname = name;
dev = device_create_with_groups(&gpio_class, &gdev->dev,
MKDEV(0, 0), data, gpio_groups,
@@ -636,6 +638,12 @@ err_unlock:
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
+EXPORT_SYMBOL_GPL(__gpiod_export);
+
+int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+{
+ return __gpiod_export(desc, direction_may_change, NULL);
+}
EXPORT_SYMBOL_GPL(gpiod_export);
static int match_export(struct device *dev, const void *desc)

View File

@ -1,32 +0,0 @@
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -41,6 +41,19 @@ config EBONY
help
This option enables support for the IBM PPC440GP evaluation board.
+config IKAREM
+ bool "Ikarem"
+ depends on 44x
+ default n
+ select PPC44x_SIMPLE
+ select APM821xx
+ select PCI_MSI
+ select PPC4xx_MSI
+ select PPC4xx_PCI_EXPRESS
+ select IBM_EMAC_RGMII
+ help
+ This option enables support for the Cisco Meraki MR24 (Ikarem) Access Point.
+
config SAM440EP
bool "Sam440ep"
depends on 44x
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -62,6 +62,7 @@ static char *board[] __initdata = {
"amcc,sequoia",
"amcc,taishan",
"amcc,yosemite",
+ "meraki,ikarem",
"mosaixtech,icon"
};

View File

@ -1,30 +0,0 @@
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -144,6 +144,17 @@ config CANYONLANDS
help
This option enables support for the AMCC PPC460EX evaluation board.
+config APOLLO3G
+ bool "Apollo3G"
+ depends on 44x
+ default n
+ select PPC44x_SIMPLE
+ select APM821xx
+ select IBM_EMAC_RGMII
+ select 460EX
+ help
+ This option enables support for the AMCC Apollo 3G board.
+
config GLACIER
bool "Glacier"
depends on 44x
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -50,6 +50,7 @@ machine_device_initcall(ppc44x_simple, p
* board.c file for it rather than adding it to this list.
*/
static char *board[] __initdata = {
+ "amcc,apollo3g",
"amcc,arches",
"amcc,bamboo",
"apm,bluestone",

View File

@ -1,32 +0,0 @@
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -4,6 +4,7 @@ ifneq ($(CONFIG_PPC4xx_CPM),y)
obj-y += idle.o
endif
obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
+obj-$(CONFIG_WNDR4700) += wndr4700.o
obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -273,6 +273,19 @@ config ICON
help
This option enables support for the AMCC PPC440SPe evaluation board.
+config WNDR4700
+ bool "WNDR4700"
+ depends on 44x
+ default n
+ select APM821xx
+ select PCI_MSI
+ select PPC4xx_MSI
+ select PPC4xx_PCI_EXPRESS
+ select IBM_EMAC_RGMII
+ select 460EX
+ help
+ This option enables support for the Netgear WNDR4700/WNDR4720 board.
+
config XILINX_VIRTEX440_GENERIC_BOARD
bool "Generic Xilinx Virtex 5 FXT board support"
depends on 44x

View File

@ -1,32 +0,0 @@
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -31,6 +31,19 @@ config BLUESTONE
help
This option enables support for the APM APM821xx Evaluation board.
+config BUCKMINSTER
+ bool "Buckminster"
+ depends on 44x
+ default n
+ select APM821xx
+ select PCI_MSI
+ select PPC4xx_MSI
+ select PPC4xx_PCI_EXPRESS
+ select IBM_EMAC_RGMII
+ select 460EX
+ help
+ This option enables support for the Cisco Meraki MX60/MX60W (Buckminster) Security Appliance.
+
config EBONY
bool "Ebony"
depends on 44x
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -63,6 +63,7 @@ static char *board[] __initdata = {
"amcc,sequoia",
"amcc,taishan",
"amcc,yosemite",
+ "meraki,buckminster",
"meraki,ikarem",
"mosaixtech,icon"
};

View File

@ -1,51 +0,0 @@
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1060,15 +1060,24 @@ static int __init apm821xx_pciex_init_po
u32 val;
/*
- * Do a software reset on PCIe ports.
- * This code is to fix the issue that pci drivers doesn't re-assign
- * bus number for PCIE devices after Uboot
- * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
- * PT quad port, SAS LSI 1064E)
+ * Only reset the PHY when no link is currently established.
+ * This is for the Atheros PCIe board which has problems to establish
+ * the link (again) after this PHY reset. All other currently tested
+ * PCIe boards don't show this problem.
*/
-
- mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
- mdelay(10);
+ val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
+ if (!(val & 0x00001000)) {
+ /*
+ * Do a software reset on PCIe ports.
+ * This code is to fix the issue that pci drivers doesn't re-assign
+ * bus number for PCIE devices after Uboot
+ * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
+ * PT quad port, SAS LSI 1064E)
+ */
+
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
+ mdelay(10);
+ }
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
@@ -1085,9 +1094,12 @@ static int __init apm821xx_pciex_init_po
mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
- mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
- mdelay(50);
- mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
+ val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
+ if (!(val & 0x00001000)) {
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
+ mdelay(50);
+ mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
+ }
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |

View File

@ -1,14 +0,0 @@
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1905,9 +1905,9 @@ static void __init ppc4xx_configure_pcie
* if it works
*/
out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
- out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
+ out_le32(mbase + PECFG_PIM0LAH, 0x00000008);
out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
- out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
+ out_le32(mbase + PECFG_PIM1LAH, 0x0000000c);
out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
out_le32(mbase + PECFG_PIM01SAL, 0x00000000);

View File

@ -1,38 +0,0 @@
From bc183b1da77d6e2fbc801327a1811d446d34f54f Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Wed, 31 Oct 2018 22:20:46 +0100
Subject: [PATCH 1/2] dt-bindings: add protection control property
This patch adds the protection control property and
dt-binding definitions for the DesignWare AHB Central
Direct Memory Access Controller.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
include/dt-bindings/dma/dw-dmac.h | 20 +++++++++++++++++++
1 file changed, 20 insertions(+)
create mode 100644 include/dt-bindings/dma/dw-dmac.h
--- /dev/null
+++ b/include/dt-bindings/dma/dw-dmac.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __DT_BINDINGS_DMA_DW_DMAC_H__
+#define __DT_BINDINGS_DMA_DW_DMAC_H__
+
+#define DW_DMAC_CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
+#define DW_DMAC_CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
+#define DW_DMAC_CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
+#define DW_DMAC_CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
+
+/*
+ * Protection Control bits provide protection against illegal transactions.
+ * The protection bits[0:2] are one-to-one mapped to AHB HPROT[3:1] signals.
+ * The AHB HPROT[0] bit is hardwired to 1: Data Access.
+ */
+#define DW_DMAC_HPROT1_PRIVILEGED_MODE (1 << 0) /* Privileged Mode */
+#define DW_DMAC_HPROT2_BUFFERABLE (1 << 1) /* DMA is bufferable */
+#define DW_DMAC_HPROT3_CACHEABLE (1 << 2) /* DMA is cacheable */
+
+#endif /* __DT_BINDINGS_DMA_DW_DMAC_H__ */
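As an illustration only (the consuming driver code and the DT property that carries this value are outside the scope of this header), the HPROT bits defined above are meant to be OR'd together into a single protection-control value, e.g.:

	/* hypothetical example: request privileged, bufferable, cacheable AHB transfers */
	u32 protctl = DW_DMAC_HPROT1_PRIVILEGED_MODE |
		      DW_DMAC_HPROT2_BUFFERABLE |
		      DW_DMAC_HPROT3_CACHEABLE;	/* maps to AHB HPROT[3:1] */

As the header comment notes, AHB HPROT[0] is hardwired to 1 (Data Access), so only HPROT[3:1] are configurable here.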

View File

@ -1,51 +0,0 @@
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -130,6 +130,7 @@ static inline void emac_report_timeout_e
{
if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
EMAC_FTR_460EX_PHY_CLK_FIX |
+ EMAC_FTR_APM821XX_PHY_CLK_FIX |
EMAC_FTR_440EP_PHY_CLK_FIX))
DBG(dev, "%s" NL, error);
else if (net_ratelimit())
@@ -146,6 +147,10 @@ static inline void emac_rx_clk_tx(struct
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
dcri_clrset(SDR0, SDR0_MFR,
0, SDR0_MFR_ECS >> dev->cell_index);
+
+ if (emac_has_feature(dev, EMAC_FTR_APM821XX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x00000100 >> dev->cell_index);
#endif
}
@@ -155,6 +160,10 @@ static inline void emac_rx_clk_default(s
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
dcri_clrset(SDR0, SDR0_MFR,
SDR0_MFR_ECS >> dev->cell_index, 0);
+
+ if (emac_has_feature(dev, EMAC_FTR_APM821XX_PHY_CLK_FIX))
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0x00000100 >> dev->cell_index, 0);
#endif
}
@@ -2884,6 +2893,7 @@ static int emac_init_config(struct emac_
if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
+ EMAC_FTR_APM821XX_PHY_CLK_FIX |
EMAC_FTR_460EX_PHY_CLK_FIX);
}
} else if (of_device_is_compatible(np, "ibm,emac4")) {
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -335,6 +335,8 @@ struct emac_instance {
*/
#define EMAC_FTR_APM821XX_NO_HALF_DUPLEX 0x00001000
+#define EMAC_FTR_APM821XX_PHY_CLK_FIX 0x000002000
+
/* Right now, we don't quite handle the always/possible masks on the
* most optimal way as we don't have a way to say something like
* always EMAC4. Patches welcome.

View File

@ -1,545 +0,0 @@
From 419992bae5aaa4e06402e0b7c79fcf7bcb6b4764 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@googlemail.com>
Date: Thu, 2 Jun 2016 00:48:46 +0200
Subject: [PATCH] usb: xhci: add firmware loader for uPD720201 and uPD720202
w/o ROM
This patch adds a firmware loader for the uPD720201K8-711-BAC-A
and uPD720202K8-711-BAA-A variants. Both of these chips are listed
on page 2 of Renesas' R19UH0078EJ0500 Rev.5.00 "User's Manual:
Hardware" as devices which need the firmware loader in order to
work, as they "do not support the External ROM".
The "Firmware Download Sequence" is described in chapter
"7.1 FW Download Interface" of R19UH0078EJ0500 Rev.5.00, page 131.
The firmware "K2013080.mem" is available from a USB3.0 Host to
PCIe Adapter (PP2U-E card) "Firmware download" archive. An
alternative version can be sourced from Netgear's WNDR4700 GPL
archives.
The release notes of the PP2U-E's "Firmware Download" ver 2.0.1.3
(2012-06-15) state that the firmware is for the following devices:
- uPD720201 ES 2.0 sample whose revision ID is 2.
- uPD720201 ES 2.1 sample & CS sample & Mass product, ID is 3.
- uPD720202 ES 2.0 sample & CS sample & Mass product, ID is 2.
If someone from Renesas is listening: it would be great if these
firmware images could be added to linux-firmware.git.
Cc: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
---
drivers/usb/host/xhci-pci.c | 492 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 492 insertions(+)
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -268,6 +270,458 @@ static void xhci_pme_acpi_rtd3_enable(st
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
#endif /* CONFIG_ACPI */
+static const struct renesas_fw_entry {
+ const char *firmware_name;
+ u16 device;
+ u8 revision;
+ u16 expected_version;
+} renesas_fw_table[] = {
+ /*
+ * Only the uPD720201K8-711-BAC-A or uPD720202K8-711-BAA-A
+ * are listed in R19UH0078EJ0500 Rev.5.00 as devices which
+ * need the software loader.
+ *
+ * PP2U/ReleaseNote_USB3-201-202-FW.txt:
+ *
+ * Note: This firmware is for the following devices.
+ * - uPD720201 ES 2.0 sample whose revision ID is 2.
+ * - uPD720201 ES 2.1 sample & CS sample & Mass product, ID is 3.
+ * - uPD720202 ES 2.0 sample & CS sample & Mass product, ID is 2.
+ */
+ { "K2013080.mem", 0x0014, 0x02, 0x2013 },
+ { "K2013080.mem", 0x0014, 0x03, 0x2013 },
+ { "K2013080.mem", 0x0015, 0x02, 0x2013 },
+};
+
+static const struct renesas_fw_entry *renesas_needs_fw_dl(struct pci_dev *dev)
+{
+ const struct renesas_fw_entry *entry;
+ size_t i;
+
+ /* This loader will only work with a RENESAS device. */
+ if (!(dev->vendor == PCI_VENDOR_ID_RENESAS))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(renesas_fw_table); i++) {
+ entry = &renesas_fw_table[i];
+ if (entry->device == dev->device &&
+ entry->revision == dev->revision)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int renesas_fw_download_image(struct pci_dev *dev,
+ const u32 *fw,
+ size_t step)
+{
+ size_t i;
+ int err;
+ u8 fw_status;
+ bool data0_or_data1;
+
+ /*
+ * The hardware alternates between two 32-bit pages.
+ * (This is because each row of the firmware is 8 bytes.)
+ *
+ * For even steps we use DATA0, for odd steps DATA1.
+ */
+ data0_or_data1 = (step & 1) == 1;
+
+ /* step+1. Read "Set DATAX" and confirm it is cleared. */
+ for (i = 0; i < 10000; i++) {
+ err = pci_read_config_byte(dev, 0xF5, &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (!(fw_status & BIT(data0_or_data1)))
+ break;
+
+ udelay(1);
+ }
+ if (i == 10000)
+ return -ETIMEDOUT;
+
+ /*
+ * step+2. Write FW data to "DATAX".
+ * "LSB is left" => force little endian
+ */
+ err = pci_write_config_dword(dev, data0_or_data1 ? 0xFC : 0xF8,
+ (__force u32) cpu_to_le32(fw[step]));
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ udelay(100);
+
+ /* step+3. Set "Set DATAX". */
+ err = pci_write_config_byte(dev, 0xF5, BIT(data0_or_data1));
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ return 0;
+}
+
+static int renesas_fw_verify(struct pci_dev *dev,
+ const void *fw_data,
+ size_t length)
+{
+ const struct renesas_fw_entry *entry = renesas_needs_fw_dl(dev);
+ u16 fw_version_pointer;
+ u16 fw_version;
+
+ if (!entry)
+ return -EINVAL;
+
+ /*
+ * The Firmware's Data Format is describe in
+ * "6.3 Data Format" R19UH0078EJ0500 Rev.5.00 page 124
+ */
+
+ /* "Each row is 8 bytes". => firmware size must be a multiple of 8. */
+ if (length % 8 != 0) {
+ dev_err(&dev->dev, "firmware size is not a multiple of 8.");
+ return -EINVAL;
+ }
+
+ /*
+ * The bootrom chips of the big brother have sizes up to 64k, let's
+ * assume that's the biggest the firmware can get.
+ */
+ if (length < 0x1000 || length >= 0x10000) {
+ dev_err(&dev->dev, "firmware size %zd is outside the expected range (4k - 64k).",
+ length);
+ return -EINVAL;
+ }
+
+ /* The first 2 bytes are the fixed value (55aa). "LSB on Left" */
+ if (get_unaligned_le16(fw_data) != 0x55aa) {
+ dev_err(&dev->dev, "no valid firmware header found.");
+ return -EINVAL;
+ }
+
+ /* verify the firmware version position and print it. */
+ fw_version_pointer = get_unaligned_le16(fw_data + 4);
+ if (fw_version_pointer + 2 >= length) {
+ dev_err(&dev->dev, "firmware version pointer is outside of the firmware image.");
+ return -EINVAL;
+ }
+
+ fw_version = get_unaligned_le16(fw_data + fw_version_pointer);
+ dev_dbg(&dev->dev, "got firmware version: %02x.", fw_version);
+
+ if (fw_version != entry->expected_version) {
+ dev_err(&dev->dev, "firmware version mismatch, expected version: %02x.",
+ entry->expected_version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int renesas_fw_check_running(struct pci_dev *pdev)
+{
+ int err;
+ u8 fw_state;
+
+ /*
+ * Test whether the device actually needs the firmware. Most
+ * BIOSes will already have initialized the device for us, in
+ * which case it is ready to use.
+ */
+ err = pci_read_config_byte(pdev, 0xF4, &fw_state);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /*
+ * Check if "FW Download Lock" is locked. If it is and the FW is
+ * ready we can simply continue. If the FW is not ready, we have
+ * to give up.
+ */
+ if (fw_state & BIT(1)) {
+ dev_dbg(&pdev->dev, "FW Download Lock is engaged.");
+
+ if (fw_state & BIT(4))
+ return 0;
+
+ dev_err(&pdev->dev, "FW Download Lock is set and FW is not ready. Giving Up.");
+ return -EIO;
+ }
+
+ /*
+ * Check if "FW Download Enable" is set. If someone (us?) tampered
+ * with it and it can't be reset, we have to give up too... and
+ * ask for forgiveness and a reboot.
+ */
+ if (fw_state & BIT(0)) {
+ dev_err(&pdev->dev, "FW Download Enable is stale. Giving Up (poweroff/reboot needed).");
+ return -EIO;
+ }
+
+ /* Otherwise, Check the "Result Code" Bits (6:4) and act accordingly */
+ switch ((fw_state & 0x70)) {
+ case 0: /* No result yet */
+ dev_dbg(&pdev->dev, "FW is not ready/loaded yet.");
+
+ /* tell the caller, that this device needs the firmware. */
+ return 1;
+
+ case BIT(4): /* Success, device should be working. */
+ dev_dbg(&pdev->dev, "FW is ready.");
+ return 0;
+
+ case BIT(5): /* Error State */
+ dev_err(&pdev->dev, "hardware is in an error state. Giving up (poweroff/reboot needed).");
+ return -ENODEV;
+
+ default: /* All other states are marked as "Reserved states" */
+ dev_err(&pdev->dev, "hardware is in an invalid state %x. Giving up (poweroff/reboot needed).",
+ (fw_state & 0x70) >> 4);
+ return -EINVAL;
+ }
+}
+
+static int renesas_hw_check_run_stop_busy(struct pci_dev *pdev)
+{
+#if 0
+ u32 val;
+
+ /*
+ * 7.1.3 Note 3: "... must not set 'FW Download Enable' when
+ * 'RUN/STOP' of USBCMD Register is set"
+ */
+ val = readl(hcd->regs + 0x20);
+ if (val & BIT(0)) {
+ dev_err(&pdev->dev, "hardware is busy and can't receive a FW.");
+ return -EBUSY;
+ }
+#endif
+ return 0;
+}
+
+static int renesas_fw_download(struct pci_dev *pdev,
+ const struct firmware *fw, unsigned int retry_counter)
+{
+ const u32 *fw_data = (const u32 *) fw->data;
+ size_t i;
+ int err;
+ u8 fw_status;
+
+ /*
+ * For more information and the big picture: please look at the
+ * "Firmware Download Sequence" in "7.1 FW Download Interface"
+ * of R19UH0078EJ0500 Rev.5.00 page 131
+ */
+ err = renesas_hw_check_run_stop_busy(pdev);
+ if (err)
+ return err;
+
+ /*
+ * 0. Set "FW Download Enable" bit in the
+ * "FW Download Control & Status Register" at 0xF4
+ */
+ err = pci_write_config_byte(pdev, 0xF4, BIT(0));
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 1 - 10 follow one step after the other. */
+ for (i = 0; i < fw->size / 4; i++) {
+ err = renesas_fw_download_image(pdev, fw_data, i);
+ if (err) {
+ dev_err(&pdev->dev, "Firmware Download Step %zd failed at position %zd bytes with (%d).",
+ i, i * 4, err);
+ return err;
+ }
+ }
+
+ /*
+ * This sequence continues until the last data is written to
+ * "DATA0" or "DATA1". Naturally, we wait until "SET DATA0/1"
+ * is cleared by the hardware beforehand.
+ */
+ for (i = 0; i < 10000; i++) {
+ err = pci_read_config_byte(pdev, 0xF5, &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (!(fw_status & (BIT(0) | BIT(1))))
+ break;
+
+ udelay(1);
+ }
+ if (i == 10000)
+ dev_warn(&pdev->dev, "Final Firmware Download step timed out.");
+
+ /*
+ * 11. After finishing writing the last data of FW, the
+ * System Software must clear "FW Download Enable"
+ */
+ err = pci_write_config_byte(pdev, 0xF4, 0);
+ if (err)
+ return pcibios_err_to_errno(err);
+
+ /* 12. Read "Result Code" and confirm it is good. */
+ for (i = 0; i < 10000; i++) {
+ err = pci_read_config_byte(pdev, 0xF4, &fw_status);
+ if (err)
+ return pcibios_err_to_errno(err);
+ if (fw_status & BIT(4))
+ break;
+
+ udelay(1);
+ }
+ if (i == 10000) {
+ /* Timed out / Error - let's see if we can fix this */
+ err = renesas_fw_check_running(pdev);
+ switch (err) {
+ case 0: /*
+ * we shouldn't end up here.
+ * maybe it took a little bit longer.
+ * But all should be well?
+ */
+ break;
+
+ case 1: /* (No result yet? - we can try to retry) */
+ if (retry_counter < 10) {
+ retry_counter++;
+ dev_warn(&pdev->dev, "Retry Firmware download: %d try.",
+ retry_counter);
+ return renesas_fw_download(pdev, fw,
+ retry_counter);
+ }
+ return -ETIMEDOUT;
+
+ default:
+ return err;
+ }
+ }
+ /*
+ * Optional last step: Engage Firmware Lock
+ *
+ * err = pci_write_config_byte(pdev, 0xF4, BIT(2));
+ * if (err)
+ * return pcibios_err_to_errno(err);
+ */
+
+ return 0;
+}
+
+struct renesas_fw_ctx {
+ struct pci_dev *pdev;
+ const struct pci_device_id *id;
+ bool resume;
+};
+
+static int xhci_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id);
+
+static void renesas_fw_callback(const struct firmware *fw,
+ void *context)
+{
+ struct renesas_fw_ctx *ctx = context;
+ struct pci_dev *pdev = ctx->pdev;
+ struct device *parent = pdev->dev.parent;
+ int err = -ENOENT;
+
+ if (fw) {
+ err = renesas_fw_verify(pdev, fw->data, fw->size);
+ if (!err) {
+ err = renesas_fw_download(pdev, fw, 0);
+ release_firmware(fw);
+ if (!err) {
+ if (ctx->resume)
+ return;
+
+ err = xhci_pci_probe(pdev, ctx->id);
+ if (!err) {
+ /* everything worked */
+ devm_kfree(&pdev->dev, ctx);
+ return;
+ }
+
+ /* in case of an error - fall through */
+ } else {
+ dev_err(&pdev->dev, "firmware failed to download (%d).",
+ err);
+ }
+ }
+ } else {
+ dev_err(&pdev->dev, "firmware failed to load (%d).", err);
+ }
+
+ dev_info(&pdev->dev, "Unloading driver");
+
+ if (parent)
+ device_lock(parent);
+
+ device_release_driver(&pdev->dev);
+
+ if (parent)
+ device_unlock(parent);
+
+ pci_dev_put(pdev);
+}
+
+static int renesas_fw_alive_check(struct pci_dev *pdev)
+{
+ const struct renesas_fw_entry *entry;
+ int err;
+
+ /* check if we have an eligible RENESAS uPD720201/2 w/o FW. */
+ entry = renesas_needs_fw_dl(pdev);
+ if (!entry)
+ return 0;
+
+ err = renesas_fw_check_running(pdev);
+ /* Also go ahead, if the firmware is running */
+ if (err == 0)
+ return 0;
+
+ /* At this point, we can be sure that the FW isn't ready. */
+ return err;
+}
+
+static int renesas_fw_download_to_hw(struct pci_dev *pdev,
+ const struct pci_device_id *id,
+ bool do_resume)
+{
+ const struct renesas_fw_entry *entry;
+ struct renesas_fw_ctx *ctx;
+ int err;
+
+ /* check if we have an eligible RENESAS uPD720201/2 w/o FW. */
+ entry = renesas_needs_fw_dl(pdev);
+ if (!entry)
+ return 0;
+
+ err = renesas_fw_check_running(pdev);
+ /* Continue ahead, if the firmware is already running. */
+ if (err == 0)
+ return 0;
+
+ if (err != 1)
+ return err;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->pdev = pdev;
+ ctx->resume = do_resume;
+ ctx->id = id;
+
+ pci_dev_get(pdev);
+ err = request_firmware_nowait(THIS_MODULE, 1, entry->firmware_name,
+ &pdev->dev, GFP_KERNEL, ctx, renesas_fw_callback);
+ if (err) {
+ pci_dev_put(pdev);
+ return err;
+ }
+
+ /*
+ * The renesas_fw_callback() callback will continue the probe
+ * process once it acquires the firmware.
+ */
+ return 1;
+}
+
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
@@ -306,6 +760,22 @@ static int xhci_pci_probe(struct pci_dev
struct hc_driver *driver;
struct usb_hcd *hcd;
+ /*
+ * Check if this device is a RENESAS uPD720201/2 device.
+ * Otherwise, we can continue with xhci_pci_probe as usual.
+ */
+ retval = renesas_fw_download_to_hw(dev, id, false);
+ switch (retval) {
+ case 0:
+ break;
+
+ case 1: /* let it load the firmware and recontinue the probe. */
+ return 0;
+
+ default:
+ return retval;
+ }
+
driver = (struct hc_driver *)id->driver_data;
/* For some HW implementation, a XHCI reset is just not enough... */
@@ -367,6 +837,16 @@ static void xhci_pci_remove(struct pci_d
{
struct xhci_hcd *xhci;
+ if (renesas_fw_alive_check(dev)) {
+ /*
+ * Bail out early if this was a renesas device w/o FW.
+ * Otherwise we might hit the NMI watchdog in xhci_handshake
+ * during xhci_reset as part of the driver unload, which
+ * we forced in renesas_fw_callback().
+ */
+ return;
+ }
+
xhci = hcd_to_xhci(pci_get_drvdata(dev));
xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {

View File

@ -1,54 +0,0 @@
From a0dc613140bab907a3d5787a7ae7b0638bf674d0 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Thu, 23 Jun 2016 20:28:20 +0200
Subject: [PATCH] usb: xhci: force MSI for uPD720201 and
uPD720202
The APM82181 does not support MSI-X. When the xhci driver tries
to enable MSI-X during probe, it produces a noisy warning.
---
drivers/usb/host/pci-quirks.c | 362 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 362 insertions(+)
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -221,7 +221,7 @@ static void xhci_pci_quirks(struct devic
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0015)
- xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_RESET_ON_RESUME | XHCI_FORCE_MSI;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -370,10 +370,14 @@ static int xhci_try_enable_msi(struct us
free_irq(hcd->irq, hcd);
hcd->irq = 0;
- ret = xhci_setup_msix(xhci);
- if (ret)
- /* fall back to msi*/
+ if (xhci->quirks & XHCI_FORCE_MSI) {
ret = xhci_setup_msi(xhci);
+ } else {
+ ret = xhci_setup_msix(xhci);
+ if (ret)
+ /* fall back to msi*/
+ ret = xhci_setup_msi(xhci);
+ }
if (!ret) {
hcd->msi_enabled = 1;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1858,6 +1858,7 @@ struct xhci_hcd {
/* support xHCI 0.96 spec USB2 software LPM */
unsigned sw_lpm_support:1;
/* support xHCI 1.0 spec USB2 hardware LPM */
+#define XHCI_FORCE_MSI (1 << 24)
unsigned hw_lpm_support:1;
/* Broken Suspend flag for SNPS Suspend resume issue */
unsigned broken_suspend:1;

View File

@ -1,65 +0,0 @@
From 694f9bfb8efaef8a33e8992015ff9d0866faf4a2 Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Sun, 17 Dec 2017 17:27:15 +0100
Subject: [PATCH 1/2] hwmon: tc654 add detection routine
This patch adds a detection routine for the TC654/TC655
chips. Both IDs are listed in the Datasheet.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/hwmon/tc654.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -64,6 +64,11 @@ enum tc654_regs {
/* Register data is read (and cached) at most once per second. */
#define TC654_UPDATE_INTERVAL HZ
+/* Manufacturer and Version Identification Register Values */
+#define TC654_MFR_ID_MICROCHIP 0x84
+#define TC654_VER_ID 0x00
+#define TC655_VER_ID 0x01
+
struct tc654_data {
struct i2c_client *client;
@@ -497,6 +502,29 @@ static const struct i2c_device_id tc654_
{}
};
+static int
+tc654_detect(struct i2c_client *new_client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = new_client->adapter;
+ int manufacturer, product;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ manufacturer = i2c_smbus_read_byte_data(new_client, TC654_REG_MFR_ID);
+ if (manufacturer != TC654_MFR_ID_MICROCHIP)
+ return -ENODEV;
+
+ product = i2c_smbus_read_byte_data(new_client, TC654_REG_VER_ID);
+ if (!((product == TC654_VER_ID) || (product == TC655_VER_ID)))
+ return -ENODEV;
+
+ strlcpy(info->type, product == TC654_VER_ID ? "tc654" : "tc655",
+ I2C_NAME_SIZE);
+ return 0;
+}
+
+
MODULE_DEVICE_TABLE(i2c, tc654_id);
static struct i2c_driver tc654_driver = {
@@ -505,6 +533,7 @@ static struct i2c_driver tc654_driver =
},
.probe = tc654_probe,
.id_table = tc654_id,
+ .detect = tc654_detect,
};
module_i2c_driver(tc654_driver);

View File

@ -1,174 +0,0 @@
From 15ae701189744d321d3a1264ff46f8871e8765ee Mon Sep 17 00:00:00 2001
From: Christian Lamparter <chunkeey@gmail.com>
Date: Sun, 17 Dec 2017 17:29:13 +0100
Subject: [PATCH] hwmon: tc654: add thermal_cooling device
This patch adds a thermal_cooling device to the tc654 driver.
This allows the chip to be used for DT-based cooling.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
---
drivers/hwmon/tc654.c | 103 +++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 86 insertions(+), 17 deletions(-)
--- a/drivers/hwmon/tc654.c
+++ b/drivers/hwmon/tc654.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/thermal.h>
#include <linux/util_macros.h>
enum tc654_regs {
@@ -141,6 +142,9 @@ struct tc654_data {
* writable register used to control the duty
* cycle of the V OUT output.
*/
+
+ /* optional cooling device */
+ struct thermal_cooling_device *cdev;
};
/* helper to grab and cache data, at most one time per second */
@@ -376,36 +380,30 @@ static ssize_t set_pwm_mode(struct devic
static const int tc654_pwm_map[16] = { 77, 88, 102, 112, 124, 136, 148, 160,
172, 184, 196, 207, 219, 231, 243, 255};
+static int get_pwm(struct tc654_data *data)
+{
+ if (data->config & TC654_REG_CONFIG_SDM)
+ return 0;
+ else
+ return tc654_pwm_map[data->duty_cycle];
+}
+
static ssize_t show_pwm(struct device *dev, struct device_attribute *da,
char *buf)
{
struct tc654_data *data = tc654_update_client(dev);
- int pwm;
if (IS_ERR(data))
return PTR_ERR(data);
- if (data->config & TC654_REG_CONFIG_SDM)
- pwm = 0;
- else
- pwm = tc654_pwm_map[data->duty_cycle];
-
- return sprintf(buf, "%d\n", pwm);
+ return sprintf(buf, "%d\n", get_pwm(data));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static int _set_pwm(struct tc654_data *data, unsigned long val)
{
- struct tc654_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- unsigned long val;
int ret;
- if (kstrtoul(buf, 10, &val))
- return -EINVAL;
- if (val > 255)
- return -EINVAL;
-
mutex_lock(&data->update_lock);
if (val == 0)
@@ -425,6 +423,22 @@ static ssize_t set_pwm(struct device *de
out:
mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tc654_data *data = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+ if (val > 255)
+ return -EINVAL;
+
+ ret = _set_pwm(data, val);
return ret < 0 ? ret : count;
}
@@ -462,6 +476,47 @@ static struct attribute *tc654_attrs[] =
ATTRIBUTE_GROUPS(tc654);
+/* cooling device */
+
+static int tc654_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 255;
+ return 0;
+}
+
+static int tc654_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ *state = get_pwm(data);
+ return 0;
+}
+
+static int tc654_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct tc654_data *data = tc654_update_client(cdev->devdata);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (state > 255)
+ return -EINVAL;
+
+ return _set_pwm(data, state);
+}
+
+static const struct thermal_cooling_device_ops tc654_fan_cool_ops = {
+ .get_max_state = tc654_get_max_state,
+ .get_cur_state = tc654_get_cur_state,
+ .set_cur_state = tc654_set_cur_state,
+};
+
/*
* device probe and removal
*/
@@ -493,7 +548,21 @@ static int tc654_probe(struct i2c_client
hwmon_dev =
devm_hwmon_device_register_with_groups(dev, client->name, data,
tc654_groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+#if IS_ENABLED(CONFIG_OF)
+ /* Optional cooling device register for Device tree platforms */
+ data->cdev = thermal_of_cooling_device_register(client->dev.of_node,
+ "tc654", hwmon_dev,
+ &tc654_fan_cool_ops);
+#else /* CONFIG_OF */
+ /* Optional cooling device register for non Device tree platforms */
+ data->cdev = thermal_cooling_device_register("tc654", hwmon_dev,
+ &tc654_fan_cool_ops);
+#endif /* CONFIG_OF */
+
+ return PTR_ERR_OR_ZERO(data->cdev);
}
static const struct i2c_device_id tc654_id[] = {