diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index a76a70a170a342c2c9ce961177b6198aad6aa303..6c54cbac312e845d1a8412edfb686e3f59cc24f6 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng)
	 * is always enabled if CPUID rng_en is set. There is no
	 * RNG configuration like it used to be the case in this
	 * register */
-	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){
+	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
 		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
 			pr_err(PFX "can't enable hardware RNG "
 				"if XSTORE is not enabled\n");
@@ -191,17 +191,17 @@ static struct hwrng via_rng = {
 	.data_read	= via_rng_data_read,
 };
 
-static struct x86_cpu_id via_rng_ids[] = {
-	{ X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XSTORE },
+static struct x86_cpu_id via_rng_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL),
 	{}
 };
-MODULE_DEVICE_TABLE(x86cpu, via_rng_ids);
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_ids);
 
 static int __init mod_init(void)
 {
 	int err;
 
-	if (!x86_match_cpu(via_rng_ids))
+	if (!x86_match_cpu(via_rng_cpu_ids))
 		return -ENODEV;
 
 	pr_info("VIA RNG detected\n");
diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c
index f4e1f58494af4bd8cc0111b36d6339106cffac0b..eb82d35b3f6e48b4f16a1fa4e733fcf78485f93b 100644
--- a/drivers/char/hw_random/zhaoxin-rng.c
+++ b/drivers/char/hw_random/zhaoxin-rng.c
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * RNG driver for Zhaoxin RNGs
+ *
+ * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd
  */
 
 #include
@@ -11,162 +13,82 @@
 #include
 #include
 #include
-#include
 #include
 
+#define DRIVER_VERSION "2.0.0"
+
 enum {
-	ZHAOXIN_STRFILT_CNT_SHIFT	= 16,
-	ZHAOXIN_STRFILT_FAIL		= (1 << 15),
-	ZHAOXIN_STRFILT_ENABLE		= (1 << 14),
-	ZHAOXIN_RAWBITS_ENABLE		= (1 << 13),
-	ZHAOXIN_RNG_ENABLE		= (1 << 6),
-	ZHAOXIN_NOISESRC1		= (1 << 8),
-	ZHAOXIN_NOISESRC2		= (1 << 9),
-	ZHAOXIN_XSTORE_CNT_MASK		= 0x0F,
-
-	ZHAOXIN_RNG_CHUNK_8		= 0x00,	/* 64 rand bits, 64 stored bits */
-	ZHAOXIN_RNG_CHUNK_4		= 0x01,	/* 32 rand bits, 32 stored bits */
-	ZHAOXIN_RNG_CHUNK_4_MASK	= 0xFFFFFFFF,
-	ZHAOXIN_RNG_CHUNK_2		= 0x02,	/* 16 rand bits, 32 stored bits */
-	ZHAOXIN_RNG_CHUNK_2_MASK	= 0xFFFF,
-	ZHAOXIN_RNG_CHUNK_1		= 0x03,	/* 8 rand bits, 32 stored bits */
-	ZHAOXIN_RNG_CHUNK_1_MASK	= 0xFF,
+	ZHAOXIN_RNG_CHUNK_8 = 0x00,	/* 64 rand bits, 64 stored bits */
+	ZHAOXIN_RNG_CHUNK_4 = 0x01,	/* 32 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_CHUNK_2 = 0x02,	/* 16 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_CHUNK_1 = 0x03,	/* 8 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_MAX_SIZE = (128 * 1024),
 };
 
-/*
- * Investigate using the 'rep' prefix to obtain 32 bits of random data
- * in one insn. The upside is potentially better performance. The
- * downside is that the instruction becomes no longer atomic. Due to
- * this, just like familiar issues with /dev/random itself, the worst
- * case of a 'rep xstore' could potentially pause a cpu for an
- * unreasonably long time. In practice, this condition would likely
- * only occur when the hardware is failing. (or so we hope :))
- *
- * Another possible performance boost may come from simply buffering
- * until we have 4 bytes, thus returning a u32 at a time,
- * instead of the current u8-at-a-time.
- *
- * Padlock instructions can generate a spurious DNA fault, but the
- * kernel doesn't use CR0.TS, so this doesn't matter.
- */
-
-static inline u32 xstore(u32 *addr, u32 edx_in)
+static int zhaoxin_rng_init(struct hwrng *rng)
 {
-	u32 eax_out;
-
-	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" : "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
+	if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+		pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n");
+		return -ENODEV;
+	}
 
-	return eax_out;
+	return 0;
 }
 
-static int zhaoxin_rng_data_present(struct hwrng *rng, int wait)
+static inline void rep_xstore(size_t size, size_t factor, void *result)
 {
-	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ ((aligned(STACK_ALIGN)));
-	u32 *zhaoxin_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
-	u32 bytes_out;
-	int i;
-
-	/* We choose the recommended 1-byte-per-instruction RNG rate,
-	 * for greater randomness at the expense of speed. Larger
-	 * values 2, 4, or 8 bytes-per-instruction yield greater
-	 * speed at lesser randomness.
-	 *
-	 * If you change this to another ZHAOXIN_CHUNK_n, you must also
-	 * change the ->n_bytes values in rng_vendor_ops[] tables.
-	 * ZHAOXIN_CHUNK_8 requires further code changes.
-	 *
-	 * A copy of MSR_ZHAOXIN_RNG is placed in eax_out when xstore
-	 * completes.
-	 */
-
-	for (i = 0; i < 20; i++) {
-		*zhaoxin_rng_datum = 0; /* paranoia, not really necessary */
-		bytes_out = xstore(zhaoxin_rng_datum, ZHAOXIN_RNG_CHUNK_1);
-		bytes_out &= ZHAOXIN_XSTORE_CNT_MASK;
-		if (bytes_out || !wait)
-			break;
-		udelay(10);
-	}
-	rng->priv = *zhaoxin_rng_datum;
-	return bytes_out ? 1 : 0;
+	asm(".byte 0xf3, 0x0f, 0xa7, 0xc0" : "=m"(*(size_t *)result), "+c"(size), "+d"(factor), "+D"(result));
 }
 
-static int zhaoxin_rng_data_read(struct hwrng *rng, u32 *data)
+static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
-	u32 zhaoxin_rng_datum = (u32)rng->priv;
+	if (max > ZHAOXIN_RNG_MAX_SIZE)
+		max = ZHAOXIN_RNG_MAX_SIZE;
 
-	*data = zhaoxin_rng_datum;
+	rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data);
 
-	return 1;
+	return max;
 }
 
-static int zhaoxin_rng_init(struct hwrng *rng)
-{
-	struct cpuinfo_x86 *c = &cpu_data(0);
-
-	/*
-	 * Zhaoxin CPUs don't have the MSR_ZHAOXIN_RNG anymore. The RNG
-	 * is always enabled if CPUID rng_en is set. There is no
-	 * RNG configuration like it used to be the case in this
-	 * register
-	 */
-	if (c->x86 > 6) {
-		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
-			pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n");
-			return -ENODEV;
-		}
-		return 0;
-	}
-	return 0;
-}
-
-
 static struct hwrng zhaoxin_rng = {
-	.name		= "zhaoxin",
-	.init		= zhaoxin_rng_init,
-	.data_present	= zhaoxin_rng_data_present,
-	.data_read	= zhaoxin_rng_data_read,
+	.name = "zhaoxin",
+	.init = zhaoxin_rng_init,
+	.read = zhaoxin_rng_read,
 };
 
-static struct x86_cpu_id zhaoxin_rng_ids[] = {
-	{ X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XSTORE },
-	{ X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XSTORE },
+static const struct x86_cpu_id zhaoxin_rng_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL),
 	{}
 };
-MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_ids);
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids);
 
-static int __init mod_init(void)
+static int __init zhaoxin_rng_mod_init(void)
 {
 	int err;
 
-	if (!x86_match_cpu(zhaoxin_rng_ids))
+	if (!x86_match_cpu(zhaoxin_rng_cpu_ids))
 		return -ENODEV;
 
-	pr_info("RNG detected\n");
+	pr_info(PFX "RNG detected\n");
+
 	err = hwrng_register(&zhaoxin_rng);
-	if (err) {
+	if (err)
 		pr_err(PFX "RNG registering failed (%d)\n", err);
-		goto out;
-	}
-out:
+
 	return err;
 }
+module_init(zhaoxin_rng_mod_init);
 
-static void __exit mod_exit(void)
+static void __exit zhaoxin_rng_mod_exit(void)
 {
 	hwrng_unregister(&zhaoxin_rng);
 }
+module_exit(zhaoxin_rng_mod_exit);
 
-module_init(mod_init);
-module_exit(mod_exit);
-
-static struct x86_cpu_id __maybe_unused zhaoxin_rng_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
-	{}
-};
-
-MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPU with PadLock");
+MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs");
+MODULE_AUTHOR("YunShen@zhaoxin.com");
 MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_id);
+MODULE_VERSION(DRIVER_VERSION);
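
Note (not part of the patch): the new rep_xstore() helper emits the REP-prefixed XSTORE opcode (0xf3 0x0f 0xa7 0xc0). Per its operand constraints, ECX carries the number of bytes to store, EDX the quality/divisor factor (the ZHAOXIN_RNG_CHUNK_* values) and EDI the destination; the instruction also leaves RNG status in EAX, as the removed driver comment about eax_out notes. The following is a hypothetical user-space sketch of that calling convention, assuming XSTORE is usable from user mode on a CPU with X86_FEATURE_XSTORE_EN set; anywhere else it will fault with an invalid opcode.

/* Hypothetical illustration only -- not part of the patch. */
#include <stddef.h>
#include <stdio.h>

/* Mirrors the register convention used by the kernel's rep_xstore():
 * ECX = byte count, EDX = quality factor, EDI = destination buffer.
 * EAX is clobbered with RNG status when the instruction completes.
 */
static void rep_xstore_demo(void *buf, size_t len, size_t factor)
{
	asm volatile(".byte 0xf3, 0x0f, 0xa7, 0xc0"	/* rep xstore */
		     : "+c"(len), "+d"(factor), "+D"(buf)
		     :
		     : "eax", "memory");
}

int main(void)
{
	unsigned char buf[16];
	size_t i;

	/* 0x03 matches ZHAOXIN_RNG_CHUNK_1 (8 rand bits per 32 stored bits). */
	rep_xstore_demo(buf, sizeof(buf), 0x03);

	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}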
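
Note (not part of the patch): once zhaoxin_rng is registered, the hwrng core exposes it through /dev/hwrng and lists it under /sys/class/misc/hw_random/ (rng_current, rng_available). A small user-space sanity check along these lines can confirm the device produces data; the program and file name are illustrative, the device and sysfs paths are the standard hwrng interfaces.

/* Hypothetical sanity check -- not part of the patch. Build with
 * "cc -O2 hwrng_check.c -o hwrng_check" and run as root once the
 * zhaoxin-rng module is loaded.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	char cur[64];
	ssize_t n, i;
	int fd;

	/* Report which backend currently feeds /dev/hwrng. */
	fd = open("/sys/class/misc/hw_random/rng_current", O_RDONLY);
	if (fd >= 0) {
		n = read(fd, cur, sizeof(cur) - 1);
		if (n > 0) {
			cur[n] = '\0';
			printf("current rng: %s", cur);
		}
		close(fd);
	}

	fd = open("/dev/hwrng", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}

	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0) {
		perror("read /dev/hwrng");
		return 1;
	}

	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}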