diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 34fecf97a355b85f6447cc33e4c3b36ad4a8022a..e8a89e18c640ef1a7038440cc04ffcf822bc7dba 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2013,13 +2013,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
  * Returns negative errno, else the number of messages executed.
  *
  * Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
  */
 int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
 	unsigned long orig_jiffies;
 	int ret, try;
 
+	if (!adap->algo->master_xfer) {
+		dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (WARN_ON(!msgs || num < 1))
 		return -EINVAL;
 
@@ -2086,11 +2091,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 {
 	int ret;
 
-	if (!adap->algo->master_xfer) {
-		dev_dbg(&adap->dev, "I2C level transfers not supported\n");
-		return -EOPNOTSUPP;
-	}
-
 	/* REVISIT the fault reporting model here is weak:
 	 *
 	 *  - When we get an error after receiving N bytes from a slave,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d8cb5bcd6b10e3982c31a0cfedfaecd772cd1d2b..e89b7252623b7617d1378416a23bb1dce247f5c0 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4487,13 +4487,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 		set_bit(i, bitmap);
 	}
 
-	if (err) {
-		if (i > 0)
-			its_vpe_irq_domain_free(domain, virq, i);
-
-		its_lpi_free(bitmap, base, nr_ids);
-		its_free_prop_table(vprop_page);
-	}
+	if (err)
+		its_vpe_irq_domain_free(domain, virq, i);
 
 	return err;
 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 78a5ead8a0989c5603ed3535fb3d9a202ad8c444..042a019391978661d4ac4ad7bfdfcd2acb0e2563 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2060,7 +2060,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select);
 /**
  *	scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
  *	@sdev:	SCSI device to be queried
- *	@dbd:	set if mode sense will allow block descriptors to be returned
+ *	@dbd:	set to prevent mode sense from returning block descriptors
  *	@modepage: mode page being requested
  *	@buffer: request buffer (may not be smaller than eight bytes)
  *	@len:	length of request buffer.
@@ -2095,18 +2095,18 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 		sshdr = &my_sshdr;
 
  retry:
-	use_10_for_ms = sdev->use_10_for_ms;
+	use_10_for_ms = sdev->use_10_for_ms || len > 255;
 
 	if (use_10_for_ms) {
-		if (len < 8)
-			len = 8;
+		if (len < 8 || len > 65535)
+			return -EINVAL;
 
 		cmd[0] = MODE_SENSE_10;
-		cmd[8] = len;
+		put_unaligned_be16(len, &cmd[7]);
 		header_length = 8;
 	} else {
 		if (len < 4)
-			len = 4;
+			return -EINVAL;
 
 		cmd[0] = MODE_SENSE;
 		cmd[4] = len;
@@ -2131,7 +2131,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
 			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
 				/*
-				 * Invalid command operation code
+				 * Invalid command operation code: retry using
+				 * MODE SENSE(6) if this was a MODE SENSE(10)
+				 * request, except if the request mode page is
+				 * too large for MODE SENSE single byte
+				 * allocation length field.
 				 */
 				sdev->use_10_for_ms = 0;
 				goto retry;
@@ -2150,12 +2154,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 		data->longlba = 0;
 		data->block_descriptor_length = 0;
 	} else if (use_10_for_ms) {
-		data->length = buffer[0]*256 + buffer[1] + 2;
+		data->length = get_unaligned_be16(&buffer[0]) + 2;
 		data->medium_type = buffer[2];
 		data->device_specific = buffer[3];
 		data->longlba = buffer[4] & 0x01;
-		data->block_descriptor_length = buffer[6]*256
-			+ buffer[7];
+		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
 	} else {
 		data->length = buffer[0] + 1;
 		data->medium_type = buffer[1];
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 18a4588c35be632449258c2fc5d0e6f4ccd5effd..e0eb4c74825b47743e4c5d964d61d344049fd75a 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -443,12 +443,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	int permitted;
 	struct mm_struct *mm;
 	unsigned long long start_time;
-	unsigned long cmin_flt = 0, cmaj_flt = 0;
-	unsigned long min_flt = 0, maj_flt = 0;
-	u64 cutime, cstime, utime, stime;
-	u64 cgtime, gtime;
+	unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
+	u64 cutime, cstime, cgtime, utime, stime, gtime;
 	unsigned long rsslim = 0;
 	unsigned long flags;
+	int exit_code = task->exit_code;
+	struct signal_struct *sig = task->signal;
+	unsigned int seq = 1;
 
 	state = *get_task_state(task);
 	vsize = eip = esp = 0;
@@ -476,12 +477,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	sigemptyset(&sigign);
 	sigemptyset(&sigcatch);
-	cutime = cstime = utime = stime = 0;
-	cgtime = gtime = 0;
 
 	if (lock_task_sighand(task, &flags)) {
-		struct signal_struct *sig = task->signal;
-
 		if (sig->tty) {
 			struct pid *pgrp = tty_get_pgrp(sig->tty);
 			tty_pgrp = pid_nr_ns(pgrp, ns);
@@ -492,26 +489,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		num_threads = get_nr_threads(task);
 		collect_sigign_sigcatch(task, &sigign, &sigcatch);
 
-		cmin_flt = sig->cmin_flt;
-		cmaj_flt = sig->cmaj_flt;
-		cutime = sig->cutime;
-		cstime = sig->cstime;
-		cgtime = sig->cgtime;
 		rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 
-		/* add up live thread stats at the group level */
 		if (whole) {
-			struct task_struct *t = task;
-			do {
-				min_flt += t->min_flt;
-				maj_flt += t->maj_flt;
-				gtime += task_gtime(t);
-			} while_each_thread(task, t);
-
-			min_flt += sig->min_flt;
-			maj_flt += sig->maj_flt;
-			thread_group_cputime_adjusted(task, &utime, &stime);
-			gtime += sig->gtime;
+			if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+				exit_code = sig->group_exit_code;
 		}
 
 		sid = task_session_nr_ns(task, ns);
@@ -523,6 +505,35 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	if (permitted && (!whole || num_threads < 2))
 		wchan = get_wchan(task);
+
+	do {
+		seq++; /* 2 on the 1st/lockless path, otherwise odd */
+		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+
+		cmin_flt = sig->cmin_flt;
+		cmaj_flt = sig->cmaj_flt;
+		cutime = sig->cutime;
+		cstime = sig->cstime;
+		cgtime = sig->cgtime;
+
+		if (whole) {
+			struct task_struct *t;
+
+			min_flt = sig->min_flt;
+			maj_flt = sig->maj_flt;
+			gtime = sig->gtime;
+
+			rcu_read_lock();
+			__for_each_thread(sig, t) {
+				min_flt += t->min_flt;
+				maj_flt += t->maj_flt;
+				gtime += task_gtime(t);
+			}
+			rcu_read_unlock();
+		}
+	} while (need_seqretry(&sig->stats_lock, seq));
+	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
+
 	if (!whole) {
 		min_flt = task->min_flt;
 		maj_flt = task->maj_flt;
@@ -613,7 +624,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 		seq_puts(m, " 0 0 0 0 0 0 0");
 
 	if (permitted)
-		seq_put_decimal_ll(m, " ", task->exit_code);
+		seq_put_decimal_ll(m, " ", exit_code);
 	else
 		seq_puts(m, " 0");
 
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index a0f980e6150520bd97b4c58bd423decd71c2516e..7ce6db1ac558a7caff41581c90ed11d7309b8ccf 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -107,8 +107,10 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 	if (hdev->req_status == HCI_REQ_PEND) {
 		hdev->req_result = result;
 		hdev->req_status = HCI_REQ_DONE;
-		if (skb)
+		if (skb) {
+			kfree_skb(hdev->req_skb);
 			hdev->req_skb = skb_get(skb);
+		}
 		wake_up_interruptible(&hdev->req_wait_q);
 	}
 }
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index a7d4a770236da40134c0ade2678c5f00ca04158f..60e2258b37516b53073fb2c70e807b9e4cb6a85a 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -443,7 +443,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	struct scatterlist *sge;
 	struct sk_msg *msg_en;
 	struct tls_rec *rec;
-	bool ready = false;
 	int pending;
 
 	rec = container_of(aead_req, struct tls_rec, aead_req);
@@ -475,8 +474,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 		/* If received record is at head of tx_list, schedule tx */
 		first_rec = list_first_entry(&ctx->tx_list,
 					     struct tls_rec, list);
-		if (rec == first_rec)
-			ready = true;
+		if (rec == first_rec) {
+			/* Schedule the transmission */
+			if (!test_and_set_bit(BIT_TX_SCHEDULED,
+					      &ctx->tx_bitmask))
+				schedule_delayed_work(&ctx->tx_work.work, 1);
+		}
 	}
 
 	spin_lock_bh(&ctx->encrypt_compl_lock);
@@ -485,13 +488,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
 	if (!pending && ctx->async_notify)
 		complete(&ctx->async_wait.completion);
 	spin_unlock_bh(&ctx->encrypt_compl_lock);
-
-	if (!ready)
-		return;
-
-	/* Schedule the transmission */
-	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
-		schedule_delayed_work(&ctx->tx_work.work, 1);
 }
 
 static int tls_do_encryption(struct sock *sk,