From e8b2769052e52ba20ed9bd9feee9298abe59151d Mon Sep 17 00:00:00 2001
From: 胡逸飞
Date: Wed, 20 Apr 2022 18:35:00 +0800
Subject: [PATCH] Revert
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 胡逸飞
---
 arch/s390/kvm/kvm-s390.c                |  2 -
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |  5 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 35 ++++++-----
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |  2 +-
 drivers/net/xen-netback/common.h        |  1 -
 drivers/net/xen-netback/rx.c            | 77 +++++++++----------------
 fs/fs_context.c                         |  2 +-
 fs/nfs/dir.c                            | 18 ------
 kernel/bpf/verifier.c                   |  6 +-
 kernel/cgroup/cgroup-v1.c               | 14 -----
 net/packet/af_packet.c                  |  5 +-
 12 files changed, 56 insertions(+), 113 deletions(-)
 mode change 100755 => 100644 arch/s390/kvm/kvm-s390.c
 mode change 100755 => 100644 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
 mode change 100755 => 100644 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
 mode change 100755 => 100644 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
 mode change 100755 => 100644 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
 mode change 100755 => 100644 drivers/net/xen-netback/common.h
 mode change 100755 => 100644 drivers/net/xen-netback/rx.c
 mode change 100755 => 100644 fs/fs_context.c
 mode change 100755 => 100644 fs/nfs/dir.c
 mode change 100755 => 100644 kernel/bpf/verifier.c
 mode change 100755 => 100644 kernel/cgroup/cgroup-v1.c
 mode change 100755 => 100644 net/packet/af_packet.c

diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
old mode 100755
new mode 100644
index 957dda21055b..f94b4f78d4da
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4648,8 +4648,6 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
 		return -E2BIG;
-	if (!kvm_s390_pv_cpu_is_protected(vcpu))
-		return -EINVAL;
 
 	switch (mop->op) {
 	case KVM_S390_MEMOP_SIDA_READ:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
old mode 100755
new mode 100644
index ad208a5f4ebe..1523b51a7284
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1088,14 +1088,15 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
 				      struct vmw_fence_obj **p_fence,
 				      uint32_t *p_handle);
-extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 					struct vmw_fpriv *vmw_fp,
 					int ret,
 					struct drm_vmw_fence_rep __user
 					*user_fence_rep,
 					struct vmw_fence_obj *fence,
 					uint32_t fence_handle,
-					int32_t out_fence_fd);
+					int32_t out_fence_fd,
+					struct sync_file *sync_file);
 bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
old mode 100755
new mode 100644
index 739cbc77d886..d3f7d7ece3d1
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3816,17 +3816,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
  * Also if copying fails, user-space will be unable to signal the fence object
  * so we wait for it immediately, and then unreference the user-space reference.
  */
-int
+void
 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 			    struct vmw_fpriv *vmw_fp, int ret,
 			    struct drm_vmw_fence_rep __user *user_fence_rep,
 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
-			    int32_t out_fence_fd)
+			    int32_t out_fence_fd, struct sync_file *sync_file)
 {
 	struct drm_vmw_fence_rep fence_rep;
 
 	if (user_fence_rep == NULL)
-		return 0;
+		return;
 
 	memset(&fence_rep, 0, sizeof(fence_rep));
 
@@ -3854,14 +3854,20 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
 	 * handle.
 	 */
 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
-		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+		if (sync_file)
+			fput(sync_file->file);
+
+		if (fence_rep.fd != -1) {
+			put_unused_fd(fence_rep.fd);
+			fence_rep.fd = -1;
+		}
+
+		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
 					  TTM_REF_USAGE);
 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
 		(void) vmw_fence_obj_wait(fence, false, false,
 					  VMW_FENCE_WAIT_TIMEOUT);
 	}
-
-	return ret ? -EFAULT : 0;
 }
 
 /**
@@ -4203,23 +4209,16 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			(void) vmw_fence_obj_wait(fence, false, false,
 						  VMW_FENCE_WAIT_TIMEOUT);
-		}
-	}
-
-	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
-					  user_fence_rep, fence, handle, out_fence_fd);
-
-	if (sync_file) {
-		if (ret) {
-			/* usercopy of fence failed, put the file object */
-			fput(sync_file->file);
-			put_unused_fd(out_fence_fd);
 		} else {
 			/* Link the fence with the FD created earlier */
 			fd_install(out_fence_fd, sync_file->file);
 		}
 	}
 
+	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+				    user_fence_rep, fence, handle, out_fence_fd,
+				    sync_file);
+
 	/* Don't unreference when handing fence out */
 	if (unlikely(out_fence != NULL)) {
 		*out_fence = fence;
@@ -4237,7 +4236,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	 */
 	vmw_validation_unref_lists(&val_ctx);
 
-	return ret;
+	return 0;
 
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
old mode 100755
new mode 100644
index 8bc41ec97d71..0f8d29397157
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1171,7 +1171,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	}
 
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
-				    handle, -1);
+				    handle, -1, NULL);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
 out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
old mode 100755
new mode 100644
index e58112997c88..312ed0881a99
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2479,7 +2479,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,
-					    handle, -1);
+					    handle, -1, NULL);
 	if (out_fence)
 		*out_fence = fence;
 	else
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
old mode 100755
new mode 100644
index 6a9178896c90..8ee24e351bdc
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -203,7 +203,6 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	unsigned int rx_queue_max;
 	unsigned int rx_queue_len;
 	unsigned long last_rx_time;
-	unsigned int rx_slots_needed;
 	bool stalled;
 
 	struct xenvif_copy_state rx_copy;
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
old mode 100755
new mode 100644
index dbac4c03d21a..accc991d153f
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,36 +33,28 @@
 #include <xen/xen.h>
 #include <xen/events.h>
 
-/*
- * Update the needed ring page slots for the first SKB queued.
- * Note that any call sequence outside the RX thread calling this function
- * needs to wake up the RX thread via a call of xenvif_kick_thread()
- * afterwards in order to avoid a race with putting the thread to sleep.
- */
-static void xenvif_update_needed_slots(struct xenvif_queue *queue,
-				       const struct sk_buff *skb)
-{
-	unsigned int needed = 0;
-
-	if (skb) {
-		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
-		if (skb_is_gso(skb))
-			needed++;
-		if (skb->sw_hash)
-			needed++;
-	}
-
-	WRITE_ONCE(queue->rx_slots_needed, needed);
-}
-
 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
-	unsigned int needed;
+	struct sk_buff *skb;
+	int needed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-	needed = READ_ONCE(queue->rx_slots_needed);
-	if (!needed)
+	skb = skb_peek(&queue->rx_queue);
+	if (!skb) {
+		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 		return false;
+	}
+
+	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+	if (skb_is_gso(skb))
+		needed++;
+	if (skb->sw_hash)
+		needed++;
+
+	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
 
 	do {
 		prod = queue->rx.sring->req_prod;
@@ -88,19 +80,13 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
 
 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
 
-	if (queue->rx_queue_len >= queue->rx_queue_max) {
+	__skb_queue_tail(&queue->rx_queue, skb);
+
+	queue->rx_queue_len += skb->len;
+	if (queue->rx_queue_len > queue->rx_queue_max) {
 		struct net_device *dev = queue->vif->dev;
 
 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-		kfree_skb(skb);
-		queue->vif->dev->stats.rx_dropped++;
-	} else {
-		if (skb_queue_empty(&queue->rx_queue))
-			xenvif_update_needed_slots(queue, skb);
-
-		__skb_queue_tail(&queue->rx_queue, skb);
-
-		queue->rx_queue_len += skb->len;
 	}
 
 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -114,8 +100,6 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
 
 	skb = __skb_dequeue(&queue->rx_queue);
 	if (skb) {
-		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
-
 		queue->rx_queue_len -= skb->len;
 		if (queue->rx_queue_len < queue->rx_queue_max) {
 			struct netdev_queue *txq;
@@ -150,7 +134,6 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
 			break;
 		xenvif_rx_dequeue(queue);
 		kfree_skb(skb);
-		queue->vif->dev->stats.rx_dropped++;
 	}
 }
 
@@ -504,31 +487,27 @@ void xenvif_rx_action(struct xenvif_queue *queue)
 	xenvif_rx_copy_flush(queue);
 }
 
-static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
+static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
 {
 	RING_IDX prod, cons;
 
 	prod = queue->rx.sring->req_prod;
 	cons = queue->rx.req_cons;
 
-	return prod - cons;
-}
-
-static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
-{
-	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
-
 	return !queue->stalled &&
-		xenvif_rx_queue_slots(queue) < needed &&
+		prod - cons < 1 &&
 		time_after(jiffies,
 			   queue->last_rx_time + queue->vif->stall_timeout);
 }
 
 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
 {
-	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+	RING_IDX prod, cons;
+
+	prod = queue->rx.sring->req_prod;
+	cons = queue->rx.req_cons;
 
-	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
+	return queue->stalled && prod - cons >= 1;
 }
 
 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
diff --git a/fs/fs_context.c b/fs/fs_context.c
old mode 100755
new mode 100644
index b11677802ee1..2834d1afa6e8
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -530,7 +530,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
 			      param->key);
 	}
 
-	if (size + len + 2 > PAGE_SIZE)
+	if (len > PAGE_SIZE - 2 - size)
 		return invalf(fc, "VFS: Legacy: Cumulative options too large");
 
 	if (strchr(param->key, ',') ||
 	    (param->type == fs_value_is_string &&
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
old mode 100755
new mode 100644
index 4e7f608e8f3d..c837675cd395
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1778,24 +1778,6 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
 no_open:
 	res = nfs_lookup(dir, dentry, lookup_flags);
-	if (!res) {
-		inode = d_inode(dentry);
-		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
-			res = ERR_PTR(-ENOTDIR);
-		else if (inode && S_ISREG(inode->i_mode))
-			res = ERR_PTR(-EOPENSTALE);
-	} else if (!IS_ERR(res)) {
-		inode = d_inode(res);
-		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
-			dput(res);
-			res = ERR_PTR(-ENOTDIR);
-		} else if (inode && S_ISREG(inode->i_mode)) {
-			dput(res);
-			res = ERR_PTR(-EOPENSTALE);
-		}
-	}
 	if (switched) {
 		d_lookup_done(dentry);
 		if (!res)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
old mode 100755
new mode 100644
index 8766bc3239ed..ce1e9193365f
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6012,16 +6012,16 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		fallthrough;
 	case PTR_TO_PACKET_END:
 	case PTR_TO_SOCKET:
+	case PTR_TO_SOCKET_OR_NULL:
 	case PTR_TO_SOCK_COMMON:
+	case PTR_TO_SOCK_COMMON_OR_NULL:
 	case PTR_TO_TCP_SOCK:
+	case PTR_TO_TCP_SOCK_OR_NULL:
 	case PTR_TO_XDP_SOCK:
-reject:
 		verbose(env, "R%d pointer arithmetic on %s prohibited\n",
 			dst, reg_type_str[ptr_reg->type]);
 		return -EACCES;
 	default:
-		if (type_may_be_null(ptr_reg->type))
-			goto reject;
 		break;
 	}
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
old mode 100755
new mode 100644
index 134723ce778a..812dd4ed0129
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -545,14 +545,6 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
 
 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
 
-	/*
-	 * Release agent gets called with all capabilities,
-	 * require capabilities to set release agent.
-	 */
-	if ((of->file->f_cred->user_ns != &init_user_ns) ||
-	    !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-
 	cgrp = cgroup_kn_lock_live(of->kn, false);
 	if (!cgrp)
 		return -ENODEV;
@@ -970,12 +962,6 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		/* Specifying two release agents is forbidden */
 		if (ctx->release_agent)
 			return invalfc(fc, "release_agent respecified");
-		/*
-		 * Release agent gets called with all capabilities,
-		 * require capabilities to set release agent.
-		 */
-		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
-			return invalfc(fc, "Setting release_agent not allowed");
 		ctx->release_agent = param->string;
 		param->string = NULL;
 		break;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
old mode 100755
new mode 100644
index f78097aa403a..3cbc0fd323bf
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4461,10 +4461,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	}
 
 out_free_pg_vec:
-	if (pg_vec) {
-		bitmap_free(rx_owner_map);
+	bitmap_free(rx_owner_map);
+	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
-	}
 out:
 	return err;
 }
-- 
Gitee