Unverified Commit e835dbfb authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4674 Backport 5.10.193- 5.10.194 LTS patches from upstream

Merge Pull Request from: @sanglipeng 
 
issue: https://gitee.com/openeuler/kernel/issues/I9399M


Conflicts:

context conflict(2):  
5ac05ce56843: sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
2d69f68ad409: cgroup/cpuset: Free DL BW in case can_attach() fails

Already merged(5):
c6aecc29d29e: objtool/x86: Fix SRSO mess
4bc6a4fca1f0: x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
9c2ceffd4e36: tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
b8205dfed681: tracing: Fix memleak due to race between current_tracer and trace
ad4f8c117b8b: rcu: Prevent expedited GP from enabling tick on offline CPU

Rejected(5):
749630ce9147: mm,hwpoison: refactor get_any_page
406166a3acd7: mm: fix page reference leak in soft_offline_page()
20c2db79f157: mm: memory-failure: kill soft_offline_free_page()
32f71ef62737: mm: memory-failure: fix unexpected return value in soft_offline_page()
b3ac2c1d725b: mm,hwpoison: fix printing of page flags

Kabi broken(2):
70dfdbba3070: net: validate veth and vxcan peer ifindexes
72e4a5a28ebf: batman-adv: Hold rtnl lock during MTU update via netlink

Total patches: 95 - 5 - 5 - 2 = 83 
 
Link: https://gitee.com/openeuler/kernel/pulls/4674

 

Reviewed-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents a24a9238 972554dd
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -263,7 +263,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
+2 −1
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>

static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
@@ -342,7 +343,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
+12 −15
Original line number Diff line number Diff line
@@ -30,7 +30,6 @@
 *
 */

#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -624,18 +623,17 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 * There is an errata on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 */
	if (!dma_default_coherent)
		dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -687,18 +685,17 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on certain Au1200/Au1550 revisions that could
	 * result in "stale" data being DMA'ed. It has to do with the snoop
	 * logic on the cache eviction buffer.  dma_default_coherent is set
	 * to false on these parts.
	 * There is an errata on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	if (!dma_default_coherent)
		dma_cache_inv(KSEG0ADDR(buf), nbytes);
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	wmb(); /* drain writebuffer */
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;
	wmb(); /* force doorbell write out to dma engine */

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+19 −2
Original line number Diff line number Diff line
@@ -126,7 +126,24 @@
#define cpu_has_tx39_cache	__opt(MIPS_CPU_TX39_CACHE)
#endif
#ifndef cpu_has_octeon_cache
#define cpu_has_octeon_cache	0
#define cpu_has_octeon_cache						\
({									\
	int __res;							\
									\
	switch (boot_cpu_type()) {					\
	case CPU_CAVIUM_OCTEON:						\
	case CPU_CAVIUM_OCTEON_PLUS:					\
	case CPU_CAVIUM_OCTEON2:					\
	case CPU_CAVIUM_OCTEON3:					\
		__res = 1;						\
		break;							\
									\
	default:							\
		__res = 0;						\
	}								\
									\
	__res;								\
})
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work.  */
#ifndef cpu_has_fpu
@@ -353,7 +370,7 @@
({									\
	int __res;							\
									\
	switch (current_cpu_type()) {					\
	switch (boot_cpu_type()) {					\
	case CPU_M14KC:							\
	case CPU_74K:							\
	case CPU_1074K:							\
+92 −47
Original line number Diff line number Diff line
@@ -3740,7 +3740,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
	ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
			    RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
			    RBD_LOCK_TAG, "", 0);
	if (ret)
	if (ret && ret != -EEXIST)
		return ret;

	__rbd_lock(rbd_dev, cookie);
@@ -3914,10 +3914,26 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
	list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
}

static int get_lock_owner_info(struct rbd_device *rbd_dev,
			       struct ceph_locker **lockers, u32 *num_lockers)
static bool locker_equal(const struct ceph_locker *lhs,
			 const struct ceph_locker *rhs)
{
	return lhs->id.name.type == rhs->id.name.type &&
	       lhs->id.name.num == rhs->id.name.num &&
	       !strcmp(lhs->id.cookie, rhs->id.cookie) &&
	       ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
}

static void free_locker(struct ceph_locker *locker)
{
	if (locker)
		ceph_free_lockers(locker, 1);
}

static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_locker *lockers;
	u32 num_lockers;
	u8 lock_type;
	char *lock_tag;
	int ret;
@@ -3926,39 +3942,45 @@ static int get_lock_owner_info(struct rbd_device *rbd_dev,

	ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc, RBD_LOCK_NAME,
				 &lock_type, &lock_tag, lockers, num_lockers);
	if (ret)
		return ret;
				 &lock_type, &lock_tag, &lockers, &num_lockers);
	if (ret) {
		rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
		return ERR_PTR(ret);
	}

	if (*num_lockers == 0) {
	if (num_lockers == 0) {
		dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
		lockers = NULL;
		goto out;
	}

	if (strcmp(lock_tag, RBD_LOCK_TAG)) {
		rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
			 lock_tag);
		ret = -EBUSY;
		goto out;
		goto err_busy;
	}

	if (lock_type == CEPH_CLS_LOCK_SHARED) {
		rbd_warn(rbd_dev, "shared lock type detected");
		ret = -EBUSY;
		goto out;
		goto err_busy;
	}

	if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
	WARN_ON(num_lockers != 1);
	if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
		    strlen(RBD_LOCK_COOKIE_PREFIX))) {
		rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
			 (*lockers)[0].id.cookie);
		ret = -EBUSY;
		goto out;
			 lockers[0].id.cookie);
		goto err_busy;
	}

out:
	kfree(lock_tag);
	return ret;
	return lockers;

err_busy:
	kfree(lock_tag);
	ceph_free_lockers(lockers, num_lockers);
	return ERR_PTR(-EBUSY);
}

static int find_watcher(struct rbd_device *rbd_dev,
@@ -3974,13 +3996,19 @@ static int find_watcher(struct rbd_device *rbd_dev,
	ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
				      &rbd_dev->header_oloc, &watchers,
				      &num_watchers);
	if (ret)
	if (ret) {
		rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
		return ret;
	}

	sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
	for (i = 0; i < num_watchers; i++) {
		if (!memcmp(&watchers[i].addr, &locker->info.addr,
			    sizeof(locker->info.addr)) &&
		/*
		 * Ignore addr->type while comparing.  This mimics
		 * entity_addr_t::get_legacy_str() + strcmp().
		 */
		if (ceph_addr_equal_no_type(&watchers[i].addr,
					    &locker->info.addr) &&
		    watchers[i].cookie == cookie) {
			struct rbd_client_id cid = {
				.gid = le64_to_cpu(watchers[i].name.num),
@@ -4008,51 +4036,72 @@ static int find_watcher(struct rbd_device *rbd_dev,
static int rbd_try_lock(struct rbd_device *rbd_dev)
{
	struct ceph_client *client = rbd_dev->rbd_client->client;
	struct ceph_locker *lockers;
	u32 num_lockers;
	struct ceph_locker *locker, *refreshed_locker;
	int ret;

	for (;;) {
		locker = refreshed_locker = NULL;

		ret = rbd_lock(rbd_dev);
		if (ret != -EBUSY)
			return ret;
		if (!ret)
			goto out;
		if (ret != -EBUSY) {
			rbd_warn(rbd_dev, "failed to lock header: %d", ret);
			goto out;
		}

		/* determine if the current lock holder is still alive */
		ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
		if (ret)
			return ret;

		if (num_lockers == 0)
		locker = get_lock_owner_info(rbd_dev);
		if (IS_ERR(locker)) {
			ret = PTR_ERR(locker);
			locker = NULL;
			goto out;
		}
		if (!locker)
			goto again;

		ret = find_watcher(rbd_dev, lockers);
		ret = find_watcher(rbd_dev, locker);
		if (ret)
			goto out; /* request lock or error */

		refreshed_locker = get_lock_owner_info(rbd_dev);
		if (IS_ERR(refreshed_locker)) {
			ret = PTR_ERR(refreshed_locker);
			refreshed_locker = NULL;
			goto out;
		}
		if (!refreshed_locker ||
		    !locker_equal(locker, refreshed_locker))
			goto again;

		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
			 ENTITY_NAME(lockers[0].id.name));
			 ENTITY_NAME(locker->id.name));

		ret = ceph_monc_blocklist_add(&client->monc,
					      &lockers[0].info.addr);
					      &locker->info.addr);
		if (ret) {
			rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
			rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
				 ENTITY_NAME(locker->id.name), ret);
			goto out;
		}

		ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
					  &rbd_dev->header_oloc, RBD_LOCK_NAME,
					  lockers[0].id.cookie,
					  &lockers[0].id.name);
		if (ret && ret != -ENOENT)
					  locker->id.cookie, &locker->id.name);
		if (ret && ret != -ENOENT) {
			rbd_warn(rbd_dev, "failed to break header lock: %d",
				 ret);
			goto out;
		}

again:
		ceph_free_lockers(lockers, num_lockers);
		free_locker(refreshed_locker);
		free_locker(locker);
	}

out:
	ceph_free_lockers(lockers, num_lockers);
	free_locker(refreshed_locker);
	free_locker(locker);
	return ret;
}

@@ -4102,11 +4151,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)

	ret = rbd_try_lock(rbd_dev);
	if (ret < 0) {
		rbd_warn(rbd_dev, "failed to lock header: %d", ret);
		if (ret == -EBLOCKLISTED)
		rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
		goto out;

		ret = 1; /* request lock anyway */
	}
	if (ret > 0) {
		up_write(&rbd_dev->lock_rwsem);
@@ -6656,12 +6702,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
		if (!ret)
			ret = -ETIMEDOUT;
	}

	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
		return ret;
		rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
	}
	if (ret)
		return ret;

	/*
	 * The lock may have been released by now, unless automatic lock
Loading