Merge tag 'for-linus-5.15b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen fixes from Juergen Gross:

 - The first hunk of a Xen swiotlb fixup series fixing multiple minor
   issues and doing some small cleanups

 - Some further Xen related fixes avoiding WARN() splats when running as
   Xen guests or dom0

 - A Kconfig fix allowing the pvcalls backend to be built as a module

* tag 'for-linus-5.15b-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  swiotlb-xen: drop DEFAULT_NSLABS
  swiotlb-xen: arrange to have buffer info logged
  swiotlb-xen: drop leftover __ref
  swiotlb-xen: limit init retries
  swiotlb-xen: suppress certain init retries
  swiotlb-xen: maintain slab count properly
  swiotlb-xen: fix late init retry
  swiotlb-xen: avoid double free
  xen/pvcalls: backend can be a module
  xen: fix usage of pmd_populate in mremap for pv guests
  xen: reset legacy rtc flag for PV domU
  PM: base: power: don't try to use non-existing RTC for storing data
  xen/balloon: use a kernel thread instead a workqueue
torvalds committed Sep 17, 2021
2 parents bdb575f + d859ed2 commit c6460da
Showing 6 changed files with 85 additions and 40 deletions.
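
A note on the largest change before the per-file diffs: "xen/balloon: use a kernel thread instead a workqueue" (drivers/xen/balloon.c below) replaces a self-rearming delayed work item with a dedicated kernel thread that sleeps on a wait queue and is woken from the old schedule_delayed_work() call sites. The sketch below is a minimal illustration of that general pattern, not the balloon code itself; the demo_* names are made up, and the real conversion additionally handles the BP_EAGAIN back-off timeout and the freezer interaction visible in the diff.

/* Illustrative sketch only -- demo_* names are hypothetical, not kernel APIs. */
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_should_run;
static struct task_struct *demo_task;

static int demo_thread(void *unused)
{
	set_freezable();
	for (;;) {
		/* Sleep until there is work or the thread is asked to stop. */
		wait_event_interruptible(demo_wq,
				demo_should_run || kthread_should_stop());
		if (kthread_should_stop())
			return 0;

		demo_should_run = false;
		/* ... do the deferred work here ... */
	}
}

/* Former schedule_delayed_work(&worker, 0) call sites become: */
static void demo_kick(void)
{
	demo_should_run = true;
	wake_up(&demo_wq);
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo-thread");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	demo_kick();	/* example: request an initial pass */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
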
7 changes: 7 additions & 0 deletions arch/x86/xen/enlighten_pv.c
@@ -1214,6 +1214,11 @@ static void __init xen_dom0_set_legacy_features(void)
 	x86_platform.legacy.rtc = 1;
 }
 
+static void __init xen_domu_set_legacy_features(void)
+{
+	x86_platform.legacy.rtc = 0;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1359,6 +1364,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
 		add_preferred_console("xenboot", 0, NULL);
 		if (pci_xen)
 			x86_init.pci.arch_init = pci_xen_init;
+		x86_platform.set_legacy_features =
+				xen_domu_set_legacy_features;
 	} else {
 		const struct dom0_vga_console_info *info =
 			(void *)((char *)xen_start_info +
7 changes: 5 additions & 2 deletions arch/x86/xen/mmu_pv.c
@@ -1518,14 +1518,17 @@ static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
 	if (pinned) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (static_branch_likely(&xen_struct_pages_ready))
+		pinned = false;
+		if (static_branch_likely(&xen_struct_pages_ready)) {
+			pinned = PagePinned(page);
 			SetPagePinned(page);
+		}
 
 		xen_mc_batch();
 
 		__set_pfn_prot(pfn, PAGE_KERNEL_RO);
 
-		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
 			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 
 		xen_mc_issue(PARAVIRT_LAZY_MMU);
10 changes: 10 additions & 0 deletions drivers/base/power/trace.c
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/rtc.h>
 #include <linux/suspend.h>
+#include <linux/init.h>
 
 #include <linux/mc146818rtc.h>
 
@@ -165,6 +166,9 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
 	const char *file = *(const char **)(tracedata + 2);
 	unsigned int user_hash_value, file_hash_value;
 
+	if (!x86_platform.legacy.rtc)
+		return;
+
 	user_hash_value = user % USERHASH;
 	file_hash_value = hash_string(lineno, file, FILEHASH);
 	set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
@@ -267,6 +271,9 @@ static struct notifier_block pm_trace_nb = {
 
 static int __init early_resume_init(void)
 {
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	hash_value_early_read = read_magic_time();
 	register_pm_notifier(&pm_trace_nb);
 	return 0;
@@ -277,6 +284,9 @@ static int __init late_resume_init(void)
 	unsigned int val = hash_value_early_read;
 	unsigned int user, file, dev;
 
+	if (!x86_platform.legacy.rtc)
+		return 0;
+
 	user = val % USERHASH;
 	val = val / USERHASH;
 	file = val % FILEHASH;
2 changes: 1 addition & 1 deletion drivers/xen/Kconfig
@@ -214,7 +214,7 @@ config XEN_PVCALLS_FRONTEND
 	  implements them.
 
 config XEN_PVCALLS_BACKEND
-	bool "XEN PV Calls backend driver"
+	tristate "XEN PV Calls backend driver"
 	depends on INET && XEN && XEN_BACKEND
 	help
 	  Experimental backend for the Xen PV Calls protocol
62 changes: 45 additions & 17 deletions drivers/xen/balloon.c
@@ -43,6 +43,8 @@
 #include <linux/sched.h>
 #include <linux/cred.h>
 #include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
 #include <linux/mm.h>
 #include <linux/memblock.h>
 #include <linux/pagemap.h>
@@ -115,7 +117,7 @@ static struct ctl_table xen_root[] = {
 #define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
 
 /*
- * balloon_process() state:
+ * balloon_thread() state:
  *
  * BP_DONE: done or nothing to do,
  * BP_WAIT: wait to be rescheduled,
@@ -130,6 +132,8 @@ enum bp_state {
 	BP_ECANCELED
 };
 
+/* Main waiting point for xen-balloon thread. */
+static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);
+
 static DEFINE_MUTEX(balloon_mutex);
 
@@ -144,10 +148,6 @@ static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
 static LIST_HEAD(ballooned_pages);
 static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 
-/* Main work function, always executed in process context. */
-static void balloon_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -366,7 +366,7 @@ static void xen_online_page(struct page *page, unsigned int order)
 static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
 {
 	if (val == MEM_ONLINE)
-		schedule_delayed_work(&balloon_worker, 0);
+		wake_up(&balloon_thread_wq);
 
 	return NOTIFY_OK;
 }
@@ -491,18 +491,43 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * As this is a work item it is guaranteed to run as a single instance only.
+ * Stop waiting if either state is not BP_EAGAIN and ballooning action is
+ * needed, or if the credit has changed while state is BP_EAGAIN.
+ */
+static bool balloon_thread_cond(enum bp_state state, long credit)
+{
+	if (state != BP_EAGAIN)
+		credit = 0;
+
+	return current_credit() != credit || kthread_should_stop();
+}
+
+/*
+ * As this is a kthread it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
  */
-static void balloon_process(struct work_struct *work)
+static int balloon_thread(void *unused)
 {
 	enum bp_state state = BP_DONE;
 	long credit;
+	unsigned long timeout;
+
+	set_freezable();
+	for (;;) {
+		if (state == BP_EAGAIN)
+			timeout = balloon_stats.schedule_delay * HZ;
+		else
+			timeout = 3600 * HZ;
+		credit = current_credit();
+
+		wait_event_interruptible_timeout(balloon_thread_wq,
+				balloon_thread_cond(state, credit), timeout);
+
+		if (kthread_should_stop())
+			return 0;
 
-	do {
 		mutex_lock(&balloon_mutex);
 
 		credit = current_credit();
@@ -529,20 +554,15 @@ static void balloon_process(struct work_struct *work)
 		mutex_unlock(&balloon_mutex);
 
 		cond_resched();
-
-	} while (credit && state == BP_DONE);
-
-	/* Schedule more work if there is some still to be done. */
-	if (state == BP_EAGAIN)
-		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
+	}
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 void balloon_set_new_target(unsigned long target)
 {
 	/* No need for lock. Not read-modify-write updates. */
 	balloon_stats.target_pages = target;
-	schedule_delayed_work(&balloon_worker, 0);
+	wake_up(&balloon_thread_wq);
 }
 EXPORT_SYMBOL_GPL(balloon_set_new_target);
 
@@ -647,7 +667,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
 
 	/* The balloon may be too large now. Shrink it if needed. */
 	if (current_credit())
-		schedule_delayed_work(&balloon_worker, 0);
+		wake_up(&balloon_thread_wq);
 
 	mutex_unlock(&balloon_mutex);
 }
@@ -679,6 +699,8 @@ static void __init balloon_add_region(unsigned long start_pfn,
 
 static int __init balloon_init(void)
 {
+	struct task_struct *task;
+
 	if (!xen_domain())
 		return -ENODEV;
 
@@ -722,6 +744,12 @@ static int __init balloon_init(void)
 	}
 #endif
 
+	task = kthread_run(balloon_thread, NULL, "xen-balloon");
+	if (IS_ERR(task)) {
+		pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
+		return PTR_ERR(task);
+	}
+
 	/* Init the xen-balloon driver. */
 	xen_balloon_init();
 
37 changes: 17 additions & 20 deletions drivers/xen/swiotlb-xen.c
@@ -106,27 +106,26 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 
 static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
-	int i, rc;
-	int dma_bits;
+	int rc;
+	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
+	unsigned int i, dma_bits = order + PAGE_SHIFT;
 	dma_addr_t dma_handle;
 	phys_addr_t p = virt_to_phys(buf);
 
-	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
+	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
+	BUG_ON(nslabs % IO_TLB_SEGSIZE);
 
 	i = 0;
 	do {
-		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
-
 		do {
 			rc = xen_create_contiguous_region(
-				p + (i << IO_TLB_SHIFT),
-				get_order(slabs << IO_TLB_SHIFT),
+				p + (i << IO_TLB_SHIFT), order,
 				dma_bits, &dma_handle);
 		} while (rc && dma_bits++ < MAX_DMA_BITS);
 		if (rc)
 			return rc;
 
-		i += slabs;
+		i += IO_TLB_SEGSIZE;
 	} while (i < nslabs);
 	return 0;
 }
@@ -153,9 +152,7 @@ static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
 	return "";
 }
 
-#define DEFAULT_NSLABS	ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
-
-int __ref xen_swiotlb_init(void)
+int xen_swiotlb_init(void)
 {
 	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
 	unsigned long bytes = swiotlb_size_or_default();
@@ -185,7 +182,7 @@ int __ref xen_swiotlb_init(void)
 		order--;
 	}
 	if (!start)
-		goto error;
+		goto exit;
 	if (order != get_order(bytes)) {
 		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
 			(PAGE_SIZE << order) >> 20);
@@ -208,15 +205,15 @@ int __ref xen_swiotlb_init(void)
 	swiotlb_set_max_segment(PAGE_SIZE);
 	return 0;
 error:
-	if (repeat--) {
+	if (nslabs > 1024 && repeat--) {
 		/* Min is 2MB */
-		nslabs = max(1024UL, (nslabs >> 1));
-		pr_info("Lowering to %luMB\n",
-			(nslabs << IO_TLB_SHIFT) >> 20);
+		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
+		bytes = nslabs << IO_TLB_SHIFT;
+		pr_info("Lowering to %luMB\n", bytes >> 20);
 		goto retry;
 	}
+exit:
 	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
 	free_pages((unsigned long)start, order);
 	return rc;
 }
@@ -244,17 +241,17 @@ void __init xen_swiotlb_init_early(void)
 	rc = xen_swiotlb_fixup(start, nslabs);
 	if (rc) {
 		memblock_free(__pa(start), PAGE_ALIGN(bytes));
-		if (repeat--) {
+		if (nslabs > 1024 && repeat--) {
 			/* Min is 2MB */
-			nslabs = max(1024UL, (nslabs >> 1));
+			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
 			bytes = nslabs << IO_TLB_SHIFT;
 			pr_info("Lowering to %luMB\n", bytes >> 20);
 			goto retry;
 		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
 	}
 
-	if (swiotlb_init_with_tbl(start, nslabs, false))
+	if (swiotlb_init_with_tbl(start, nslabs, true))
 		panic("Cannot allocate SWIOTLB buffer");
 	swiotlb_set_max_segment(PAGE_SIZE);
 }
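
The retry sizing shared by the two error paths above can be checked numerically: each retry halves nslabs, re-aligns it to IO_TLB_SEGSIZE, and clamps it at 1024 slabs (2 MB of 2 KB slabs), which is also where the new "nslabs > 1024" test stops further retries. The stand-alone program below is illustrative only: it copies the kernel's IO_TLB_SHIFT and IO_TLB_SEGSIZE values but uses local ALIGN_UP and MAX helpers instead of the kernel macros, and assumes the default 64 MB buffer.

/* Illustrative only -- mirrors the retry arithmetic, not kernel code. */
#include <stdio.h>

#define IO_TLB_SHIFT	11		/* 2 KB per IO-TLB slab */
#define IO_TLB_SEGSIZE	128
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	/* Assume the default 64 MB buffer, as in the dropped DEFAULT_NSLABS. */
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;

	printf("start with %lu slabs = %lu MB\n",
	       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);

	while (nslabs > 1024) {
		nslabs = MAX(1024UL, ALIGN_UP(nslabs >> 1, IO_TLB_SEGSIZE));
		printf("retry with %lu slabs = %lu MB\n",
		       nslabs, (nslabs << IO_TLB_SHIFT) >> 20);
	}
	return 0;
}

Starting from 32768 slabs (64 MB), this prints retries at 32, 16, 8, 4 and finally 2 MB, after which the size check prevents further shrinking, matching the "Min is 2MB" comment in both functions.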
