Merge tag 'hyperv-fixes-signed-20210722' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

 - bug fix from Haiyang for vmbus CPU assignment

 - revert of a bogus patch that went into 5.14-rc1

* tag 'hyperv-fixes-signed-20210722' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  Revert "x86/hyperv: fix logical processor creation"
  Drivers: hv: vmbus: Fix duplicate CPU assignments within a device
torvalds committed Jul 22, 2021
2 parents 4784dc9 + f5a11c6 commit 7c14e4d
Showing 2 changed files with 65 additions and 33 deletions.
arch/x86/kernel/cpu/mshyperv.c (2 changes: 1 addition & 1 deletion)
@@ -237,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
 	for_each_present_cpu(i) {
 		if (i == 0)
 			continue;
-		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
+		ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
 		BUG_ON(ret);
 	}

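A note on the change above, reading the diff rather than quoting it: the revert restores cpu_physical_id(i), the CPU's APIC ID, as the final argument, the point being that the logical CPU number the loop iterates over and the APIC ID the hypercall expects are separate namespaces that only sometimes coincide. A toy userspace sketch of that distinction, with an invented topology:

#include <stdio.h>

int main(void)
{
	/* Invented topology (e.g. SMT disabled): logical CPU i has APIC ID 2*i. */
	unsigned int apic_id[] = { 0, 2, 4, 6 };

	for (unsigned int i = 1; i < 4; i++)
		printf("logical cpu %u -> apic id %u (passing i as the APIC ID would be wrong here)\n",
		       i, apic_id[i]);
	return 0;
}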
drivers/hv/channel_mgmt.c (96 changes: 64 additions & 32 deletions)
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		if (guid_equal(&channel->offermsg.offer.if_type,
+			       &newchannel->offermsg.offer.if_type) &&
+		    guid_equal(&channel->offermsg.offer.if_instance,
+			       &newchannel->offermsg.offer.if_instance)) {
+			fnew = false;
+			newchannel->primary_channel = channel;
+			break;
+		}
+	}
+
 	init_vp_index(newchannel);
 
 	/* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	atomic_dec(&vmbus_connection.offer_in_progress);
 
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-		if (guid_equal(&channel->offermsg.offer.if_type,
-			       &newchannel->offermsg.offer.if_type) &&
-		    guid_equal(&channel->offermsg.offer.if_instance,
-			       &newchannel->offermsg.offer.if_instance)) {
-			fnew = false;
-			break;
-		}
-	}
-
 	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		/*
 		 * Process the sub-channel.
 		 */
-		newchannel->primary_channel = channel;
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
 	}
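Taken together, the three hunks above relocate the duplicate-offer scan so that it runs before init_vp_index(), and set newchannel->primary_channel during the scan rather than later in the sub-channel branch. My reading, which the hv_cpuself_used() helper added below supports, is that the new channel must already know its primary (and hence its sibling channels) by the time a CPU is chosen. The device-identity test itself is plain GUID equality on the (if_type, if_instance) pair; a minimal compilable sketch with stand-in types (guid_t and struct offer here are invented approximations, not the kernel definitions):

#include <stdbool.h>
#include <string.h>

typedef struct { unsigned char b[16]; } guid_t;	/* stand-in, not the kernel guid_t */
struct offer { guid_t if_type; guid_t if_instance; };

/* Two offers describe the same VMBus device only when both GUIDs match;
 * a later matching offer is then a sub-channel of that device. */
static bool same_device(const struct offer *a, const struct offer *b)
{
	return !memcmp(&a->if_type, &b->if_type, sizeof(guid_t)) &&
	       !memcmp(&a->if_instance, &b->if_instance, sizeof(guid_t));
}

int main(void)
{
	struct offer x = { 0 }, y = { 0 };
	return same_device(&x, &y) ? 0 : 1;	/* identical GUIDs: same device */
}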

@@ -683,6 +683,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	queue_work(wq, &newchannel->add_channel_work);
 }
 
+/*
+ * Check if CPUs used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+	struct vmbus_channel *primary = chn->primary_channel;
+	struct vmbus_channel *sc;
+
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+	if (!primary)
+		return false;
+
+	if (primary->target_cpu == cpu)
+		return true;
+
+	list_for_each_entry(sc, &primary->sc_list, sc_list)
+		if (sc != chn && sc->target_cpu == cpu)
+			return true;
+
+	return false;
+}
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
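A userspace re-sketch of the hv_cpuself_used() helper just added, illustrative only: a plain pointer chain stands in for the kernel's sc_list, and the type and field names are invented.

#include <stdbool.h>
#include <stddef.h>

struct channel {
	unsigned int target_cpu;
	struct channel *primary;	/* NULL when this is itself a primary channel */
	struct channel *next_sc;	/* simplified stand-in for the sc_list links */
};

/* True when @cpu already serves the primary channel or a sibling
 * sub-channel of the same device; this is the condition the reworked
 * init_vp_index() loop below retries on. */
static bool cpu_used_by_same_device(unsigned int cpu, const struct channel *chn)
{
	const struct channel *primary = chn->primary;

	if (!primary)
		return false;
	if (primary->target_cpu == cpu)
		return true;
	for (const struct channel *sc = primary->next_sc; sc; sc = sc->next_sc)
		if (sc != chn && sc->target_cpu == cpu)
			return true;
	return false;
}

int main(void)
{
	struct channel prim = { .target_cpu = 0, .primary = NULL, .next_sc = NULL };
	struct channel sub = { .target_cpu = 1, .primary = &prim, .next_sc = NULL };

	prim.next_sc = &sub;
	return cpu_used_by_same_device(0, &sub) ? 0 : 1;	/* CPU 0 is taken */
}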
@@ -702,6 +726,7 @@ static int next_numa_node_id;
 static void init_vp_index(struct vmbus_channel *channel)
 {
 	bool perf_chn = hv_is_perf_channel(channel);
+	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
 	struct cpumask *alloced_mask;
 	u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
 		return;
 	}
 
-	while (true) {
-		numa_node = next_numa_node_id++;
-		if (numa_node == nr_node_ids) {
-			next_numa_node_id = 0;
-			continue;
-		}
-		if (cpumask_empty(cpumask_of_node(numa_node)))
-			continue;
-		break;
-	}
-	alloced_mask = &hv_context.hv_numa_map[numa_node];
+	for (i = 1; i <= ncpu + 1; i++) {
+		while (true) {
+			numa_node = next_numa_node_id++;
+			if (numa_node == nr_node_ids) {
+				next_numa_node_id = 0;
+				continue;
+			}
+			if (cpumask_empty(cpumask_of_node(numa_node)))
+				continue;
+			break;
+		}
+		alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_weight(alloced_mask) ==
-	    cpumask_weight(cpumask_of_node(numa_node))) {
-		/*
-		 * We have cycled through all the CPUs in the node;
-		 * reset the alloced map.
-		 */
-		cpumask_clear(alloced_mask);
-	}
-
-	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+		if (cpumask_weight(alloced_mask) ==
+		    cpumask_weight(cpumask_of_node(numa_node))) {
+			/*
+			 * We have cycled through all the CPUs in the node;
+			 * reset the alloced map.
+			 */
+			cpumask_clear(alloced_mask);
+		}
+		cpumask_xor(available_mask, alloced_mask,
+			    cpumask_of_node(numa_node));
 
-	target_cpu = cpumask_first(available_mask);
-	cpumask_set_cpu(target_cpu, alloced_mask);
+		target_cpu = cpumask_first(available_mask);
+		cpumask_set_cpu(target_cpu, alloced_mask);
+
+		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
+			break;
+	}
 
 	channel->target_cpu = target_cpu;
 
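In words, the reworked loop draws round-robin candidates and accepts the first CPU not already used by another channel of the same device, settling for the most recent candidate once it has made ncpu + 1 draws; the sub_channel_index >= ncpu test accepts immediately when a device already has more channels than there are CPUs, since a duplicate is then unavoidable. A toy model of that bounded retry, not kernel code, with the round-robin rotation and the in-use test stubbed out:

#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

static unsigned int next_cpu;	/* stub for the per-NUMA-node alloced-mask rotation */

/* Bounded retry mirroring the new loop: take the first candidate the
 * device is not already using; after NCPU + 1 draws, settle for the
 * last one. (The sub_channel_index >= ncpu early-out is omitted.) */
static unsigned int pick_cpu(bool (*used_by_same_device)(unsigned int))
{
	unsigned int target = 0;

	for (unsigned int i = 1; i <= NCPU + 1; i++) {
		target = next_cpu++ % NCPU;
		if (i > NCPU || !used_by_same_device(target))
			break;
	}
	return target;
}

static bool cpus_0_and_1_taken(unsigned int cpu)
{
	return cpu < 2;
}

int main(void)
{
	/* CPUs 0 and 1 already serve channels of this device; expect CPU 2. */
	printf("picked cpu %u\n", pick_cpu(cpus_0_and_1_taken));
	return 0;
}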
