Change fixed huge page size to the host's default huge page size
Signed-off-by: liang-cong-red-hat <[email protected]>
liang-cong-red-hat committed Dec 30, 2024
1 parent 3808cba commit 4ed7e4a
Showing 2 changed files with 30 additions and 19 deletions.
5 changes: 4 additions & 1 deletion libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg
@@ -6,5 +6,8 @@
     variants:
         - default:
             err_msg = 'unable to map backing store for guest RAM: Cannot allocate memory'
-            nodes_pages = ['900', '300']
+            current_memory_size = 4194304
+            memory_size = 4194304
+            nodes_memory = ['1572864', '524288']
+            memory_mode = "strict"

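The new nodes_memory values are per-node hugepage memory amounts in kB rather than fixed page counts, so the number of pages the test reserves now depends on the host's default hugepage size. A minimal sketch of that conversion, assuming a 2048 kB default hugepage size (the values come from the cfg above; the variable names are illustrative):

# Convert per-node hugepage memory (kB) into page counts, mirroring the
# pages = int(int(memory_size) / hugepage_size) arithmetic in the test.
nodes_memory = ['1572864', '524288']   # per-node hugepage memory from the cfg, in kB
hugepage_size = 2048                   # assumed host default hugepage size, in kB

pages_per_node = [int(int(memory) / hugepage_size) for memory in nodes_memory]
print(pages_per_node)                  # [768, 256]

On a host whose default hugepage size is 1048576 kB (1 GiB), the same values would yield only 1 and 0 pages, which illustrates why the cfg now specifies memory amounts instead of hard-coded page counts.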
44 changes: 26 additions & 18 deletions libvirt/tests/src/numa/numa_numanode_cpu_info.py
@@ -7,6 +7,7 @@
 from virttest import utils_misc
 from virttest import utils_test
 from virttest import virsh
+from virttest.staging import utils_memory


 # Using as lower capital is not the best way to do, but this is just a
@@ -23,45 +24,51 @@ def update_xml(vm_name, online_nodes, params):
     """
     vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
     memory_mode = params.get("memory_mode")
+    memory_size = int(params.get("memory_size"))
+    current_memory_size = int(params.get("current_memory_size"))
     numa_memory = {'mode': memory_mode,
                    'nodeset': online_nodes[1]}
     vmxml.numa_memory = numa_memory
     mb_xml = libvirt_xml.vm_xml.VMMemBackingXML()
     mb_xml.hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
     vmxml.mb = mb_xml
+    vmxml.memory = memory_size
+    vmxml.current_mem = current_memory_size
     logging.debug("vm xml is %s", vmxml)
     vmxml.sync()


-def setup_host(required_node_num, online_nodes, pages_list, ori_page_set):
+def setup_host(required_node_num, online_nodes, memory_list, ori_page_set):
     """
     Setup host for test - update number of hugepages and check
     :param required_node_num: int, numa node number at least on the host required by the test
     :param online_nodes: List of all online nodes with memory available
-    :param pages_list: List of required number of pages for particular nodes
+    :param memory_list: List of required hugepage memory for particular nodes
     :param ori_page_set: A dict used to save original node page
     """
     index = 0

     if len(online_nodes) >= required_node_num:
-        for pages in pages_list:
+        hugepage_size = utils_memory.get_huge_page_size()
+        for memory_size in memory_list:
             ori_page_set[online_nodes[index]] = process.run(
-                'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(online_nodes[index]), shell=True).stdout_text.strip()
+                'cat /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(online_nodes[index], hugepage_size), shell=True).stdout_text.strip()
             logging.debug("ori_page_set is {}".format(ori_page_set))
+            pages = int(int(memory_size) / hugepage_size)
             ret = process.run(
-                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(pages, online_nodes[index]), shell=True)
+                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(pages, online_nodes[index], hugepage_size), shell=True)
             if ret.exit_status:
-                raise TestError('Cannot set {} hugepages on node {}'.
-                                format(pages, online_nodes[index]))
+                raise TestError('Cannot set {} pages for {}kB huge page on node {}'.
+                                format(pages, hugepage_size, online_nodes[index]))
             ret = process.run(
-                'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(online_nodes[index]), shell=True)
-            if pages not in ret.stdout_text:
-                raise TestError('Setting {} hugepages on node {} was unsuccessful'.
-                                format(pages, online_nodes[index]))
+                'cat /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(online_nodes[index], hugepage_size), shell=True)
+            if str(pages) not in ret.stdout_text:
+                raise TestError('Setting {} pages for {}kB huge page on node {} was unsuccessful'.
+                                format(pages, hugepage_size, online_nodes[index]))
             index += 1
     else:
         raise TestCancel("The test cannot continue since there is no enough "
@@ -75,13 +82,13 @@ def run(test, params, env):
     vm_name = params.get("main_vm")
     vm = env.get_vm(vm_name)
     error_message = params.get("err_msg")
-    pages_list = eval(params.get('nodes_pages'))
+    node_memory_list = eval(params.get('nodes_memory'))
     backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
     numa_info = utils_misc.NumaInfo()
     online_nodes = numa_info.get_online_nodes_withmem()
     ori_page_set = {}
     required_numa_node_num = int(params.get("numa_cells_with_memory_required", '2'))
-    setup_host(required_numa_node_num, online_nodes, pages_list, ori_page_set)
+    setup_host(required_numa_node_num, online_nodes, node_memory_list, ori_page_set)
     try:
         if vm.is_alive():
             vm.destroy()
@@ -94,8 +101,9 @@ def run(test, params, env):
     except Exception as e:
         test.error("Unexpected error: {}".format(e))
     finally:
+        hugepage_size = utils_memory.get_huge_page_size()
         for node_index, ori_page in ori_page_set.items():
             process.run(
-                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'.
-                format(ori_page, node_index), shell=True)
+                'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-{}kB/nr_hugepages'.
+                format(ori_page, node_index, hugepage_size), shell=True)
         backup_xml.sync()

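The core change above is that both the hugepage sysfs path and the page-count arithmetic are derived from utils_memory.get_huge_page_size() instead of a hard-coded 2048 kB. A rough, self-contained sketch of the same idea, assuming the default size can be read from /proc/meminfo and that the per-node sysfs layout follows the usual hugepages-<size>kB naming (the helper names below are illustrative, not the avocado-vt implementation):

import re

def default_hugepage_size_kb(meminfo_path='/proc/meminfo'):
    """Return the host's default hugepage size in kB (e.g. 2048 or 1048576)."""
    with open(meminfo_path) as handle:
        match = re.search(r'^Hugepagesize:\s+(\d+)\s+kB', handle.read(), re.M)
    if not match:
        raise RuntimeError('Hugepagesize not found in {}'.format(meminfo_path))
    return int(match.group(1))

def node_hugepage_path(node, hugepage_size_kb):
    """Build the nr_hugepages sysfs path for one NUMA node and one page size."""
    return ('/sys/devices/system/node/node{}/hugepages/'
            'hugepages-{}kB/nr_hugepages'.format(node, hugepage_size_kb))

size_kb = default_hugepage_size_kb()
print(node_hugepage_path(0, size_kb))
# e.g. /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages

Re-reading the size in the finally block matters for the same reason: the cleanup must write the saved nr_hugepages values back into the same hugepages-<size>kB directory that setup_host modified.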