@@ -106,25 +106,7 @@ def get_namespaces(self, force_run: bool = False) -> List[str]:
106106 namespaces_cli.append(matched_result.group("namespace"))
107107 return namespaces_cli
108108
109- def get_devices(self, force_run: bool = False) -> Any:
110- # get nvme devices information ignoring stderror
111- nvme_list = self.run(
112- "list -o json 2>/dev/null",
113- shell=True,
114- sudo=True,
115- force_run=force_run,
116- no_error_log=True,
117- )
118- # NVMe list command returns empty string when no NVMe devices are found.
119- if not nvme_list.stdout:
120- raise LisaException(
121- "No NVMe devices found. "
122- "The 'nvme list' command returned an empty string."
123- )
124- nvme_devices = json.loads(nvme_list.stdout)
125- return nvme_devices["Devices"]
126-
127- def get_disks(self, force_run: bool = False) -> List[str]:
109+ def get_devices(self, force_run: bool = False) -> Dict[str, int]:
128110 """
129111 Return NVMe device nodes/paths (`/dev/...`) robustly across nvme-cli schemas.
130112
@@ -139,7 +121,7 @@ def get_disks(self, force_run: bool = False) -> List[str]:
139121 emit `DevicePath`. This logic supports both.
140122
141123 Returns:
142- List[str]: device nodes/paths like `/dev/nvme0n1`
124+ Dict[str, int]: Mapping of NVMe device paths to their namespace IDs.
143125 """
144126 # NVMe namespace IDs are unique for each disk under any NVMe controller.
145127 # These are useful for detecting the LUN ID of remote Azure disks.
@@ -185,238 +167,102 @@ def get_disks(self, force_run: bool = False) -> List[str]:
185167 # /dev/nvme1n1 68e8d42a7ed4e5f90002 Microsoft NVMe Direct Disk v2 1 472.45 GB / 472.45 GB 512 B + 0 B NVMDV00 # noqa: E501
186168 # /dev/nvme2n1 68e8d42a7ed4e5f90001 Microsoft NVMe Direct Disk v2 1 472.45 GB / 472.45 GB 512 B + 0 B NVMDV00 # noqa: E501
187169 #
188- # Another example output of nvme -list -o json without DevicePath key
189- # cmd: ['sudo', 'sh', '-c', 'nvme list -o json 2>/dev/null']
190- # "Devices":[
191- # {
192- # "HostNQN":"nqn.2014-08.org.nvmexpress:uuid:ec2bfbbc-632e-0494-048e-31ebc97bd499",
193- # "HostID":"ec2bfbbc-632e-0494-048e-31ebc97bd499",
194- # "Subsystems":[
195- # {
196- # "Subsystem":"nvme-subsys0",
197- # "SubsystemNQN":"nqn.2014-08.org.nvmexpress:uuid:7ad35d50-c05b-47ab-b3a0-56a9a845852b",
198- # "Controllers":[
199- # {
200- # "Controller":"nvme0",
201- # "Cntlid":"0",
202- # "SerialNumber":"SN: 00000",
203- # "ModelNumber":"MSFT NVMe Accelerator v1.0",
204- # "Firmware":"v1.00000",
205- # "Transport":"pcie",
206- # "Address":"c05b:00:00.0",
207- # "Slot":"2060672336",
208- # "Namespaces":[
209- # {
210- # "NameSpace":"nvme0n1",
211- # "Generic":"ng0n1",
212- # "NSID":1,
213- # "UsedBytes":68719476736,
214- # "MaximumLBA":134217728,
215- # "PhysicalSize":68719476736,
216- # "SectorSize":512
217- # },
218- # {
219- # "NameSpace":"nvme0n10",
220- # "Generic":"ng0n10",
221- # "NSID":10,
222- # "UsedBytes":1099511627776,
223- # "MaximumLBA":2147483648,
224- # "PhysicalSize":1099511627776,
225- # "SectorSize":512
226- # },
227- # {
228- # "NameSpace":"nvme0n11",
229- # "Generic":"ng0n11",
230- # "NSID":11,
231- # "UsedBytes":1099511627776,
232- # "MaximumLBA":2147483648,
233- # "PhysicalSize":1099511627776,
234- # "SectorSize":512
235- # },
236- # {
237- # "NameSpace":"nvme0n12",
238- # "Generic":"ng0n12",
239- # "NSID":12,
240- # "UsedBytes":1099511627776,
241- # "MaximumLBA":2147483648,
242- # "PhysicalSize":1099511627776,
243- # "SectorSize":512
244- # },
245- # {
246- # "NameSpace":"nvme0n13",
247- # "Generic":"ng0n13",
248- # "NSID":13,
249- # "UsedBytes":1099511627776,
250- # "MaximumLBA":2147483648,
251- # "PhysicalSize":1099511627776,
252- # "SectorSize":512
253- # },
254- # {
255- # "NameSpace":"nvme0n14",
256- # "Generic":"ng0n14",
257- # "NSID":14,
258- # "UsedBytes":1099511627776,
259- # "MaximumLBA":2147483648,
260- # "PhysicalSize":1099511627776,
261- # "SectorSize":512
262- # },
263- # {
264- # "NameSpace":"nvme0n15",
265- # "Generic":"ng0n15",
266- # "NSID":15,
267- # "UsedBytes":1099511627776,
268- # "MaximumLBA":2147483648,
269- # "PhysicalSize":1099511627776,
270- # "SectorSize":512
271- # },
272- # {
273- # "NameSpace":"nvme0n16",
274- # "Generic":"ng0n16",
275- # "NSID":16,
276- # "UsedBytes":1099511627776,
277- # "MaximumLBA":2147483648,
278- # "PhysicalSize":1099511627776,
279- # "SectorSize":512
280- # },
281- # {
282- # "NameSpace":"nvme0n17",
283- # "Generic":"ng0n17",
284- # "NSID":17,
285- # "UsedBytes":1099511627776,
286- # "MaximumLBA":2147483648,
287- # "PhysicalSize":1099511627776,
288- # "SectorSize":512
289- # },
290- # {
291- # "NameSpace":"nvme0n2",
292- # "Generic":"ng0n2",
293- # "NSID":2,
294- # "UsedBytes":1099511627776,
295- # "MaximumLBA":2147483648,
296- # "PhysicalSize":1099511627776,
297- # "SectorSize":512
298- # },
299- # {
300- # "NameSpace":"nvme0n3",
301- # "Generic":"ng0n3",
302- # "NSID":3,
303- # "UsedBytes":1099511627776,
304- # "MaximumLBA":2147483648,
305- # "PhysicalSize":1099511627776,
306- # "SectorSize":512
307- # },
308- # {
309- # "NameSpace":"nvme0n4",
310- # "Generic":"ng0n4",
311- # "NSID":4,
312- # "UsedBytes":1099511627776,
313- # "MaximumLBA":2147483648,
314- # "PhysicalSize":1099511627776,
315- # "SectorSize":512
316- # },
317- # {
318- # "NameSpace":"nvme0n5",
319- # "Generic":"ng0n5",
320- # "NSID":5,
321- # "UsedBytes":1099511627776,
322- # "MaximumLBA":2147483648,
323- # "PhysicalSize":1099511627776,
324- # "SectorSize":512
325- # },
326- # {
327- # "NameSpace":"nvme0n6",
328- # "Generic":"ng0n6",
329- # "NSID":6,
330- # "UsedBytes":1099511627776,
331- # "MaximumLBA":2147483648,
332- # "PhysicalSize":1099511627776,
333- # "SectorSize":512
334- # },
335- # {
336- # "NameSpace":"nvme0n7",
337- # "Generic":"ng0n7",
338- # "NSID":7,
339- # "UsedBytes":1099511627776,
340- # "MaximumLBA":2147483648,
341- # "PhysicalSize":1099511627776,
342- # "SectorSize":512
343- # },
344- # {
345- # "NameSpace":"nvme0n8",
346- # "Generic":"ng0n8",
347- # "NSID":8,
348- # "UsedBytes":1099511627776,
349- # "MaximumLBA":2147483648,
350- # "PhysicalSize":1099511627776,
351- # "SectorSize":512
352- # },
353- # {
354- # "NameSpace":"nvme0n9",
355- # "Generic":"ng0n9",
356- # "NSID":9,
357- # "UsedBytes":1099511627776,
358- # "MaximumLBA":2147483648,
359- # "PhysicalSize":1099511627776,
360- # "SectorSize":512
361- # }
362- # ],
363- # "Paths":[
364- # ]
365- # }
366- # ],
367- # "Namespaces":[
368- # ]
369- # }
370- # ]
371- # }
372- # ]
373- # }
374- nvme_devices = self.get_devices(force_run=force_run)  # raw ["Devices"]
375- device_paths = []
376-
377- def _add(path: str) -> None:
378- if isinstance(path, str) and path.startswith("/dev/") and len(path) > 5:
379- device_paths.append(path)
170+ # Another example output of 'nvme list -o json' without the DevicePath key; this is the new schema emitted by newer versions of nvme-cli:
171+ # root@lisa--170-e0-n0:/home/lisa# nvme -list
172+ # Node Generic SN Model Namespace Usage Format FW Rev
173+ # --------------------- --------------------- -------------------- ---------------------------------------- ---------- -------------------------- ---------------- --------
174+ # /dev/nvme0n1 /dev/ng0n1 SN: 00000 MSFT NVMe Accelerator v1.0 0x1 68.72 GB / 68.72 GB 512 B + 0 B v1.00000
175+ # root@lisa--170-e0-n0:/home/lisa# nvme list -o json 2>/dev/null
176+ # {
177+ # "Devices":[
178+ # {
179+ # "HostNQN":"nqn.2014-08.org.nvmexpress:uuid:ec2bfbbc-632e-0494-048e-31ebc97bd499",
180+ # "HostID":"ec2bfbbc-632e-0494-048e-31ebc97bd499",
181+ # "Subsystems":[
182+ # {
183+ # "Subsystem":"nvme-subsys0",
184+ # "SubsystemNQN":"nqn.2014-08.org.nvmexpress:uuid:7ad35d50-c05b-47ab-b3a0-56a9a845852b",
185+ # "Controllers":[
186+ # {
187+ # "Controller":"nvme0",
188+ # "Cntlid":"0",
189+ # "SerialNumber":"SN: 00000",
190+ # "ModelNumber":"MSFT NVMe Accelerator v1.0",
191+ # "Firmware":"v1.00000",
192+ # "Transport":"pcie",
193+ # "Address":"c05b:00:00.0",
194+ # "Slot":"2060672336",
195+ # "Namespaces":[
196+ # {
197+ # "NameSpace":"nvme0n1",
198+ # "Generic":"ng0n1",
199+ # "NSID":1,
200+ # "UsedBytes":68719476736,
201+ # "MaximumLBA":134217728,
202+ # "PhysicalSize":68719476736,
203+ # "SectorSize":512
204+ # }
205+ # ],
206+ # "Paths":[
207+ # ]
208+ # }
209+ # ],
210+ # "Namespaces":[
211+ # ]
212+ # }
213+ # ]
214+ # }
215+ # ]
216+ # }
217+ # get nvme devices information, ignoring stderr
218+ nvme_list = self.run(
219+ "list -o json 2>/dev/null",
220+ shell=True,
221+ sudo=True,
222+ force_run=force_run,
223+ no_error_log=True,
224+ )
225+ # NVMe list command returns an empty string when no NVMe devices are found.
226+ if not nvme_list.stdout:
227+ raise LisaException(
228+ "No NVMe devices found. "
229+ "The 'nvme list' command returned an empty string."
230+ )
231+ nvme_devices = json.loads(nvme_list.stdout).get("Devices", [])
232+ device_paths_namespace_ids: Dict[str, int] = {}
233+
234+ def _add(device_path: str, namespace_id: int) -> None:
235+ if isinstance(device_path, str) and device_path.startswith("/dev/") and len(device_path) > 5:
236+ device_paths_namespace_ids[device_path] = namespace_id
380237
381238 for nvme_device in nvme_devices or []:
382239 # Legacy schema (flat fields):
383- _add(nvme_device.get("DevicePath"))
384- # _add(nvme_device.get("GenericPath"))
240+ if "DevicePath" in nvme_device and "NameSpace" in nvme_device:
    _add(nvme_device["DevicePath"], int(nvme_device["NameSpace"]))
385241
386242 # New schema: Subsystems → Controllers → Namespaces
387243 for subsystem in nvme_device.get("Subsystems") or []:
388244 for controller in (subsystem or {}).get("Controllers") or []:
389245 for namespace in (controller or {}).get("Namespaces") or []:
390- namespace_name = namespace.get("NameSpace")  # e.g., "nvme0n1"
391- # generic_name = namespace.get("Generic ") # e.g., "ng0n1"
246+ namespace_name = namespace.get("NameSpace")  # e.g., "nvme0n1"
247+ namespace_id = int(namespace.get("NSID", 0))  # e.g., 1, 2, ...
392248 if isinstance(namespace_name, str) and namespace_name:
393- _add(f"/dev/{namespace_name}")
394- # if isinstance(generic_name, str) and generic_name:
395- # _add(f"/dev/{generic_name}")
249+ _add(f"/dev/{namespace_name}", namespace_id)
396250
397- device_paths = sorted(set(device_paths))
398- if not device_paths:
251+ if not device_paths_namespace_ids:
399252 raise LisaException(
400253 "No NVMe device nodes could be derived from 'nvme list -o json'."
401254 )
255+ return device_paths_namespace_ids
256+
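For reference, here is a minimal standalone sketch of the schema handling implemented above, run against a trimmed copy of the new-schema sample shown in the comments; `parse_nvme_list` and `SAMPLE` are illustrative names for this sketch only, not part of the tool:

```python
import json
from typing import Dict

# Trimmed new-schema sample (no DevicePath key), as in the comment block above.
SAMPLE = """
{
  "Devices": [
    {
      "Subsystems": [
        {
          "Controllers": [
            {
              "Namespaces": [
                {"NameSpace": "nvme0n1", "NSID": 1},
                {"NameSpace": "nvme0n2", "NSID": 2}
              ]
            }
          ]
        }
      ]
    }
  ]
}
"""


def parse_nvme_list(raw: str) -> Dict[str, int]:
    # Mirrors get_devices(): handle both the legacy flat schema and the
    # nested Subsystems -> Controllers -> Namespaces schema.
    result: Dict[str, int] = {}
    for device in json.loads(raw).get("Devices", []):
        if "DevicePath" in device and "NameSpace" in device:
            result[device["DevicePath"]] = int(device["NameSpace"])
        for subsystem in device.get("Subsystems") or []:
            for controller in (subsystem or {}).get("Controllers") or []:
                for namespace in (controller or {}).get("Namespaces") or []:
                    name = namespace.get("NameSpace")
                    if isinstance(name, str) and name:
                        result[f"/dev/{name}"] = int(namespace.get("NSID", 0))
    return result


print(parse_nvme_list(SAMPLE))  # {'/dev/nvme0n1': 1, '/dev/nvme0n2': 2}
```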
257+ def get_disks(self, force_run: bool = False) -> List[str]:
260+ device_paths = sorted(self.get_devices(force_run=force_run).keys())
261+ return device_paths
403262
404263 def get_namespace_ids(self, force_run: bool = False) -> List[Dict[str, int]]:
405- nvme_devices = self.get_devices(force_run=force_run)
406- # Older versions of nvme-cli do not have the NameSpace key in the output
407- # skip the test if NameSpace key is not available
408- if not nvme_devices:
409- raise LisaException("No NVMe devices found. Unable to get namespace ids.")
410- if "NameSpace" not in nvme_devices[0]:
411- raise LisaException(
412- "The version of nvme-cli is too old,"
413- " it doesn't support to get namespace ids."
414- )
415-
416- return [
417- {device["DevicePath"]: int(device["NameSpace"])} for device in nvme_devices
418- ]
419-
264+ device_paths_namespace_ids_map = self.get_devices(force_run=force_run)
265+ return [{path: nsid} for path, nsid in device_paths_namespace_ids_map.items()]
420266
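A rough usage sketch of the reworked methods, assuming a LISA test case with an initialized `node`; the import paths and returned values below are illustrative assumptions, not confirmed by this change:

```python
from typing import Dict, List

from lisa import Node  # assumed import path for this sketch
from lisa.tools import Nvmecli  # assumed import path for this sketch


def collect_nvme_info(node: Node) -> Dict[str, int]:
    nvme = node.tools[Nvmecli]
    # Paths mapped to namespace IDs, e.g. {"/dev/nvme0n1": 1, "/dev/nvme0n2": 2}.
    devices: Dict[str, int] = nvme.get_devices()
    # Sorted device paths derived from the same mapping.
    disks: List[str] = nvme.get_disks()
    # One {path: nsid} dict per device, e.g. [{"/dev/nvme0n1": 1}, {"/dev/nvme0n2": 2}].
    namespace_ids: List[Dict[str, int]] = nvme.get_namespace_ids()
    assert set(disks) == set(devices)
    assert len(namespace_ids) == len(devices)
    return devices
```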
421267 class BSDNvmecli(Nvmecli):
422268 # nvme0ns1 (1831420MB)