
Reference for ultralytics/utils/autodevice.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/autodevice.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.utils.autodevice.GPUInfo

GPUInfo()

Manages NVIDIA GPU information via pynvml with robust error handling.

Provides methods to query detailed GPU statistics (utilization, memory, temperature, power) and select the most idle GPUs based on configurable criteria. It safely handles the absence or initialization failure of the pynvml library by logging warnings and disabling related features, preventing application crashes.

Includes fallback logic using torch.cuda for basic device counting if NVML is unavailable during GPU selection. Manages NVML initialization and shutdown internally.
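
For illustration, a minimal sketch of what such a torch.cuda fallback for basic device counting could look like (the helper and its name are assumptions for illustration, not the module's actual code):

import torch

def fallback_device_indices(count: int = 1) -> list:
    """Hypothetical helper: return up to `count` CUDA device indices using torch.cuda only."""
    if not torch.cuda.is_available():
        return []  # no CUDA devices visible to PyTorch
    return list(range(min(count, torch.cuda.device_count())))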

Attributes:

pynvml (module | None): The pynvml module if successfully imported and initialized, otherwise None.

nvml_available (bool): Indicates whether pynvml is ready for use. True if import and nvmlInit() succeeded, False otherwise.

gpu_stats (List[Dict[str, Any]]): A list of dictionaries, each holding stats for one GPU. Populated on initialization and by refresh_stats(). Keys include: 'index', 'name', 'utilization' (%), 'memory_used' (MiB), 'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W), 'power_limit' (W or 'N/A'). Empty if NVML is unavailable or queries fail.
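
Each gpu_stats entry is a plain dict keyed as described above; a hedged, illustrative example (values are made up, not real readings):

example_entry = {
    "index": 0,
    "name": "NVIDIA GeForce RTX 4090",  # illustrative device name
    "utilization": 3,        # %
    "memory_used": 512,      # MiB
    "memory_total": 24564,   # MiB
    "memory_free": 24052,    # MiB
    "temperature": 41,       # C
    "power_draw": 28,        # W
    "power_limit": 450,      # W, or 'N/A' if unsupported
}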

Methods:

refresh_stats: Refresh the internal gpu_stats list by querying NVML.

print_status: Print GPU status in a compact table format using current stats.

select_idle_gpu: Select the most idle GPUs based on utilization and free memory.

shutdown: Shut down NVML if it was initialized.

Examples:

Initialize GPUInfo and print status

>>> gpu_info = GPUInfo()
>>> gpu_info.print_status()

Select idle GPUs with minimum memory requirements

>>> selected = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2)
>>> print(f"Selected GPU indices: {selected}")
Source code in ultralytics/utils/autodevice.py
def __init__(self):
    """Initialize GPUInfo, attempting to import and initialize pynvml."""
    self.pynvml: Optional[Any] = None
    self.nvml_available: bool = False
    self.gpu_stats: List[Dict[str, Any]] = []

    try:
        check_requirements("pynvml>=12.0.0")
        self.pynvml = __import__("pynvml")
        self.pynvml.nvmlInit()
        self.nvml_available = True
        self.refresh_stats()
    except Exception as e:
        LOGGER.warning(f"Failed to initialize pynvml, GPU stats disabled: {e}")

__del__

__del__()

Ensure NVML is shut down when the object is garbage collected.

Source code in ultralytics/utils/autodevice.py
def __del__(self):
    """Ensure NVML is shut down when the object is garbage collected."""
    self.shutdown()

print_status

print_status()

Print GPU status in a compact table format using current stats.

Source code in ultralytics/utils/autodevice.py
def print_status(self):
    """Print GPU status in a compact table format using current stats."""
    self.refresh_stats()
    if not self.gpu_stats:
        LOGGER.warning("No GPU stats available.")
        return

    stats = self.gpu_stats
    name_len = max(len(gpu.get("name", "N/A")) for gpu in stats)
    hdr = f"{'Idx':<3} {'Name':<{name_len}} {'Util':>6} {'Mem (MiB)':>15} {'Temp':>5} {'Pwr (W)':>10}"
    LOGGER.info(f"\n--- GPU Status ---\n{hdr}\n{'-' * len(hdr)}")

    for gpu in stats:
        u = f"{gpu['utilization']:>5}%" if gpu["utilization"] >= 0 else " N/A "
        m = f"{gpu['memory_used']:>6}/{gpu['memory_total']:<6}" if gpu["memory_used"] >= 0 else " N/A / N/A "
        t = f"{gpu['temperature']}C" if gpu["temperature"] >= 0 else " N/A "
        p = f"{gpu['power_draw']:>3}/{gpu['power_limit']:<3}" if gpu["power_draw"] >= 0 else " N/A "

        LOGGER.info(f"{gpu.get('index'):<3d} {gpu.get('name', 'N/A'):<{name_len}} {u:>6} {m:>15} {t:>5} {p:>10}")

    LOGGER.info(f"{'-' * len(hdr)}\n")

refresh_stats

refresh_stats()

Refresh the internal gpu_stats list by querying NVML.

Source code in ultralytics/utils/autodevice.py
def refresh_stats(self):
    """Refresh the internal gpu_stats list by querying NVML."""
    self.gpu_stats = []
    if not self.nvml_available or not self.pynvml:
        return

    try:
        device_count = self.pynvml.nvmlDeviceGetCount()
        for i in range(device_count):
            self.gpu_stats.append(self._get_device_stats(i))
    except Exception as e:
        LOGGER.warning(f"Error during device query: {e}")
        self.gpu_stats = []
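
A typical pattern is to refresh and then read the populated gpu_stats list, using the keys documented above:

>>> gpu_info = GPUInfo()
>>> gpu_info.refresh_stats()
>>> for gpu in gpu_info.gpu_stats:
...     print(gpu["index"], gpu["name"], f"{gpu['utilization']}%", f"{gpu['memory_free']} MiB free")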

select_idle_gpu

select_idle_gpu(
    count: int = 1, min_memory_fraction: float = 0, min_util_fraction: float = 0
) -> List[int]

Select the most idle GPUs based on utilization and free memory.

Parameters:

count (int): The number of idle GPUs to select. Default: 1.

min_memory_fraction (float): Minimum free memory required, as a fraction of total memory. Default: 0.

min_util_fraction (float): Minimum free (idle) utilization required, as a fraction from 0.0 to 1.0. Default: 0.

Returns:

List[int]: Indices of the selected GPUs, sorted by idleness (lowest utilization first).

Notes

May return fewer than 'count' indices if not enough GPUs qualify or exist. Falls back to basic CUDA indices if NVML fails, and returns an empty list if no GPUs are found.

Source code in ultralytics/utils/autodevice.py
def select_idle_gpu(
    self, count: int = 1, min_memory_fraction: float = 0, min_util_fraction: float = 0
) -> List[int]:
    """
    Select the most idle GPUs based on utilization and free memory.

    Args:
        count (int): The number of idle GPUs to select.
        min_memory_fraction (float): Minimum free memory required as a fraction of total memory.
        min_util_fraction (float): Minimum free utilization rate required from 0.0 - 1.0.

    Returns:
        (List[int]): Indices of the selected GPUs, sorted by idleness (lowest utilization first).

    Notes:
         Returns fewer than 'count' if not enough qualify or exist.
         Returns basic CUDA indices if NVML fails. Empty list if no GPUs found.
    """
    assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
    assert min_util_fraction <= 1.0, f"min_util_fraction must be <= 1.0, got {min_util_fraction}"
    LOGGER.info(
        f"Searching for {count} idle GPUs with free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%..."
    )

    if count <= 0:
        return []

    self.refresh_stats()
    if not self.gpu_stats:
        LOGGER.warning("NVML stats unavailable.")
        return []

    # Filter and sort eligible GPUs
    eligible_gpus = [
        gpu
        for gpu in self.gpu_stats
        if gpu.get("memory_free", 0) / gpu.get("memory_total", 1) >= min_memory_fraction
        and (100 - gpu.get("utilization", 100)) >= min_util_fraction * 100
    ]
    eligible_gpus.sort(key=lambda x: (x.get("utilization", 101), -x.get("memory_free", 0)))

    # Select top 'count' indices
    selected = [gpu["index"] for gpu in eligible_gpus[:count]]

    if selected:
        LOGGER.info(f"Selected idle CUDA devices {selected}")
    else:
        LOGGER.warning(
            f"No GPUs met criteria (Free Mem >= {min_memory_fraction * 100:.1f}% and Free Util >= {min_util_fraction * 100:.1f}%)."
        )

    return selected
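
One common way to use the returned indices is to restrict device visibility before any CUDA context is created; a hedged sketch (setting CUDA_VISIBLE_DEVICES is one possible approach, not something this method does itself):

import os

selected = GPUInfo().select_idle_gpu(count=2, min_memory_fraction=0.2, min_util_fraction=0.5)
if selected:
    # Must be set before the first CUDA call in the process to take effect
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in selected)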

shutdown

shutdown()

Shut down NVML if it was initialized.

Source code in ultralytics/utils/autodevice.py
def shutdown(self):
    """Shut down NVML if it was initialized."""
    if self.nvml_available and self.pynvml:
        try:
            self.pynvml.nvmlShutdown()
        except Exception:
            pass
        self.nvml_available = False
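
Although __del__ calls shutdown() during garbage collection, long-running applications may prefer explicit cleanup:

>>> gpu_info = GPUInfo()
>>> try:
...     gpu_info.print_status()
... finally:
...     gpu_info.shutdown()  # idempotent; safe even if NVML never initialized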




