|
3 | 3 | use bit_field::BitField;
|
4 | 4 |
|
5 | 5 | use crate::{
|
6 |
| - structures::paging::{page::NotGiantPageSize, Page, PageSize, Size2MiB}, |
7 |
| - VirtAddr, |
| 6 | + instructions::segmentation::{Segment, CS}, |
| 7 | + structures::paging::{ |
| 8 | + page::{NotGiantPageSize, PageRange}, |
| 9 | + Page, PageSize, Size2MiB, Size4KiB, |
| 10 | + }, |
| 11 | + PrivilegeLevel, VirtAddr, |
8 | 12 | };
|
9 |
| -use core::arch::asm; |
| 13 | +use core::{arch::asm, cmp, convert::TryFrom}; |
10 | 14 |
|
11 | 15 | /// Invalidate the given address in the TLB using the `invlpg` instruction.
|
12 | 16 | #[inline]
|
@@ -103,14 +107,255 @@ pub unsafe fn flush_pcid(command: InvPicdCommand) {
|
103 | 107 | }
|
104 | 108 | }
|
105 | 109 |
|
106 |
| -/// Invalidates TLB entry(s) with Broadcast. |
/// Used to broadcast flushes to all logical processors.
///
/// ```no_run
/// use x86_64::VirtAddr;
/// use x86_64::structures::paging::Page;
/// use x86_64::instructions::tlb::Invlpgb;
///
/// // Check that `invlpgb` and `tlbsync` are supported.
/// let invlpgb = Invlpgb::new().unwrap();
///
/// // Broadcast flushing some pages to all logical processors.
/// let start: Page = Page::from_start_address(VirtAddr::new(0xf000_0000)).unwrap();
/// let pages = Page::range(start, start + 3);
/// invlpgb.build().pages(pages).include_global().flush();
///
/// // Wait for all logical processors to respond.
/// invlpgb.tlbsync();
/// ```
#[derive(Debug, Clone, Copy)]
pub struct Invlpgb {
    // Maximum page count for a single `invlpgb`, from CPUID.(EAX=8000_0008h):EDX[15:0].
    invlpgb_count_max: u16,
    // Whether flushing nested (guest) translations is supported,
    // from CPUID.(EAX=8000_0008h):EBX bit 21.
    tlb_flush_nested: bool,
    // Number of available address space identifiers,
    // from CPUID.(EAX=8000_000Ah):EBX.
    nasid: u32,
}
| 134 | + |
| 135 | +impl Invlpgb { |
| 136 | + /// Check that `invlpgb` and `tlbsync` are supported and query limits. |
| 137 | + /// |
| 138 | + /// # Panics |
| 139 | + /// |
| 140 | + /// Panics if the CPL is not 0. |
| 141 | + pub fn new() -> Option<Self> { |
| 142 | + let cs = CS::get_reg(); |
| 143 | + assert_eq!(cs.rpl(), PrivilegeLevel::Ring0); |
| 144 | + |
| 145 | + // Check if the `INVLPGB` and `TLBSYNC` instruction are supported. |
| 146 | + let cpuid = unsafe { core::arch::x86_64::__cpuid(0x8000_0008) }; |
| 147 | + if !cpuid.ebx.get_bit(3) { |
| 148 | + return None; |
| 149 | + } |
| 150 | + |
| 151 | + let tlb_flush_nested = cpuid.ebx.get_bit(21); |
| 152 | + let invlpgb_count_max = cpuid.edx.get_bits(0..=15) as u16; |
| 153 | + |
| 154 | + // Figure out the number of supported ASIDs. |
| 155 | + let cpuid = unsafe { core::arch::x86_64::__cpuid(0x8000_000a) }; |
| 156 | + let nasid = cpuid.ebx; |
| 157 | + |
| 158 | + Some(Self { |
| 159 | + tlb_flush_nested, |
| 160 | + invlpgb_count_max, |
| 161 | + nasid, |
| 162 | + }) |
| 163 | + } |
| 164 | + |
| 165 | + /// Returns the maximum count of pages to be flushed supported by the processor. |
| 166 | + #[inline] |
| 167 | + pub fn invlpgb_count_max(&self) -> u16 { |
| 168 | + self.invlpgb_count_max |
| 169 | + } |
| 170 | + |
| 171 | + /// Returns whether the processor supports flushing translations used for guest translation. |
| 172 | + #[inline] |
| 173 | + pub fn tlb_flush_nested(&self) -> bool { |
| 174 | + self.tlb_flush_nested |
| 175 | + } |
| 176 | + |
| 177 | + /// Returns the number of available address space identifiers. |
| 178 | + #[inline] |
| 179 | + pub fn nasid(&self) -> u32 { |
| 180 | + self.nasid |
| 181 | + } |
| 182 | + |
| 183 | + /// Create a `InvlpgbFlushBuilder`. |
| 184 | + pub fn build(&self) -> InvlpgbFlushBuilder<'_> { |
| 185 | + InvlpgbFlushBuilder { |
| 186 | + invlpgb: self, |
| 187 | + page_range: None, |
| 188 | + pcid: None, |
| 189 | + asid: None, |
| 190 | + include_global: false, |
| 191 | + final_translation_only: false, |
| 192 | + include_nested_translations: false, |
| 193 | + } |
| 194 | + } |
| 195 | + |
| 196 | + /// Wait for all previous `invlpgb` instruction executed on the current |
| 197 | + /// logical processor to be acknowledged by all other logical processors. |
| 198 | + #[inline] |
| 199 | + pub fn tlbsync(&self) { |
| 200 | + unsafe { |
| 201 | + asm!("tlbsync", options(nomem, preserves_flags)); |
| 202 | + } |
| 203 | + } |
| 204 | +} |
| 205 | + |
/// A builder struct to construct the parameters for the `invlpgb` instruction.
#[derive(Debug, Clone)]
#[must_use]
pub struct InvlpgbFlushBuilder<'a, S = Size4KiB>
where
    S: NotGiantPageSize,
{
    // Capability limits (`invlpgb_count_max`, `nasid`, ...) queried at construction.
    invlpgb: &'a Invlpgb,
    // `None` flushes without a virtual-address filter; `Some` restricts the
    // flush to this page range.
    page_range: Option<PageRange<S>>,
    // Optional PCID filter; caller must guarantee PCID is enabled in CR4.
    pcid: Option<Pcid>,
    // Optional ASID filter; caller must guarantee SVM is enabled in EFER.
    asid: Option<u16>,
    // Also flush global pages when set.
    include_global: bool,
    // Only flush final translations, not cached upper-level TLB entries.
    final_translation_only: bool,
    // Also flush translations used for guest translation (requires support).
    include_nested_translations: bool,
}
| 221 | + |
| 222 | +impl<'a, S> InvlpgbFlushBuilder<'a, S> |
| 223 | +where |
| 224 | + S: NotGiantPageSize, |
| 225 | +{ |
| 226 | + /// Flush a range of pages. |
| 227 | + /// |
| 228 | + /// If the range doesn't fit within `invlpgb_count_max`, `invlpgb` is |
| 229 | + /// executed multiple times. |
| 230 | + pub fn pages<T>(self, page_range: PageRange<T>) -> InvlpgbFlushBuilder<'a, T> |
| 231 | + where |
| 232 | + T: NotGiantPageSize, |
| 233 | + { |
| 234 | + InvlpgbFlushBuilder { |
| 235 | + invlpgb: self.invlpgb, |
| 236 | + page_range: Some(page_range), |
| 237 | + pcid: self.pcid, |
| 238 | + asid: self.asid, |
| 239 | + include_global: self.include_global, |
| 240 | + final_translation_only: self.final_translation_only, |
| 241 | + include_nested_translations: self.include_nested_translations, |
| 242 | + } |
| 243 | + } |
| 244 | + |
| 245 | + /// Only flush TLB entries with the given PCID. |
| 246 | + /// |
| 247 | + /// # Safety |
| 248 | + /// |
| 249 | + /// The caller has to ensure that PCID is enabled in CR4 when the flush is executed. |
| 250 | + pub unsafe fn pcid(mut self, pcid: Pcid) -> Self { |
| 251 | + self.pcid = Some(pcid); |
| 252 | + self |
| 253 | + } |
| 254 | + |
| 255 | + /// Only flush TLB entries with the given ASID. |
| 256 | + /// |
| 257 | + /// # Safety |
| 258 | + /// |
| 259 | + /// The caller has to ensure that SVM is enabled in EFER when the flush is executed. |
| 260 | + // FIXME: Make ASID a type and remove error type. |
| 261 | + pub unsafe fn asid(mut self, asid: u16) -> Result<Self, AsidOutOfRangeError> { |
| 262 | + if u32::from(asid) > self.invlpgb.nasid { |
| 263 | + return Err(AsidOutOfRangeError { |
| 264 | + asid, |
| 265 | + nasid: self.invlpgb.nasid, |
| 266 | + }); |
| 267 | + } |
| 268 | + |
| 269 | + self.asid = Some(asid); |
| 270 | + Ok(self) |
| 271 | + } |
| 272 | + |
| 273 | + /// Also flush global pages. |
| 274 | + pub fn include_global(mut self) -> Self { |
| 275 | + self.include_global = true; |
| 276 | + self |
| 277 | + } |
| 278 | + |
| 279 | + /// Only flush the final translation and not the cached upper level TLB entries. |
| 280 | + pub fn final_translation_only(mut self) -> Self { |
| 281 | + self.final_translation_only = true; |
| 282 | + self |
| 283 | + } |
| 284 | + |
| 285 | + /// Also flush nestred translations that could be used for guest translation. |
| 286 | + pub fn include_nested_translations(mut self) -> Self { |
| 287 | + assert!( |
| 288 | + self.invlpgb.tlb_flush_nested, |
| 289 | + "flushing all nested translations is not supported" |
| 290 | + ); |
| 291 | + |
| 292 | + self.include_nested_translations = true; |
| 293 | + self |
| 294 | + } |
| 295 | + |
| 296 | + /// Execute the flush. |
| 297 | + pub fn flush(self) { |
| 298 | + if let Some(mut pages) = self.page_range { |
| 299 | + while !pages.is_empty() { |
| 300 | + // Calculate out how many pages we still need to flush. |
| 301 | + let count = Page::<S>::steps_between(&pages.start, &pages.end).unwrap(); |
| 302 | + |
| 303 | + // Make sure that we never jump the gap in the address space when flushing. |
| 304 | + let second_half_start = |
| 305 | + Page::<S>::containing_address(VirtAddr::new(0xffff_8000_0000_0000)); |
| 306 | + let count = if pages.start < second_half_start { |
| 307 | + let count_to_second_half = |
| 308 | + Page::steps_between(&pages.start, &second_half_start).unwrap(); |
| 309 | + cmp::min(count, count_to_second_half) |
| 310 | + } else { |
| 311 | + count |
| 312 | + }; |
| 313 | + |
| 314 | + // We can flush at most u16::MAX pages at once. |
| 315 | + let count = u16::try_from(count).unwrap_or(u16::MAX); |
| 316 | + |
| 317 | + // Cap the count by the maximum supported count of the processor. |
| 318 | + let count = cmp::min(count, self.invlpgb.invlpgb_count_max); |
| 319 | + |
| 320 | + unsafe { |
| 321 | + flush_broadcast( |
| 322 | + Some((pages.start, count)), |
| 323 | + self.pcid, |
| 324 | + self.asid, |
| 325 | + self.include_global, |
| 326 | + self.final_translation_only, |
| 327 | + self.include_nested_translations, |
| 328 | + ); |
| 329 | + } |
| 330 | + |
| 331 | + // Even if the count is zero, one page is still flushed and so |
| 332 | + // we need to advance by at least one. |
| 333 | + let inc_count = cmp::max(count, 1); |
| 334 | + pages.start = Page::forward_checked(pages.start, usize::from(inc_count)).unwrap(); |
| 335 | + } |
| 336 | + } else { |
| 337 | + unsafe { |
| 338 | + flush_broadcast::<S>( |
| 339 | + None, |
| 340 | + self.pcid, |
| 341 | + self.asid, |
| 342 | + self.include_global, |
| 343 | + self.final_translation_only, |
| 344 | + self.include_nested_translations, |
| 345 | + ); |
| 346 | + } |
| 347 | + } |
| 348 | + } |
| 349 | +} |
| 350 | + |
// Error returned by `InvlpgbFlushBuilder::asid` when the requested ASID
// exceeds the processor's supported range.
// NOTE(review): no `Display`/`core::error::Error` impl is visible in this
// chunk — confirm one exists elsewhere, or callers can only `Debug`-print it.
#[derive(Debug)]
pub struct AsidOutOfRangeError {
    // The rejected ASID.
    asid: u16,
    // The processor's supported ASID count (CPUID.(EAX=8000_000Ah):EBX).
    nasid: u32,
}
| 356 | + |
112 | 357 | #[inline]
|
113 |
| -pub unsafe fn flush_broadcast<S>( |
| 358 | +unsafe fn flush_broadcast<S>( |
114 | 359 | va_and_count: Option<(Page<S>, u16)>,
|
115 | 360 | pcid: Option<Pcid>,
|
116 | 361 | asid: Option<u16>,
|
@@ -156,15 +401,3 @@ pub unsafe fn flush_broadcast<S>(
|
156 | 401 | );
|
157 | 402 | }
|
158 | 403 | }
|
159 |
| - |
160 |
| -/// Synchronize broadcasted TLB Invalidations. |
161 |
| -/// |
162 |
| -/// # Safety |
163 |
| -/// |
164 |
| -/// This function is unsafe as it requires CPUID.(EAX=8000_0008H, ECX=0H):EBX.INVLPGB to be 1. |
165 |
| -#[inline] |
166 |
| -pub unsafe fn tlbsync() { |
167 |
| - unsafe { |
168 |
| - asm!("tlbsync", options(nomem, preserves_flags)); |
169 |
| - } |
170 |
| -} |
|
0 commit comments