@@ -1,849 +1,879 @@
|
1 | 1 | # dirstatemap.py |
|
2 | 2 | # |
|
3 | 3 | # This software may be used and distributed according to the terms of the |
|
4 | 4 | # GNU General Public License version 2 or any later version. |
|
5 | 5 | |
|
6 | 6 | |
|
7 | from typing import ( | |
|
8 | Optional, | |
|
9 | TYPE_CHECKING, | |
|
10 | ) | |
|
11 | ||
|
7 | 12 | from .i18n import _ |
|
8 | 13 | |
|
9 | 14 | from . import ( |
|
10 | 15 | error, |
|
11 | 16 | pathutil, |
|
12 | 17 | policy, |
|
13 | 18 | testing, |
|
14 | 19 | txnutil, |
|
20 | typelib, | |
|
15 | 21 | util, |
|
16 | 22 | ) |
|
17 | 23 | |
|
18 | 24 | from .dirstateutils import ( |
|
19 | 25 | docket as docketmod, |
|
20 | 26 | v2, |
|
21 | 27 | ) |
|
22 | 28 | |
|
29 | if TYPE_CHECKING: | |
|
30 | from . import ( | |
|
31 | ui as uimod, | |
|
32 | ) | |
|
33 | ||
|
23 | 34 | parsers = policy.importmod('parsers') |
|
24 | 35 | rustmod = policy.importrust('dirstate') |
|
25 | 36 | |
|
26 | 37 | propertycache = util.propertycache |
|
27 | 38 | |
|
28 | 39 | if rustmod is None: |
|
29 | 40 | DirstateItem = parsers.DirstateItem |
|
30 | 41 | else: |
|
31 | 42 | DirstateItem = rustmod.DirstateItem |
|
32 | 43 | |
|
33 | 44 | rangemask = 0x7FFFFFFF |
|
34 | 45 | |
|
35 | 46 | WRITE_MODE_AUTO = 0 |
|
36 | 47 | WRITE_MODE_FORCE_NEW = 1 |
|
37 | 48 | WRITE_MODE_FORCE_APPEND = 2 |
|
38 | 49 | |
|
39 | 50 | |
|
40 | 51 | V2_MAX_READ_ATTEMPTS = 5 |
|
41 | 52 | |
|
42 | 53 | |
|
43 | 54 | class _dirstatemapcommon: |
|
44 | 55 | """ |
|
45 | 56 | Methods that are identical for both implementations of the dirstatemap |
|
46 | 57 | class, with and without Rust extensions enabled. |
|
47 | 58 | """ |
|
48 | 59 | |
|
60 | _use_dirstate_v2: bool | |
|
61 | _nodeconstants: typelib.NodeConstants | |
|
62 | _ui: "uimod.ui" | |
|
63 | _root: bytes | |
|
64 | _filename: bytes | |
|
65 | _nodelen: int | |
|
66 | _dirtyparents: bool | |
|
67 | _docket: Optional["docketmod.DirstateDocket"] | |
|
68 | _write_mode: int | |
|
69 | _pendingmode: Optional[bool] | |
|
70 | identity: Optional[typelib.CacheStat] | |
|
71 | ||
|
49 | 72 | # please pytype |
|
50 | 73 | |
|
51 | 74 | _map = None |
|
52 | 75 | copymap = None |
|
53 | 76 | |
|
54 | def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2): | |
|
77 | def __init__( | |
|
78 | self, | |
|
79 | ui: "uimod.ui", | |
|
80 | opener, | |
|
81 | root: bytes, | |
|
82 | nodeconstants: typelib.NodeConstants, | |
|
83 | use_dirstate_v2: bool, | |
|
84 | ) -> None: | |
|
55 | 85 | self._use_dirstate_v2 = use_dirstate_v2 |
|
56 | 86 | self._nodeconstants = nodeconstants |
|
57 | 87 | self._ui = ui |
|
58 | 88 | self._opener = opener |
|
59 | 89 | self._root = root |
|
60 | 90 | self._filename = b'dirstate' |
|
61 | 91 | self._nodelen = 20 # Also update Rust code when changing this! |
|
62 | 92 | self._parents = None |
|
63 | 93 | self._dirtyparents = False |
|
64 | 94 | self._docket = None |
|
65 | 95 | write_mode = ui.config(b"devel", b"dirstate.v2.data_update_mode") |
|
66 | 96 | if write_mode == b"auto": |
|
67 | 97 | self._write_mode = WRITE_MODE_AUTO |
|
68 | 98 | elif write_mode == b"force-append": |
|
69 | 99 | self._write_mode = WRITE_MODE_FORCE_APPEND |
|
70 | 100 | elif write_mode == b"force-new": |
|
71 | 101 | self._write_mode = WRITE_MODE_FORCE_NEW |
|
72 | 102 | else: |
|
73 | 103 | # unknown value, fallback to default |
|
74 | 104 | self._write_mode = WRITE_MODE_AUTO |
|
75 | 105 | |
|
76 | 106 | # for consistent view between _pl() and _read() invocations |
|
77 | 107 | self._pendingmode = None |
|
78 | 108 | |
|
79 | def _set_identity(self): | |
|
109 | def _set_identity(self) -> None: | |
|
80 | 110 | self.identity = self._get_current_identity() |
|
81 | 111 | |
|
82 | def _get_current_identity(self): | |
|
112 | def _get_current_identity(self) -> Optional[typelib.CacheStat]: | |
|
83 | 113 | try: |
|
84 | 114 | return util.cachestat(self._opener.join(self._filename)) |
|
85 | 115 | except FileNotFoundError: |
|
86 | 116 | return None |
|
87 | 117 | |
|
88 | def may_need_refresh(self): | |
|
118 | def may_need_refresh(self) -> bool: | |
|
89 | 119 | if 'identity' not in vars(self): |
|
90 | 120 | # no existing identity, we need a refresh |
|
91 | 121 | return True |
|
92 | 122 | if self.identity is None: |
|
93 | 123 | return True |
|
94 | 124 | if not self.identity.cacheable(): |
|
95 | 125 | # We cannot trust the entry |
|
96 | 126 | # XXX this is a problem on Windows, NFS, or other inode-less systems |
|
97 | 127 | return True |
|
98 | 128 | current_identity = self._get_current_identity() |
|
99 | 129 | if current_identity is None: |
|
100 | 130 | return True |
|
101 | 131 | if not current_identity.cacheable(): |
|
102 | 132 | # We cannot trust the entry |
|
103 | 133 | # XXX this is a problem on Windows, NFS, or other inode-less systems |
|
104 | 134 | return True |
|
105 | 135 | return current_identity != self.identity |
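
The identity/refresh logic above avoids re-parsing the dirstate when nothing on disk has changed: a stat-based fingerprint is recorded at read time and compared later. A minimal standalone sketch of the same idea, using only the standard library (the class and helper names are illustrative, not Mercurial's API, and the `cacheable()` guard for inode-less filesystems is omitted):

import os

def file_identity(path):
    # A cheap fingerprint of the file as it is on disk right now.
    try:
        st = os.stat(path)
    except FileNotFoundError:
        return None
    return (st.st_ino, st.st_size, st.st_mtime_ns)

class CachedFile:
    def __init__(self, path):
        self.path = path
        self._identity = None
        self._data = None

    def may_need_refresh(self):
        # Reload if we never read the file or its fingerprint changed.
        return self._data is None or file_identity(self.path) != self._identity

    def read(self):
        if self.may_need_refresh():
            self._identity = file_identity(self.path)
            if self._identity is None:
                self._data = b''  # a missing file means an empty state
            else:
                with open(self.path, 'rb') as fp:
                    self._data = fp.read()
        return self._data
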
|
106 | 136 | |
|
107 | def preload(self): | |
|
137 | def preload(self) -> None: | |
|
108 | 138 | """Loads the underlying data, if it's not already loaded""" |
|
109 | 139 | self._map |
|
110 | 140 | |
|
111 | 141 | def get(self, key, default=None): |
|
112 | 142 | return self._map.get(key, default) |
|
113 | 143 | |
|
114 | 144 | def __len__(self): |
|
115 | 145 | return len(self._map) |
|
116 | 146 | |
|
117 | 147 | def __iter__(self): |
|
118 | 148 | return iter(self._map) |
|
119 | 149 | |
|
120 | 150 | def __contains__(self, key): |
|
121 | 151 | return key in self._map |
|
122 | 152 | |
|
123 | 153 | def __getitem__(self, item): |
|
124 | 154 | return self._map[item] |
|
125 | 155 | |
|
126 | 156 | ### disk interaction |
|
127 | 157 | |
|
128 | 158 | def _opendirstatefile(self): |
|
129 | 159 | fp, mode = txnutil.trypending(self._root, self._opener, self._filename) |
|
130 | 160 | if self._pendingmode is not None and self._pendingmode != mode: |
|
131 | 161 | fp.close() |
|
132 | 162 | raise error.Abort( |
|
133 | 163 | _(b'working directory state may be changed parallelly') |
|
134 | 164 | ) |
|
135 | 165 | self._pendingmode = mode |
|
136 | 166 | return fp |
|
137 | 167 | |
|
138 | def _readdirstatefile(self, size=-1): | |
|
168 | def _readdirstatefile(self, size: int = -1) -> bytes: | |
|
139 | 169 | try: |
|
140 | 170 | with self._opendirstatefile() as fp: |
|
141 | 171 | return fp.read(size) |
|
142 | 172 | except FileNotFoundError: |
|
143 | 173 | # File doesn't exist, so the current state is empty |
|
144 | 174 | return b'' |
|
145 | 175 | |
|
146 | 176 | @property |
|
147 | def docket(self): | |
|
177 | def docket(self) -> "docketmod.DirstateDocket": | |
|
148 | 178 | if not self._docket: |
|
149 | 179 | if not self._use_dirstate_v2: |
|
150 | 180 | raise error.ProgrammingError( |
|
151 | 181 | b'dirstate only has a docket in v2 format' |
|
152 | 182 | ) |
|
153 | 183 | self._set_identity() |
|
154 | 184 | data = self._readdirstatefile() |
|
155 | 185 | if data == b'' or data.startswith(docketmod.V2_FORMAT_MARKER): |
|
156 | 186 | self._docket = docketmod.DirstateDocket.parse( |
|
157 | 187 | data, self._nodeconstants |
|
158 | 188 | ) |
|
159 | 189 | else: |
|
160 | 190 | raise error.CorruptedDirstate(b"dirstate is not in v2 format") |
|
161 | 191 | return self._docket |
|
162 | 192 | |
|
163 | 193 | def _read_v2_data(self): |
|
164 | 194 | data = None |
|
165 | 195 | attempts = 0 |
|
166 | 196 | while attempts < V2_MAX_READ_ATTEMPTS: |
|
167 | 197 | attempts += 1 |
|
168 | 198 | try: |
|
169 | 199 | # TODO: use mmap when possible |
|
170 | 200 | data = self._opener.read(self.docket.data_filename()) |
|
171 | 201 | except FileNotFoundError: |
|
172 | 202 | # read race detected between docket and data file |
|
173 | 203 | # reload the docket and retry |
|
174 | 204 | self._docket = None |
|
175 | 205 | if data is None: |
|
176 | 206 | assert attempts >= V2_MAX_READ_ATTEMPTS |
|
177 | 207 | msg = b"dirstate read race happened %d times in a row" |
|
178 | 208 | msg %= attempts |
|
179 | 209 | raise error.Abort(msg) |
|
180 | 210 | return self._opener.read(self.docket.data_filename()) |
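
The retry loop in `_read_v2_data` handles a race where another process rewrites the docket and removes the old data file between our two reads; reloading the docket and retrying a bounded number of times resolves it. A hedged, generic sketch of that pattern (the JSON pointer-file layout below is hypothetical, chosen only to keep the example self-contained):

import json

MAX_READ_ATTEMPTS = 5

def read_pointed_data(pointer_path):
    """Read a pointer file and the data file it names, retrying on races."""
    for _attempt in range(MAX_READ_ATTEMPTS):
        with open(pointer_path, 'rb') as fp:
            pointer = json.loads(fp.read())
        try:
            with open(pointer['data_file'], 'rb') as fp:
                return fp.read()
        except FileNotFoundError:
            # A writer replaced the data file after we read the pointer:
            # re-read the pointer and try again.
            continue
    raise RuntimeError('read race happened %d times in a row' % MAX_READ_ATTEMPTS)
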
|
181 | 211 | |
|
182 | 212 | def write_v2_no_append(self, tr, st, meta, packed): |
|
183 | 213 | try: |
|
184 | 214 | old_docket = self.docket |
|
185 | 215 | except error.CorruptedDirstate: |
|
186 | 216 | # This means we've identified a dirstate-v1 file on-disk when we |
|
187 | 217 | # were expecting a dirstate-v2 docket. We've managed to recover |
|
188 | 218 | # from that unexpected situation, and now we want to write back a |
|
189 | 219 | # dirstate-v2 file to make the on-disk situation right again. |
|
190 | 220 | # |
|
191 | 221 | # This shouldn't be triggered since `self.docket` is cached and |
|
192 | 222 | # we would have called parents() or read() first, but it's here |
|
193 | 223 | # just in case. |
|
194 | 224 | old_docket = None |
|
195 | 225 | |
|
196 | 226 | new_docket = docketmod.DirstateDocket.with_new_uuid( |
|
197 | 227 | self.parents(), len(packed), meta |
|
198 | 228 | ) |
|
199 | 229 | if old_docket is not None and old_docket.uuid == new_docket.uuid: |
|
200 | 230 | raise error.ProgrammingError(b'dirstate docket name collision') |
|
201 | 231 | data_filename = new_docket.data_filename() |
|
202 | 232 | self._opener.write(data_filename, packed) |
|
203 | 233 | # tell the transaction that we are adding a new file |
|
204 | 234 | if tr is not None: |
|
205 | 235 | tr.addbackup(data_filename, location=b'plain') |
|
206 | 236 | # Write the new docket after the new data file has been |
|
207 | 237 | # written. Because `st` was opened with `atomictemp=True`, |
|
208 | 238 | # the actual `.hg/dirstate` file is only affected on close. |
|
209 | 239 | st.write(new_docket.serialize()) |
|
210 | 240 | st.close() |
|
211 | 241 | # Remove the old data file after the new docket pointing to |
|
212 | 242 | # the new data file was written. |
|
213 | 243 | if old_docket is not None and old_docket.uuid: |
|
214 | 244 | data_filename = old_docket.data_filename() |
|
215 | 245 | if tr is not None: |
|
216 | 246 | tr.addbackup(data_filename, location=b'plain') |
|
217 | 247 | unlink = lambda _tr=None: self._opener.unlink(data_filename) |
|
218 | 248 | if tr: |
|
219 | 249 | category = b"dirstate-v2-clean-" + old_docket.uuid |
|
220 | 250 | tr.addpostclose(category, unlink) |
|
221 | 251 | else: |
|
222 | 252 | unlink() |
|
223 | 253 | self._docket = new_docket |
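
The crash safety of `write_v2_no_append` comes entirely from ordering: write the new data file first, atomically publish the docket that points at it (the `st` handle is an atomictemp file, so `.hg/dirstate` only changes on close), and only then delete the old data file. A simplified sketch of the same ordering with a JSON docket and `os.replace` as the atomic step (file layout and names are hypothetical); the real code additionally defers the unlink to transaction close and registers backups with the transaction:

import json
import os
import uuid

def write_snapshot(directory, payload):
    # 1. Write the new data under a fresh, never-reused name.
    data_name = 'data-%s.bin' % uuid.uuid4().hex
    with open(os.path.join(directory, data_name), 'wb') as fp:
        fp.write(payload)
    # 2. Remember which data file the old docket pointed at, if any.
    docket_path = os.path.join(directory, 'docket')
    try:
        with open(docket_path, 'r') as fp:
            old_data = json.load(fp).get('data_file')
    except FileNotFoundError:
        old_data = None
    # 3. Atomically publish a docket pointing at the new data file.
    tmp = docket_path + '.tmp'
    with open(tmp, 'w') as fp:
        json.dump({'data_file': data_name, 'data_size': len(payload)}, fp)
    os.replace(tmp, docket_path)
    # 4. Only now is the old data file unreachable and safe to remove.
    if old_data and old_data != data_name:
        os.unlink(os.path.join(directory, old_data))
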
|
224 | 254 | |
|
225 | 255 | ### reading/setting parents |
|
226 | 256 | |
|
227 | 257 | def parents(self): |
|
228 | 258 | if not self._parents: |
|
229 | 259 | if self._use_dirstate_v2: |
|
230 | 260 | try: |
|
231 | 261 | self.docket |
|
232 | 262 | except error.CorruptedDirstate as e: |
|
233 | 263 | # fall back to dirstate-v1 if we fail to read v2 |
|
234 | 264 | self._v1_parents(e) |
|
235 | 265 | else: |
|
236 | 266 | self._parents = self.docket.parents |
|
237 | 267 | else: |
|
238 | 268 | self._v1_parents() |
|
239 | 269 | |
|
240 | 270 | return self._parents |
|
241 | 271 | |
|
242 | 272 | def _v1_parents(self, from_v2_exception=None): |
|
243 | 273 | read_len = self._nodelen * 2 |
|
244 | 274 | st = self._readdirstatefile(read_len) |
|
245 | 275 | l = len(st) |
|
246 | 276 | if l == read_len: |
|
247 | 277 | self._parents = ( |
|
248 | 278 | st[: self._nodelen], |
|
249 | 279 | st[self._nodelen : 2 * self._nodelen], |
|
250 | 280 | ) |
|
251 | 281 | elif l == 0: |
|
252 | 282 | self._parents = ( |
|
253 | 283 | self._nodeconstants.nullid, |
|
254 | 284 | self._nodeconstants.nullid, |
|
255 | 285 | ) |
|
256 | 286 | else: |
|
257 | 287 | hint = None |
|
258 | 288 | if from_v2_exception is not None: |
|
259 | 289 | hint = _(b"falling back to dirstate-v1 from v2 also failed") |
|
260 | 290 | raise error.Abort( |
|
261 | 291 | _(b'working directory state appears damaged!'), hint |
|
262 | 292 | ) |
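
In dirstate-v1 the working-copy parents are simply the first `2 * nodelen` bytes of the file: an empty file means both parents are the null id, and any other length is treated as corruption. A tiny worked example of the slicing, assuming 20-byte nodes as in the code above:

nodelen = 20
header = b'\x11' * nodelen + b'\x22' * nodelen  # pretend first 40 bytes of a dirstate
p1 = header[:nodelen]
p2 = header[nodelen : 2 * nodelen]
assert (p1, p2) == (b'\x11' * 20, b'\x22' * 20)
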
|
263 | 293 | |
|
264 | 294 | |
|
265 | 295 | class dirstatemap(_dirstatemapcommon): |
|
266 | 296 | """Map encapsulating the dirstate's contents. |
|
267 | 297 | |
|
268 | 298 | The dirstate contains the following state: |
|
269 | 299 | |
|
270 | 300 | - `identity` is the identity of the dirstate file, which can be used to |
|
271 | 301 | detect when changes have occurred to the dirstate file. |
|
272 | 302 | |
|
273 | 303 | - `parents` is a pair containing the parents of the working copy. The |
|
274 | 304 | parents are updated by calling `setparents`. |
|
275 | 305 | |
|
276 | 306 | - the state map maps filenames to tuples of (state, mode, size, mtime), |
|
277 | 307 | where state is a single character representing 'normal', 'added', |
|
278 | 308 | 'removed', or 'merged'. It is read by treating the dirstate as a |
|
279 | 309 | dict. File state is updated by calling various methods (see each |
|
280 | 310 | documentation for details): |
|
281 | 311 | |
|
282 | 312 | - `reset_state`, |
|
283 | 313 | - `set_tracked` |
|
284 | 314 | - `set_untracked` |
|
285 | 315 | - `set_clean` |
|
286 | 316 | - `set_possibly_dirty` |
|
287 | 317 | |
|
288 | 318 | - `copymap` maps destination filenames to their source filename. |
|
289 | 319 | |
|
290 | 320 | The dirstate also provides the following views onto the state: |
|
291 | 321 | |
|
292 | 322 | - `filefoldmap` is a dict mapping normalized filenames to the denormalized |
|
293 | 323 | form that they appear as in the dirstate. |
|
294 | 324 | |
|
295 | 325 | - `dirfoldmap` is a dict mapping normalized directory names to the |
|
296 | 326 | denormalized form that they appear as in the dirstate. |
|
297 | 327 | """ |
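
To make the docstring concrete: the pure-Python backend is essentially a dict mapping filenames to entries, plus a few derived views. A rough illustration of the shape of the data with plain Python dicts (purely illustrative; the real values are `DirstateItem` objects rather than tuples, and the states shown are just examples):

# state map: filename -> (state, mode, size, mtime)
state_map = {
    b'README': (b'n', 0o644, 120, 1700000000),  # 'n': normal (tracked and clean)
    b'old.txt': (b'r', 0, 0, 0),                # 'r': removed
}
# copymap: destination filename -> source filename
copymap = {b'copy.txt': b'README'}
# filefoldmap: normalized (case-folded) name -> name as stored in the dirstate
filefoldmap = {name.lower(): name for name, e in state_map.items() if e[0] != b'r'}
assert filefoldmap[b'readme'] == b'README'
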
|
298 | 328 | |
|
299 | 329 | ### Core data storage and access |
|
300 | 330 | |
|
301 | 331 | @propertycache |
|
302 | 332 | def _map(self): |
|
303 | 333 | self._map = {} |
|
304 | 334 | self.read() |
|
305 | 335 | return self._map |
|
306 | 336 | |
|
307 | 337 | @propertycache |
|
308 | 338 | def copymap(self): |
|
309 | 339 | self.copymap = {} |
|
310 | 340 | self._map |
|
311 | 341 | return self.copymap |
|
312 | 342 | |
|
313 | 343 | def clear(self): |
|
314 | 344 | self._map.clear() |
|
315 | 345 | self.copymap.clear() |
|
316 | 346 | self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid) |
|
317 | 347 | util.clearcachedproperty(self, b"_dirs") |
|
318 | 348 | util.clearcachedproperty(self, b"_alldirs") |
|
319 | 349 | util.clearcachedproperty(self, b"filefoldmap") |
|
320 | 350 | util.clearcachedproperty(self, b"dirfoldmap") |
|
321 | 351 | |
|
322 | 352 | def items(self): |
|
323 | 353 | return self._map.items() |
|
324 | 354 | |
|
325 | 355 | # forward for python2,3 compat |
|
326 | 356 | iteritems = items |
|
327 | 357 | |
|
328 | 358 | def debug_iter(self, all): |
|
329 | 359 | """ |
|
330 | 360 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
331 | 361 | |
|
332 | 362 | `all` is unused when Rust is not enabled |
|
333 | 363 | """ |
|
334 | 364 | for filename, item in self.items(): |
|
335 | 365 | yield (filename, item.state, item.mode, item.size, item.mtime) |
|
336 | 366 | |
|
337 | 367 | def keys(self): |
|
338 | 368 | return self._map.keys() |
|
339 | 369 | |
|
340 | 370 | ### reading/setting parents |
|
341 | 371 | |
|
342 | 372 | def setparents(self, p1, p2, fold_p2=False): |
|
343 | 373 | self._parents = (p1, p2) |
|
344 | 374 | self._dirtyparents = True |
|
345 | 375 | copies = {} |
|
346 | 376 | if fold_p2: |
|
347 | 377 | for f, s in self._map.items(): |
|
348 | 378 | # Discard "merged" markers when moving away from a merge state |
|
349 | 379 | if s.p2_info: |
|
350 | 380 | source = self.copymap.pop(f, None) |
|
351 | 381 | if source: |
|
352 | 382 | copies[f] = source |
|
353 | 383 | s.drop_merge_data() |
|
354 | 384 | return copies |
|
355 | 385 | |
|
356 | 386 | ### disk interaction |
|
357 | 387 | |
|
358 | 388 | def read(self): |
|
359 | 389 | testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file') |
|
360 | 390 | if self._use_dirstate_v2: |
|
361 | 391 | try: |
|
362 | 392 | self.docket |
|
363 | 393 | except error.CorruptedDirstate: |
|
364 | 394 | # fall back to dirstate-v1 if we fail to read v2 |
|
365 | 395 | self._set_identity() |
|
366 | 396 | st = self._readdirstatefile() |
|
367 | 397 | else: |
|
368 | 398 | if not self.docket.uuid: |
|
369 | 399 | return |
|
370 | 400 | testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file') |
|
371 | 401 | st = self._read_v2_data() |
|
372 | 402 | else: |
|
373 | 403 | self._set_identity() |
|
374 | 404 | st = self._readdirstatefile() |
|
375 | 405 | |
|
376 | 406 | if not st: |
|
377 | 407 | return |
|
378 | 408 | |
|
379 | 409 | # TODO: adjust this estimate for dirstate-v2 |
|
380 | 410 | if hasattr(parsers, 'dict_new_presized'): |
|
381 | 411 | # Make an estimate of the number of files in the dirstate based on |
|
382 | 412 | # its size. This trades wasting some memory for avoiding costly |
|
383 | 413 | # resizes. Each entry have a prefix of 17 bytes followed by one or |
|
384 | 414 | # two path names. Studies on various large-scale real-world repositories |
|
385 | 415 | # found 54 bytes a reasonable upper limit for the average path names. |
|
386 | 416 | # Copy entries are ignored for the sake of this estimate. |
|
387 | 417 | self._map = parsers.dict_new_presized(len(st) // 71) |
|
388 | 418 | |
|
389 | 419 | # Python's garbage collector triggers a GC each time a certain number |
|
390 | 420 | # of container objects (the number being defined by |
|
391 | 421 | # gc.get_threshold()) are allocated. parse_dirstate creates a tuple |
|
392 | 422 | # for each file in the dirstate. The C version then immediately marks |
|
393 | 423 | # them as not to be tracked by the collector. However, this has no |
|
394 | 424 | # effect on when GCs are triggered, only on what objects the GC looks |
|
395 | 425 | # into. This means that O(number of files) GCs are unavoidable. |
|
396 | 426 | # Depending on when in the process's lifetime the dirstate is parsed, |
|
397 | 427 | # this can get very expensive. As a workaround, disable GC while |
|
398 | 428 | # parsing the dirstate. |
|
399 | 429 | # |
|
400 | 430 | # (we cannot decorate the function directly since it is in a C module) |
|
401 | 431 | if self._use_dirstate_v2: |
|
402 | 432 | try: |
|
403 | 433 | self.docket |
|
404 | 434 | except error.CorruptedDirstate: |
|
405 | 435 | # fall back to dirstate-v1 if we fail to parse v2 |
|
406 | 436 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
407 | 437 | p = parse_dirstate(self._map, self.copymap, st) |
|
408 | 438 | else: |
|
409 | 439 | p = self.docket.parents |
|
410 | 440 | meta = self.docket.tree_metadata |
|
411 | 441 | parse_dirstate = util.nogc(v2.parse_dirstate) |
|
412 | 442 | parse_dirstate(self._map, self.copymap, st, meta) |
|
413 | 443 | else: |
|
414 | 444 | parse_dirstate = util.nogc(parsers.parse_dirstate) |
|
415 | 445 | p = parse_dirstate(self._map, self.copymap, st) |
|
416 | 446 | if not self._dirtyparents: |
|
417 | 447 | self.setparents(*p) |
|
418 | 448 | |
|
419 | 449 | # Avoid excess attribute lookups by fast pathing certain checks |
|
420 | 450 | self.__contains__ = self._map.__contains__ |
|
421 | 451 | self.__getitem__ = self._map.__getitem__ |
|
422 | 452 | self.get = self._map.get |
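
Two performance tricks in `read()` are worth spelling out. The pre-sizing divides the file size by 71 because each v1 entry is roughly a 17-byte fixed prefix plus an average path of about 54 bytes, so a 7.1 MB dirstate is pre-sized for roughly 100,000 entries (7_100_000 // 71). And because parsing allocates one container per file, the cyclic garbage collector is switched off for the duration; a hedged approximation of what a `util.nogc`-style wrapper looks like (a generic sketch, not Mercurial's exact implementation):

import functools
import gc

def nogc(func):
    """Run func with the cyclic GC disabled, restoring its state afterwards."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        was_enabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if was_enabled:
                gc.enable()
    return wrapper

@nogc
def parse_entries(data):
    # Stand-in for parse_dirstate: one small container per entry, allocated
    # without repeatedly tripping the collector's allocation thresholds.
    return [line.split(b'\x00') for line in data.splitlines()]
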
|
423 | 453 | |
|
424 | 454 | def write(self, tr, st): |
|
425 | 455 | if self._use_dirstate_v2: |
|
426 | 456 | packed, meta = v2.pack_dirstate(self._map, self.copymap) |
|
427 | 457 | self.write_v2_no_append(tr, st, meta, packed) |
|
428 | 458 | else: |
|
429 | 459 | packed = parsers.pack_dirstate( |
|
430 | 460 | self._map, self.copymap, self.parents() |
|
431 | 461 | ) |
|
432 | 462 | st.write(packed) |
|
433 | 463 | st.close() |
|
434 | 464 | self._dirtyparents = False |
|
435 | 465 | |
|
436 | 466 | @propertycache |
|
437 | 467 | def identity(self): |
|
438 | 468 | self._map |
|
439 | 469 | return self.identity |
|
440 | 470 | |
|
441 | 471 | ### code related to maintaining and accessing "extra" property |
|
442 | 472 | # (e.g. "has_dir") |
|
443 | 473 | |
|
444 | 474 | def _dirs_incr(self, filename, old_entry=None): |
|
445 | 475 | """increment the dirstate counter if applicable""" |
|
446 | 476 | if ( |
|
447 | 477 | old_entry is None or old_entry.removed |
|
448 | 478 | ) and "_dirs" in self.__dict__: |
|
449 | 479 | self._dirs.addpath(filename) |
|
450 | 480 | if old_entry is None and "_alldirs" in self.__dict__: |
|
451 | 481 | self._alldirs.addpath(filename) |
|
452 | 482 | |
|
453 | 483 | def _dirs_decr(self, filename, old_entry=None, remove_variant=False): |
|
454 | 484 | """decrement the dirstate counter if applicable""" |
|
455 | 485 | if old_entry is not None: |
|
456 | 486 | if "_dirs" in self.__dict__ and not old_entry.removed: |
|
457 | 487 | self._dirs.delpath(filename) |
|
458 | 488 | if "_alldirs" in self.__dict__ and not remove_variant: |
|
459 | 489 | self._alldirs.delpath(filename) |
|
460 | 490 | elif remove_variant and "_alldirs" in self.__dict__: |
|
461 | 491 | self._alldirs.addpath(filename) |
|
462 | 492 | if "filefoldmap" in self.__dict__: |
|
463 | 493 | normed = util.normcase(filename) |
|
464 | 494 | self.filefoldmap.pop(normed, None) |
|
465 | 495 | |
|
466 | 496 | @propertycache |
|
467 | 497 | def filefoldmap(self): |
|
468 | 498 | """Returns a dictionary mapping normalized case paths to their |
|
469 | 499 | non-normalized versions. |
|
470 | 500 | """ |
|
471 | 501 | try: |
|
472 | 502 | makefilefoldmap = parsers.make_file_foldmap |
|
473 | 503 | except AttributeError: |
|
474 | 504 | pass |
|
475 | 505 | else: |
|
476 | 506 | return makefilefoldmap( |
|
477 | 507 | self._map, util.normcasespec, util.normcasefallback |
|
478 | 508 | ) |
|
479 | 509 | |
|
480 | 510 | f = {} |
|
481 | 511 | normcase = util.normcase |
|
482 | 512 | for name, s in self._map.items(): |
|
483 | 513 | if not s.removed: |
|
484 | 514 | f[normcase(name)] = name |
|
485 | 515 | f[b'.'] = b'.' # prevents useless util.fspath() invocation |
|
486 | 516 | return f |
|
487 | 517 | |
|
488 | 518 | @propertycache |
|
489 | 519 | def dirfoldmap(self): |
|
490 | 520 | f = {} |
|
491 | 521 | normcase = util.normcase |
|
492 | 522 | for name in self._dirs: |
|
493 | 523 | f[normcase(name)] = name |
|
494 | 524 | return f |
|
495 | 525 | |
|
496 | 526 | def hastrackeddir(self, d): |
|
497 | 527 | """ |
|
498 | 528 | Returns True if the dirstate contains a tracked (not removed) file |
|
499 | 529 | in this directory. |
|
500 | 530 | """ |
|
501 | 531 | return d in self._dirs |
|
502 | 532 | |
|
503 | 533 | def hasdir(self, d): |
|
504 | 534 | """ |
|
505 | 535 | Returns True if the dirstate contains a file (tracked or removed) |
|
506 | 536 | in this directory. |
|
507 | 537 | """ |
|
508 | 538 | return d in self._alldirs |
|
509 | 539 | |
|
510 | 540 | @propertycache |
|
511 | 541 | def _dirs(self): |
|
512 | 542 | return pathutil.dirs(self._map, only_tracked=True) |
|
513 | 543 | |
|
514 | 544 | @propertycache |
|
515 | 545 | def _alldirs(self): |
|
516 | 546 | return pathutil.dirs(self._map) |
|
517 | 547 | |
|
518 | 548 | ### code related to manipulation of entries and copy-sources |
|
519 | 549 | |
|
520 | 550 | def reset_state( |
|
521 | 551 | self, |
|
522 | 552 | filename, |
|
523 | 553 | wc_tracked=False, |
|
524 | 554 | p1_tracked=False, |
|
525 | 555 | p2_info=False, |
|
526 | 556 | has_meaningful_mtime=True, |
|
527 | 557 | parentfiledata=None, |
|
528 | 558 | ): |
|
529 | 559 | """Set an entry to a given state, disregarding all previous state |
|
530 | 560 | |
|
531 | 561 | This is to be used by the part of the dirstate API dedicated to |
|
532 | 562 | adjusting the dirstate after an update/merge. |
|
533 | 563 | |
|
534 | 564 | note: calling this might result in no entry existing at all if the |
|
535 | 565 | dirstate map does not see any point in having one for this file |
|
536 | 566 | anymore. |
|
537 | 567 | """ |
|
538 | 568 | # copy information is now outdated |
|
539 | 569 | # (maybe new information should be passed directly to this function) |
|
540 | 570 | self.copymap.pop(filename, None) |
|
541 | 571 | |
|
542 | 572 | if not (p1_tracked or p2_info or wc_tracked): |
|
543 | 573 | old_entry = self._map.get(filename) |
|
544 | 574 | self._drop_entry(filename) |
|
545 | 575 | self._dirs_decr(filename, old_entry=old_entry) |
|
546 | 576 | return |
|
547 | 577 | |
|
548 | 578 | old_entry = self._map.get(filename) |
|
549 | 579 | self._dirs_incr(filename, old_entry) |
|
550 | 580 | entry = DirstateItem( |
|
551 | 581 | wc_tracked=wc_tracked, |
|
552 | 582 | p1_tracked=p1_tracked, |
|
553 | 583 | p2_info=p2_info, |
|
554 | 584 | has_meaningful_mtime=has_meaningful_mtime, |
|
555 | 585 | parentfiledata=parentfiledata, |
|
556 | 586 | ) |
|
557 | 587 | self._map[filename] = entry |
|
558 | 588 | |
|
559 | 589 | def set_tracked(self, filename): |
|
560 | 590 | new = False |
|
561 | 591 | entry = self.get(filename) |
|
562 | 592 | if entry is None: |
|
563 | 593 | self._dirs_incr(filename) |
|
564 | 594 | entry = DirstateItem( |
|
565 | 595 | wc_tracked=True, |
|
566 | 596 | ) |
|
567 | 597 | |
|
568 | 598 | self._map[filename] = entry |
|
569 | 599 | new = True |
|
570 | 600 | elif not entry.tracked: |
|
571 | 601 | self._dirs_incr(filename, entry) |
|
572 | 602 | entry.set_tracked() |
|
573 | 603 | self._refresh_entry(filename, entry) |
|
574 | 604 | new = True |
|
575 | 605 | else: |
|
576 | 606 | # XXX This is probably overkill for most cases, but we need this to |
|
577 | 607 | # fully replace the `normallookup` call with the `set_tracked` one. |
|
578 | 608 | # Consider smoothing this in the future. |
|
579 | 609 | entry.set_possibly_dirty() |
|
580 | 610 | self._refresh_entry(filename, entry) |
|
581 | 611 | return new |
|
582 | 612 | |
|
583 | 613 | def set_untracked(self, f): |
|
584 | 614 | """Mark a file as no longer tracked in the dirstate map""" |
|
585 | 615 | entry = self.get(f) |
|
586 | 616 | if entry is None: |
|
587 | 617 | return False |
|
588 | 618 | else: |
|
589 | 619 | self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added) |
|
590 | 620 | if not entry.p2_info: |
|
591 | 621 | self.copymap.pop(f, None) |
|
592 | 622 | entry.set_untracked() |
|
593 | 623 | self._refresh_entry(f, entry) |
|
594 | 624 | return True |
|
595 | 625 | |
|
596 | 626 | def set_clean(self, filename, mode, size, mtime): |
|
597 | 627 | """mark a file as back to a clean state""" |
|
598 | 628 | entry = self[filename] |
|
599 | 629 | size = size & rangemask |
|
600 | 630 | entry.set_clean(mode, size, mtime) |
|
601 | 631 | self._refresh_entry(filename, entry) |
|
602 | 632 | self.copymap.pop(filename, None) |
|
603 | 633 | |
|
604 | 634 | def set_possibly_dirty(self, filename): |
|
605 | 635 | """record that the current state of the file on disk is unknown""" |
|
606 | 636 | entry = self[filename] |
|
607 | 637 | entry.set_possibly_dirty() |
|
608 | 638 | self._refresh_entry(filename, entry) |
|
609 | 639 | |
|
610 | 640 | def _refresh_entry(self, f, entry): |
|
611 | 641 | """record updated state of an entry""" |
|
612 | 642 | if not entry.any_tracked: |
|
613 | 643 | self._map.pop(f, None) |
|
614 | 644 | |
|
615 | 645 | def _drop_entry(self, f): |
|
616 | 646 | """remove any entry for file f |
|
617 | 647 | |
|
618 | 648 | This should also drop associated copy information |
|
619 | 649 | |
|
620 | 650 | The fact we actually need to drop it is the responsibility of the caller |
|
621 | 651 | """ |
|
622 | 652 | self._map.pop(f, None) |
|
623 | 653 | self.copymap.pop(f, None) |
|
624 | 654 | |
|
625 | 655 | |
|
626 | 656 | if rustmod is not None: |
|
627 | 657 | |
|
628 | 658 | class dirstatemap(_dirstatemapcommon): |
|
629 | 659 | ### Core data storage and access |
|
630 | 660 | |
|
631 | 661 | @propertycache |
|
632 | 662 | def _map(self): |
|
633 | 663 | """ |
|
634 | 664 | Fills the Dirstatemap when called. |
|
635 | 665 | """ |
|
636 | 666 | # ignore HG_PENDING because identity is used only for writing |
|
637 | 667 | self._set_identity() |
|
638 | 668 | |
|
639 | 669 | testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file') |
|
640 | 670 | if self._use_dirstate_v2: |
|
641 | 671 | try: |
|
642 | 672 | self.docket |
|
643 | 673 | except error.CorruptedDirstate as e: |
|
644 | 674 | # fall back to dirstate-v1 if we fail to read v2 |
|
645 | 675 | parents = self._v1_map(e) |
|
646 | 676 | else: |
|
647 | 677 | parents = self.docket.parents |
|
648 | 678 | inode = ( |
|
649 | 679 | self.identity.stat.st_ino |
|
650 | 680 | if self.identity is not None |
|
651 | 681 | and self.identity.stat is not None |
|
652 | 682 | else None |
|
653 | 683 | ) |
|
654 | 684 | testing.wait_on_cfg( |
|
655 | 685 | self._ui, b'dirstate.post-docket-read-file' |
|
656 | 686 | ) |
|
657 | 687 | if not self.docket.uuid: |
|
658 | 688 | data = b'' |
|
659 | 689 | self._map = rustmod.DirstateMap.new_empty() |
|
660 | 690 | else: |
|
661 | 691 | data = self._read_v2_data() |
|
662 | 692 | self._map = rustmod.DirstateMap.new_v2( |
|
663 | 693 | data, |
|
664 | 694 | self.docket.data_size, |
|
665 | 695 | self.docket.tree_metadata, |
|
666 | 696 | self.docket.uuid, |
|
667 | 697 | inode, |
|
668 | 698 | ) |
|
669 | 699 | parents = self.docket.parents |
|
670 | 700 | else: |
|
671 | 701 | parents = self._v1_map() |
|
672 | 702 | |
|
673 | 703 | if parents and not self._dirtyparents: |
|
674 | 704 | self.setparents(*parents) |
|
675 | 705 | |
|
676 | 706 | self.__contains__ = self._map.__contains__ |
|
677 | 707 | self.__getitem__ = self._map.__getitem__ |
|
678 | 708 | self.get = self._map.get |
|
679 | 709 | return self._map |
|
680 | 710 | |
|
681 | 711 | def _v1_map(self, from_v2_exception=None): |
|
682 | 712 | self._set_identity() |
|
683 | 713 | inode = ( |
|
684 | 714 | self.identity.stat.st_ino |
|
685 | 715 | if self.identity is not None and self.identity.stat is not None |
|
686 | 716 | else None |
|
687 | 717 | ) |
|
688 | 718 | try: |
|
689 | 719 | self._map, parents = rustmod.DirstateMap.new_v1( |
|
690 | 720 | self._readdirstatefile(), inode |
|
691 | 721 | ) |
|
692 | 722 | except OSError as e: |
|
693 | 723 | if from_v2_exception is not None: |
|
694 | 724 | raise e from from_v2_exception |
|
695 | 725 | raise |
|
696 | 726 | return parents |
|
697 | 727 | |
|
698 | 728 | @property |
|
699 | 729 | def copymap(self): |
|
700 | 730 | return self._map.copymap() |
|
701 | 731 | |
|
702 | 732 | def debug_iter(self, all): |
|
703 | 733 | """ |
|
704 | 734 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
705 | 735 | |
|
706 | 736 | `all`: also include with `state == b' '` dirstate tree nodes that |
|
707 | 737 | don't have an associated `DirstateItem`. |
|
708 | 738 | |
|
709 | 739 | """ |
|
710 | 740 | return self._map.debug_iter(all) |
|
711 | 741 | |
|
712 | 742 | def clear(self): |
|
713 | 743 | self._map.clear() |
|
714 | 744 | self.setparents( |
|
715 | 745 | self._nodeconstants.nullid, self._nodeconstants.nullid |
|
716 | 746 | ) |
|
717 | 747 | util.clearcachedproperty(self, b"_dirs") |
|
718 | 748 | util.clearcachedproperty(self, b"_alldirs") |
|
719 | 749 | util.clearcachedproperty(self, b"dirfoldmap") |
|
720 | 750 | |
|
721 | 751 | def items(self): |
|
722 | 752 | return self._map.items() |
|
723 | 753 | |
|
724 | 754 | # forward for python2,3 compat |
|
725 | 755 | iteritems = items |
|
726 | 756 | |
|
727 | 757 | def keys(self): |
|
728 | 758 | return iter(self._map) |
|
729 | 759 | |
|
730 | 760 | ### reading/setting parents |
|
731 | 761 | |
|
732 | 762 | def setparents(self, p1, p2, fold_p2=False): |
|
733 | 763 | self._parents = (p1, p2) |
|
734 | 764 | self._dirtyparents = True |
|
735 | 765 | copies = {} |
|
736 | 766 | if fold_p2: |
|
737 | 767 | copies = self._map.setparents_fixup() |
|
738 | 768 | return copies |
|
739 | 769 | |
|
740 | 770 | ### disk interaction |
|
741 | 771 | |
|
742 | 772 | @propertycache |
|
743 | 773 | def identity(self): |
|
744 | 774 | self._map |
|
745 | 775 | return self.identity |
|
746 | 776 | |
|
747 | 777 | def write(self, tr, st): |
|
748 | 778 | if not self._use_dirstate_v2: |
|
749 | 779 | p1, p2 = self.parents() |
|
750 | 780 | packed = self._map.write_v1(p1, p2) |
|
751 | 781 | st.write(packed) |
|
752 | 782 | st.close() |
|
753 | 783 | self._dirtyparents = False |
|
754 | 784 | return |
|
755 | 785 | |
|
756 | 786 | write_mode = self._write_mode |
|
757 | 787 | try: |
|
758 | 788 | docket = self.docket |
|
759 | 789 | except error.CorruptedDirstate: |
|
760 | 790 | # fall back to dirstate-v1 if we fail to parse v2 |
|
761 | 791 | docket = None |
|
762 | 792 | |
|
763 | 793 | # We can only append to an existing data file if there is one |
|
764 | 794 | if docket is None or docket.uuid is None: |
|
765 | 795 | write_mode = WRITE_MODE_FORCE_NEW |
|
766 | 796 | packed, meta, append = self._map.write_v2(write_mode) |
|
767 | 797 | if append: |
|
768 | 798 | docket = self.docket |
|
769 | 799 | data_filename = docket.data_filename() |
|
770 | 800 | # We mark it for backup to make sure a future `hg rollback` (or |
|
771 | 801 | # `hg recover`?) call finds the data it needs to restore a |
|
772 | 802 | # working repository. |
|
773 | 803 | # |
|
774 | 804 | # The backup can use a hardlink because the format is resistant |
|
775 | 805 | # to trailing "dead" data. |
|
776 | 806 | if tr is not None: |
|
777 | 807 | tr.addbackup(data_filename, location=b'plain') |
|
778 | 808 | with self._opener(data_filename, b'r+b') as fp: |
|
779 | 809 | fp.seek(docket.data_size) |
|
780 | 810 | assert fp.tell() == docket.data_size |
|
781 | 811 | written = fp.write(packed) |
|
782 | 812 | if written is not None: # py2 may return None |
|
783 | 813 | assert written == len(packed), (written, len(packed)) |
|
784 | 814 | docket.data_size += len(packed) |
|
785 | 815 | docket.parents = self.parents() |
|
786 | 816 | docket.tree_metadata = meta |
|
787 | 817 | st.write(docket.serialize()) |
|
788 | 818 | st.close() |
|
789 | 819 | else: |
|
790 | 820 | self.write_v2_no_append(tr, st, meta, packed) |
|
791 | 821 | # Reload from the newly-written file |
|
792 | 822 | util.clearcachedproperty(self, b"_map") |
|
793 | 823 | self._dirtyparents = False |
|
794 | 824 | |
|
795 | 825 | ### code related to maintaining and accessing "extra" property |
|
796 | 826 | # (e.g. "has_dir") |
|
797 | 827 | |
|
798 | 828 | @propertycache |
|
799 | 829 | def filefoldmap(self): |
|
800 | 830 | """Returns a dictionary mapping normalized case paths to their |
|
801 | 831 | non-normalized versions. |
|
802 | 832 | """ |
|
803 | 833 | return self._map.filefoldmapasdict() |
|
804 | 834 | |
|
805 | 835 | def hastrackeddir(self, d): |
|
806 | 836 | return self._map.hastrackeddir(d) |
|
807 | 837 | |
|
808 | 838 | def hasdir(self, d): |
|
809 | 839 | return self._map.hasdir(d) |
|
810 | 840 | |
|
811 | 841 | @propertycache |
|
812 | 842 | def dirfoldmap(self): |
|
813 | 843 | f = {} |
|
814 | 844 | normcase = util.normcase |
|
815 | 845 | for name in self._map.tracked_dirs(): |
|
816 | 846 | f[normcase(name)] = name |
|
817 | 847 | return f |
|
818 | 848 | |
|
819 | 849 | ### code related to manipulation of entries and copy-sources |
|
820 | 850 | |
|
821 | 851 | def set_tracked(self, f): |
|
822 | 852 | return self._map.set_tracked(f) |
|
823 | 853 | |
|
824 | 854 | def set_untracked(self, f): |
|
825 | 855 | return self._map.set_untracked(f) |
|
826 | 856 | |
|
827 | 857 | def set_clean(self, filename, mode, size, mtime): |
|
828 | 858 | self._map.set_clean(filename, mode, size, mtime) |
|
829 | 859 | |
|
830 | 860 | def set_possibly_dirty(self, f): |
|
831 | 861 | self._map.set_possibly_dirty(f) |
|
832 | 862 | |
|
833 | 863 | def reset_state( |
|
834 | 864 | self, |
|
835 | 865 | filename, |
|
836 | 866 | wc_tracked=False, |
|
837 | 867 | p1_tracked=False, |
|
838 | 868 | p2_info=False, |
|
839 | 869 | has_meaningful_mtime=True, |
|
840 | 870 | parentfiledata=None, |
|
841 | 871 | ): |
|
842 | 872 | return self._map.reset_state( |
|
843 | 873 | filename, |
|
844 | 874 | wc_tracked, |
|
845 | 875 | p1_tracked, |
|
846 | 876 | p2_info, |
|
847 | 877 | has_meaningful_mtime, |
|
848 | 878 | parentfiledata, |
|
849 | 879 | ) |
@@ -1,809 +1,811 @@
|
1 | 1 | # posix.py - Posix utility function implementations for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import fcntl |
|
11 | 11 | import getpass |
|
12 | 12 | import grp |
|
13 | 13 | import os |
|
14 | 14 | import pwd |
|
15 | 15 | import re |
|
16 | 16 | import select |
|
17 | 17 | import stat |
|
18 | 18 | import sys |
|
19 | 19 | import tempfile |
|
20 | 20 | import typing |
|
21 | 21 | import unicodedata |
|
22 | 22 | |
|
23 | 23 | from typing import ( |
|
24 | 24 | Any, |
|
25 | 25 | AnyStr, |
|
26 | 26 | Iterable, |
|
27 | 27 | Iterator, |
|
28 | 28 | List, |
|
29 | 29 | Match, |
|
30 | 30 | NoReturn, |
|
31 | 31 | Optional, |
|
32 | 32 | Sequence, |
|
33 | 33 | Tuple, |
|
34 | 34 | Union, |
|
35 | 35 | ) |
|
36 | 36 | |
|
37 | 37 | from .i18n import _ |
|
38 | 38 | from .pycompat import ( |
|
39 | 39 | open, |
|
40 | 40 | ) |
|
41 | 41 | from . import ( |
|
42 | 42 | encoding, |
|
43 | 43 | error, |
|
44 | 44 | policy, |
|
45 | 45 | pycompat, |
|
46 | 46 | ) |
|
47 | 47 | |
|
48 | 48 | osutil = policy.importmod('osutil') |
|
49 | 49 | |
|
50 | 50 | normpath = os.path.normpath |
|
51 | 51 | samestat = os.path.samestat |
|
52 | 52 | abspath = os.path.abspath # re-exports |
|
53 | 53 | |
|
54 | 54 | try: |
|
55 | 55 | oslink = os.link |
|
56 | 56 | except AttributeError: |
|
57 | 57 | # Some platforms build Python without os.link on systems that are |
|
58 | 58 | # vaguely unix-like but don't have hardlink support. For those |
|
59 | 59 | # poor souls, just say we tried and that it failed so we fall back |
|
60 | 60 | # to copies. |
|
61 | 61 | def oslink(src: bytes, dst: bytes) -> NoReturn: |
|
62 | 62 | raise OSError( |
|
63 | 63 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) |
|
64 | 64 | ) |
|
65 | 65 | |
|
66 | 66 | |
|
67 | 67 | readlink = os.readlink |
|
68 | 68 | unlink = os.unlink |
|
69 | 69 | rename = os.rename |
|
70 | 70 | removedirs = os.removedirs |
|
71 | 71 | |
|
72 | 72 | if typing.TYPE_CHECKING: |
|
73 | 73 | |
|
74 | 74 | def normpath(path: bytes) -> bytes: |
|
75 | 75 | raise NotImplementedError |
|
76 | 76 | |
|
77 | 77 | def abspath(path: AnyStr) -> AnyStr: |
|
78 | 78 | raise NotImplementedError |
|
79 | 79 | |
|
80 | 80 | def oslink(src: bytes, dst: bytes) -> None: |
|
81 | 81 | raise NotImplementedError |
|
82 | 82 | |
|
83 | 83 | def readlink(path: bytes) -> bytes: |
|
84 | 84 | raise NotImplementedError |
|
85 | 85 | |
|
86 | 86 | def unlink(path: bytes) -> None: |
|
87 | 87 | raise NotImplementedError |
|
88 | 88 | |
|
89 | 89 | def rename(src: bytes, dst: bytes) -> None: |
|
90 | 90 | raise NotImplementedError |
|
91 | 91 | |
|
92 | 92 | def removedirs(name: bytes) -> None: |
|
93 | 93 | raise NotImplementedError |
|
94 | 94 | |
|
95 | 95 | |
|
96 | 96 | expandglobs: bool = False |
|
97 | 97 | |
|
98 | 98 | umask: int = os.umask(0) |
|
99 | 99 | os.umask(umask) |
|
100 | 100 | |
|
101 | 101 | posixfile = open |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | def split(p: bytes) -> Tuple[bytes, bytes]: |
|
105 | 105 | """Same as posixpath.split, but faster |
|
106 | 106 | |
|
107 | 107 | >>> import posixpath |
|
108 | 108 | >>> for f in [b'/absolute/path/to/file', |
|
109 | 109 | ... b'relative/path/to/file', |
|
110 | 110 | ... b'file_alone', |
|
111 | 111 | ... b'path/to/directory/', |
|
112 | 112 | ... b'/multiple/path//separators', |
|
113 | 113 | ... b'/file_at_root', |
|
114 | 114 | ... b'///multiple_leading_separators_at_root', |
|
115 | 115 | ... b'']: |
|
116 | 116 | ... assert split(f) == posixpath.split(f), f |
|
117 | 117 | """ |
|
118 | 118 | ht = p.rsplit(b'/', 1) |
|
119 | 119 | if len(ht) == 1: |
|
120 | 120 | return b'', p |
|
121 | 121 | nh = ht[0].rstrip(b'/') |
|
122 | 122 | if nh: |
|
123 | 123 | return nh, ht[1] |
|
124 | 124 | return ht[0] + b'/', ht[1] |
|
125 | 125 | |
|
126 | 126 | |
|
127 | 127 | def openhardlinks() -> bool: |
|
128 | 128 | '''return true if it is safe to hold open file handles to hardlinks''' |
|
129 | 129 | return True |
|
130 | 130 | |
|
131 | 131 | |
|
132 | 132 | def nlinks(name: bytes) -> int: |
|
133 | 133 | '''return number of hardlinks for the given file''' |
|
134 | 134 | return os.lstat(name).st_nlink |
|
135 | 135 | |
|
136 | 136 | |
|
137 | 137 | def parsepatchoutput(output_line: bytes) -> bytes: |
|
138 | 138 | """parses the output produced by patch and returns the filename""" |
|
139 | 139 | pf = output_line[14:] |
|
140 | 140 | if pycompat.sysplatform == b'OpenVMS': |
|
141 | 141 | if pf[0] == b'`': |
|
142 | 142 | pf = pf[1:-1] # Remove the quotes |
|
143 | 143 | else: |
|
144 | 144 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: |
|
145 | 145 | pf = pf[1:-1] # Remove the quotes |
|
146 | 146 | return pf |
|
147 | 147 | |
|
148 | 148 | |
|
149 | 149 | def sshargs( |
|
150 | 150 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] |
|
151 | 151 | ) -> bytes: |
|
152 | 152 | '''Build argument list for ssh''' |
|
153 | 153 | args = user and (b"%s@%s" % (user, host)) or host |
|
154 | 154 | if b'-' in args[:1]: |
|
155 | 155 | raise error.Abort( |
|
156 | 156 | _(b'illegal ssh hostname or username starting with -: %s') % args |
|
157 | 157 | ) |
|
158 | 158 | args = shellquote(args) |
|
159 | 159 | if port: |
|
160 | 160 | args = b'-p %s %s' % (shellquote(port), args) |
|
161 | 161 | return args |
|
162 | 162 | |
|
163 | 163 | |
|
164 | 164 | def isexec(f: bytes) -> bool: |
|
165 | 165 | """check whether a file is executable""" |
|
166 | 166 | return os.lstat(f).st_mode & 0o100 != 0 |
|
167 | 167 | |
|
168 | 168 | |
|
169 | 169 | def setflags(f: bytes, l: bool, x: bool) -> None: |
|
170 | 170 | st = os.lstat(f) |
|
171 | 171 | s = st.st_mode |
|
172 | 172 | if l: |
|
173 | 173 | if not stat.S_ISLNK(s): |
|
174 | 174 | # switch file to link |
|
175 | 175 | with open(f, b'rb') as fp: |
|
176 | 176 | data = fp.read() |
|
177 | 177 | unlink(f) |
|
178 | 178 | try: |
|
179 | 179 | os.symlink(data, f) |
|
180 | 180 | except OSError: |
|
181 | 181 | # failed to make a link, rewrite file |
|
182 | 182 | with open(f, b"wb") as fp: |
|
183 | 183 | fp.write(data) |
|
184 | 184 | |
|
185 | 185 | # no chmod needed at this point |
|
186 | 186 | return |
|
187 | 187 | if stat.S_ISLNK(s): |
|
188 | 188 | # switch link to file |
|
189 | 189 | data = os.readlink(f) |
|
190 | 190 | unlink(f) |
|
191 | 191 | with open(f, b"wb") as fp: |
|
192 | 192 | fp.write(data) |
|
193 | 193 | s = 0o666 & ~umask # avoid restatting for chmod |
|
194 | 194 | |
|
195 | 195 | sx = s & 0o100 |
|
196 | 196 | if st.st_nlink > 1 and bool(x) != bool(sx): |
|
197 | 197 | # the file is a hardlink, break it |
|
198 | 198 | with open(f, b"rb") as fp: |
|
199 | 199 | data = fp.read() |
|
200 | 200 | unlink(f) |
|
201 | 201 | with open(f, b"wb") as fp: |
|
202 | 202 | fp.write(data) |
|
203 | 203 | |
|
204 | 204 | if x and not sx: |
|
205 | 205 | # Turn on +x for every +r bit when making a file executable |
|
206 | 206 | # and obey umask. |
|
207 | 207 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) |
|
208 | 208 | elif not x and sx: |
|
209 | 209 | # Turn off all +x bits |
|
210 | 210 | os.chmod(f, s & 0o666) |
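
The exec-bit expression in `setflags` (`s | (s & 0o444) >> 2 & ~umask`) turns on an execute bit for every read bit already present, filtered through the umask. A worked example with a typical mode and umask:

umask = 0o022
s = 0o644                      # rw-r--r--
exec_bits = (s & 0o444) >> 2   # 0o111: one +x for every +r
exec_bits &= ~umask            # umask 0o022 clears no execute bits here
assert s | exec_bits == 0o755  # rwxr-xr-x
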
|
211 | 211 | |
|
212 | 212 | |
|
213 | 213 | def copymode( |
|
214 | 214 | src: bytes, |
|
215 | 215 | dst: bytes, |
|
216 | 216 | mode: Optional[bytes] = None, |
|
217 | 217 | enforcewritable: bool = False, |
|
218 | 218 | ) -> None: |
|
219 | 219 | """Copy the file mode from the file at path src to dst. |
|
220 | 220 | If src doesn't exist, we're using mode instead. If mode is None, we're |
|
221 | 221 | using umask.""" |
|
222 | 222 | try: |
|
223 | 223 | st_mode = os.lstat(src).st_mode & 0o777 |
|
224 | 224 | except FileNotFoundError: |
|
225 | 225 | st_mode = mode |
|
226 | 226 | if st_mode is None: |
|
227 | 227 | st_mode = ~umask |
|
228 | 228 | st_mode &= 0o666 |
|
229 | 229 | |
|
230 | 230 | new_mode = st_mode |
|
231 | 231 | |
|
232 | 232 | if enforcewritable: |
|
233 | 233 | new_mode |= stat.S_IWUSR |
|
234 | 234 | |
|
235 | 235 | os.chmod(dst, new_mode) |
|
236 | 236 | |
|
237 | 237 | |
|
238 | 238 | def checkexec(path: bytes) -> bool: |
|
239 | 239 | """ |
|
240 | 240 | Check whether the given path is on a filesystem with UNIX-like exec flags |
|
241 | 241 | |
|
242 | 242 | Requires a directory (like /foo/.hg) |
|
243 | 243 | """ |
|
244 | 244 | |
|
245 | 245 | # VFAT on some Linux versions can flip mode but it doesn't persist |
|
246 | 246 | # a FS remount. Frequently we can detect it if files are created |
|
247 | 247 | # with exec bit on. |
|
248 | 248 | |
|
249 | 249 | try: |
|
250 | 250 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
251 | 251 | basedir = os.path.join(path, b'.hg') |
|
252 | 252 | cachedir = os.path.join(basedir, b'wcache') |
|
253 | 253 | storedir = os.path.join(basedir, b'store') |
|
254 | 254 | if not os.path.exists(cachedir): |
|
255 | 255 | try: |
|
256 | 256 | # we want to create the 'cache' directory, not the '.hg' one. |
|
257 | 257 | # Automatically creating '.hg' directory could silently spawn |
|
258 | 258 | # invalid Mercurial repositories. That seems like a bad idea. |
|
259 | 259 | os.mkdir(cachedir) |
|
260 | 260 | if os.path.exists(storedir): |
|
261 | 261 | copymode(storedir, cachedir) |
|
262 | 262 | else: |
|
263 | 263 | copymode(basedir, cachedir) |
|
264 | 264 | except (IOError, OSError): |
|
265 | 265 | # our other fallback logic triggers |
|
266 | 266 | pass |
|
267 | 267 | if os.path.isdir(cachedir): |
|
268 | 268 | checkisexec = os.path.join(cachedir, b'checkisexec') |
|
269 | 269 | checknoexec = os.path.join(cachedir, b'checknoexec') |
|
270 | 270 | |
|
271 | 271 | try: |
|
272 | 272 | m = os.stat(checkisexec).st_mode |
|
273 | 273 | except FileNotFoundError: |
|
274 | 274 | # checkisexec does not exist - fall through ... |
|
275 | 275 | pass |
|
276 | 276 | else: |
|
277 | 277 | # checkisexec exists, check if it actually is exec |
|
278 | 278 | if m & EXECFLAGS != 0: |
|
279 | 279 | # ensure checknoexec exists, check it isn't exec |
|
280 | 280 | try: |
|
281 | 281 | m = os.stat(checknoexec).st_mode |
|
282 | 282 | except FileNotFoundError: |
|
283 | 283 | open(checknoexec, b'w').close() # might fail |
|
284 | 284 | m = os.stat(checknoexec).st_mode |
|
285 | 285 | if m & EXECFLAGS == 0: |
|
286 | 286 | # check-exec is exec and check-no-exec is not exec |
|
287 | 287 | return True |
|
288 | 288 | # checknoexec exists but is exec - delete it |
|
289 | 289 | unlink(checknoexec) |
|
290 | 290 | # checkisexec exists but is not exec - delete it |
|
291 | 291 | unlink(checkisexec) |
|
292 | 292 | |
|
293 | 293 | # check using one file, leave it as checkisexec |
|
294 | 294 | checkdir = cachedir |
|
295 | 295 | else: |
|
296 | 296 | # check directly in path and don't leave checkisexec behind |
|
297 | 297 | checkdir = path |
|
298 | 298 | checkisexec = None |
|
299 | 299 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') |
|
300 | 300 | try: |
|
301 | 301 | os.close(fh) |
|
302 | 302 | m = os.stat(fn).st_mode |
|
303 | 303 | if m & EXECFLAGS == 0: |
|
304 | 304 | os.chmod(fn, m & 0o777 | EXECFLAGS) |
|
305 | 305 | if os.stat(fn).st_mode & EXECFLAGS != 0: |
|
306 | 306 | if checkisexec is not None: |
|
307 | 307 | os.rename(fn, checkisexec) |
|
308 | 308 | fn = None |
|
309 | 309 | return True |
|
310 | 310 | finally: |
|
311 | 311 | if fn is not None: |
|
312 | 312 | unlink(fn) |
|
313 | 313 | except (IOError, OSError): |
|
314 | 314 | # we don't care, the user probably won't be able to commit anyway |
|
315 | 315 | return False |
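
Stripped of the caching through `.hg/wcache`, the underlying probe in `checkexec` is: create a scratch file, try to flip its exec bit, and see whether the bit survives a fresh stat. A minimal self-contained version of just that probe (no cache files, illustrative only):

import os
import stat
import tempfile

def exec_bits_supported(directory):
    """Return True if chmod +x persists for files created in directory."""
    execflags = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    fd, name = tempfile.mkstemp(dir=directory, prefix='exec-probe-')
    try:
        os.close(fd)
        mode = os.stat(name).st_mode
        os.chmod(name, mode | execflags)
        return bool(os.stat(name).st_mode & execflags)
    except OSError:
        return False
    finally:
        os.unlink(name)
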
|
316 | 316 | |
|
317 | 317 | |
|
318 | 318 | def checklink(path: bytes) -> bool: |
|
319 | 319 | """check whether the given path is on a symlink-capable filesystem""" |
|
320 | 320 | # mktemp is not racy because symlink creation will fail if the |
|
321 | 321 | # file already exists |
|
322 | 322 | while True: |
|
323 | 323 | cachedir = os.path.join(path, b'.hg', b'wcache') |
|
324 | 324 | checklink = os.path.join(cachedir, b'checklink') |
|
325 | 325 | # try fast path, read only |
|
326 | 326 | if os.path.islink(checklink): |
|
327 | 327 | return True |
|
328 | 328 | if os.path.isdir(cachedir): |
|
329 | 329 | checkdir = cachedir |
|
330 | 330 | else: |
|
331 | 331 | checkdir = path |
|
332 | 332 | cachedir = None |
|
333 | 333 | name = tempfile.mktemp( |
|
334 | 334 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' |
|
335 | 335 | ) |
|
336 | 336 | name = pycompat.fsencode(name) |
|
337 | 337 | try: |
|
338 | 338 | fd = None |
|
339 | 339 | if cachedir is None: |
|
340 | 340 | fd = pycompat.namedtempfile( |
|
341 | 341 | dir=checkdir, prefix=b'hg-checklink-' |
|
342 | 342 | ) |
|
343 | 343 | target = os.path.basename(fd.name) |
|
344 | 344 | else: |
|
345 | 345 | # create a fixed file to link to; doesn't matter if it |
|
346 | 346 | # already exists. |
|
347 | 347 | target = b'checklink-target' |
|
348 | 348 | try: |
|
349 | 349 | fullpath = os.path.join(cachedir, target) |
|
350 | 350 | open(fullpath, b'w').close() |
|
351 | 351 | except PermissionError: |
|
352 | 352 | # If we can't write to cachedir, just pretend |
|
353 | 353 | # that the fs is readonly and by association |
|
354 | 354 | # that the fs won't support symlinks. This |
|
355 | 355 | # seems like the least dangerous way to avoid |
|
356 | 356 | # data loss. |
|
357 | 357 | return False |
|
358 | 358 | try: |
|
359 | 359 | os.symlink(target, name) |
|
360 | 360 | if cachedir is None: |
|
361 | 361 | unlink(name) |
|
362 | 362 | else: |
|
363 | 363 | try: |
|
364 | 364 | os.rename(name, checklink) |
|
365 | 365 | except OSError: |
|
366 | 366 | unlink(name) |
|
367 | 367 | return True |
|
368 | 368 | except FileExistsError: |
|
369 | 369 | # link creation might race, try again |
|
370 | 370 | continue |
|
371 | 371 | finally: |
|
372 | 372 | if fd is not None: |
|
373 | 373 | fd.close() |
|
374 | 374 | except AttributeError: |
|
375 | 375 | return False |
|
376 | 376 | except OSError as inst: |
|
377 | 377 | # sshfs might report failure while successfully creating the link |
|
378 | 378 | if inst.errno == errno.EIO and os.path.exists(name): |
|
379 | 379 | unlink(name) |
|
380 | 380 | return False |
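
Similarly, the core of `checklink`, without the `.hg/wcache` caching and retry handling, is simply: try to create a symlink in the target directory and report whether that works. A minimal sketch of that probe (illustrative, standard library only):

import os
import tempfile

def symlinks_supported(directory):
    """Return True if a symlink can be created inside directory."""
    # mktemp is not racy here: symlink creation fails if the name exists.
    linkname = tempfile.mktemp(dir=directory, prefix='checklink-')
    try:
        os.symlink('some-target', linkname)
    except (OSError, NotImplementedError):
        return False
    else:
        os.unlink(linkname)
        return True
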
|
381 | 381 | |
|
382 | 382 | |
|
383 | 383 | def checkosfilename(path: bytes) -> Optional[bytes]: |
|
384 | 384 | """Check that the base-relative path is a valid filename on this platform. |
|
385 | 385 | Returns None if the path is ok, or a UI string describing the problem.""" |
|
386 | 386 | return None # on posix platforms, every path is ok |
|
387 | 387 | |
|
388 | 388 | |
|
389 | 389 | def getfsmountpoint(dirpath: bytes) -> Optional[bytes]: |
|
390 | 390 | """Get the filesystem mount point from a directory (best-effort) |
|
391 | 391 | |
|
392 | 392 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
393 | 393 | """ |
|
394 | 394 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) |
|
395 | 395 | |
|
396 | 396 | |
|
397 | 397 | def getfstype(dirpath: bytes) -> Optional[bytes]: |
|
398 | 398 | """Get the filesystem type name from a directory (best-effort) |
|
399 | 399 | |
|
400 | 400 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
401 | 401 | """ |
|
402 | 402 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) |
|
403 | 403 | |
|
404 | 404 | |
|
405 | 405 | def get_password() -> bytes: |
|
406 | 406 | return encoding.strtolocal(getpass.getpass('')) |
|
407 | 407 | |
|
408 | 408 | |
|
409 | 409 | def setbinary(fd) -> None: |
|
410 | 410 | pass |
|
411 | 411 | |
|
412 | 412 | |
|
413 | 413 | def pconvert(path: bytes) -> bytes: |
|
414 | 414 | return path |
|
415 | 415 | |
|
416 | 416 | |
|
417 | 417 | def localpath(path: bytes) -> bytes: |
|
418 | 418 | return path |
|
419 | 419 | |
|
420 | 420 | |
|
421 | 421 | def samefile(fpath1: bytes, fpath2: bytes) -> bool: |
|
422 | 422 | """Returns whether path1 and path2 refer to the same file. This is only |
|
423 | 423 | guaranteed to work for files, not directories.""" |
|
424 | 424 | return os.path.samefile(fpath1, fpath2) |
|
425 | 425 | |
|
426 | 426 | |
|
427 | 427 | def samedevice(fpath1: bytes, fpath2: bytes) -> bool: |
|
428 | 428 | """Returns whether fpath1 and fpath2 are on the same device. This is only |
|
429 | 429 | guaranteed to work for files, not directories.""" |
|
430 | 430 | st1 = os.lstat(fpath1) |
|
431 | 431 | st2 = os.lstat(fpath2) |
|
432 | 432 | return st1.st_dev == st2.st_dev |
|
433 | 433 | |
|
434 | 434 | |
|
435 | 435 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems |
|
436 | 436 | def normcase(path: bytes) -> bytes: |
|
437 | 437 | return path.lower() |
|
438 | 438 | |
|
439 | 439 | |
|
440 | 440 | # what normcase does to ASCII strings |
|
441 | 441 | normcasespec: int = encoding.normcasespecs.lower |
|
442 | 442 | # fallback normcase function for non-ASCII strings |
|
443 | 443 | normcasefallback = normcase |
|
444 | 444 | |
|
445 | 445 | if pycompat.isdarwin: |
|
446 | 446 | |
|
447 | 447 | def normcase(path: bytes) -> bytes: |
|
448 | 448 | """ |
|
449 | 449 | Normalize a filename for OS X-compatible comparison: |
|
450 | 450 | - escape-encode invalid characters |
|
451 | 451 | - decompose to NFD |
|
452 | 452 | - lowercase |
|
453 | 453 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff] |
|
454 | 454 | |
|
455 | 455 | >>> normcase(b'UPPER') |
|
456 | 456 | 'upper' |
|
457 | 457 | >>> normcase(b'Caf\\xc3\\xa9') |
|
458 | 458 | 'cafe\\xcc\\x81' |
|
459 | 459 | >>> normcase(b'\\xc3\\x89') |
|
460 | 460 | 'e\\xcc\\x81' |
|
461 | 461 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 |
|
462 | 462 | '%b8%ca%c3\\xca\\xbe%c8.jpg' |
|
463 | 463 | """ |
|
464 | 464 | |
|
465 | 465 | try: |
|
466 | 466 | return encoding.asciilower(path) # exception for non-ASCII |
|
467 | 467 | except UnicodeDecodeError: |
|
468 | 468 | return normcasefallback(path) |
|
469 | 469 | |
|
470 | 470 | normcasespec = encoding.normcasespecs.lower |
|
471 | 471 | |
|
472 | 472 | def normcasefallback(path: bytes) -> bytes: |
|
473 | 473 | try: |
|
474 | 474 | u = path.decode('utf-8') |
|
475 | 475 | except UnicodeDecodeError: |
|
476 | 476 | # OS X percent-encodes any bytes that aren't valid utf-8 |
|
477 | 477 | s = b'' |
|
478 | 478 | pos = 0 |
|
479 | 479 | l = len(path) |
|
480 | 480 | while pos < l: |
|
481 | 481 | try: |
|
482 | 482 | c = encoding.getutf8char(path, pos) |
|
483 | 483 | pos += len(c) |
|
484 | 484 | except ValueError: |
|
485 | 485 | c = b'%%%02X' % ord(path[pos : pos + 1]) |
|
486 | 486 | pos += 1 |
|
487 | 487 | s += c |
|
488 | 488 | |
|
489 | 489 | u = s.decode('utf-8') |
|
490 | 490 | |
|
491 | 491 | # Decompose then lowercase (HFS+ technote specifies lower) |
|
492 | 492 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') |
|
493 | 493 | # drop HFS+ ignored characters |
|
494 | 494 | return encoding.hfsignoreclean(enc) |
|
495 | 495 | |
|
496 | 496 | |
|
497 | 497 | if pycompat.sysplatform == b'cygwin': |
|
498 | 498 | # workaround for cygwin, in which mount point part of path is |
|
499 | 499 | # treated as case sensitive, even though underlying NTFS is case |
|
500 | 500 | # insensitive. |
|
501 | 501 | |
|
502 | 502 | # default mount points |
|
503 | 503 | cygwinmountpoints = sorted( |
|
504 | 504 | [ |
|
505 | 505 | b"/usr/bin", |
|
506 | 506 | b"/usr/lib", |
|
507 | 507 | b"/cygdrive", |
|
508 | 508 | ], |
|
509 | 509 | reverse=True, |
|
510 | 510 | ) |
|
511 | 511 | |
|
512 | 512 | # use uppercasing for normcase, same as the NTFS workaround |
|
513 | 513 | def normcase(path: bytes) -> bytes: |
|
514 | 514 | pathlen = len(path) |
|
515 | 515 | if (pathlen == 0) or (path[0] != pycompat.ossep): |
|
516 | 516 | # treat as relative |
|
517 | 517 | return encoding.upper(path) |
|
518 | 518 | |
|
519 | 519 | # to preserve case of mountpoint part |
|
520 | 520 | for mp in cygwinmountpoints: |
|
521 | 521 | if not path.startswith(mp): |
|
522 | 522 | continue |
|
523 | 523 | |
|
524 | 524 | mplen = len(mp) |
|
525 | 525 | if mplen == pathlen: # mount point itself |
|
526 | 526 | return mp |
|
527 | 527 | if path[mplen] == pycompat.ossep: |
|
528 | 528 | return mp + encoding.upper(path[mplen:]) |
|
529 | 529 | |
|
530 | 530 | return encoding.upper(path) |
|
531 | 531 | |
|
532 | 532 | normcasespec = encoding.normcasespecs.other |
|
533 | 533 | normcasefallback = normcase |
|
534 | 534 | |
|
535 | 535 | # Cygwin translates native ACLs to POSIX permissions, |
|
536 | 536 | # but these translations are not supported by native |
|
537 | 537 | # tools, so the exec bit tends to be set erroneously. |
|
538 | 538 | # Therefore, disable executable bit access on Cygwin. |
|
539 | 539 | def checkexec(path: bytes) -> bool: |
|
540 | 540 | return False |
|
541 | 541 | |
|
542 | 542 | # Similarly, Cygwin's symlink emulation is likely to create |
|
543 | 543 | # problems when Mercurial is used from both Cygwin and native |
|
544 | 544 | # Windows, with other native tools, or on shared volumes |
|
545 | 545 | def checklink(path: bytes) -> bool: |
|
546 | 546 | return False |
|
547 | 547 | |
|
548 | 548 | |
|
549 | 549 | if pycompat.sysplatform == b'OpenVMS': |
|
550 | 550 | # OpenVMS's symlink emulation is broken on some OpenVMS versions. |
|
551 | 551 | def checklink(path: bytes) -> bool: |
|
552 | 552 | return False |
|
553 | 553 | |
|
554 | 554 | |
|
555 | 555 | _needsshellquote: Optional[Match[bytes]] = None |
|
556 | 556 | |
|
557 | 557 | |
|
558 | 558 | def shellquote(s: bytes) -> bytes: |
|
559 | 559 | if pycompat.sysplatform == b'OpenVMS': |
|
560 | 560 | return b'"%s"' % s |
|
561 | 561 | global _needsshellquote |
|
562 | 562 | if _needsshellquote is None: |
|
563 | 563 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search |
|
564 | 564 | if s and not _needsshellquote(s): |
|
565 | 565 | # "s" shouldn't have to be quoted |
|
566 | 566 | return s |
|
567 | 567 | else: |
|
568 | 568 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
569 | 569 | |
|
570 | 570 | |
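Note: a few hedged examples of the POSIX quoting above (results shown as byte-string reprs):

    # shellquote(b'safe-name_1')   -> b'safe-name_1'       (only shell-safe characters)
    # shellquote(b'file name.txt') -> b"'file name.txt'"   (space forces quoting)
    # shellquote(b"can't")         -> b"'can'\\''t'"       (embedded quote becomes '\'')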
|
571 | 571 | def shellsplit(s: bytes) -> List[bytes]: |
|
572 | 572 | """Parse a command string in POSIX shell way (best-effort)""" |
|
573 | 573 | return pycompat.shlexsplit(s, posix=True) |
|
574 | 574 | |
|
575 | 575 | |
|
576 | 576 | def testpid(pid: int) -> bool: |
|
577 | 577 | '''return False if pid dead, True if running or not sure''' |
|
578 | 578 | if pycompat.sysplatform == b'OpenVMS': |
|
579 | 579 | return True |
|
580 | 580 | try: |
|
581 | 581 | os.kill(pid, 0) |
|
582 | 582 | return True |
|
583 | 583 | except OSError as inst: |
|
584 | 584 | return inst.errno != errno.ESRCH |
|
585 | 585 | |
|
586 | 586 | |
|
587 | 587 | def isowner(st: os.stat_result) -> bool: |
|
588 | 588 | """Return True if the stat object st is from the current user.""" |
|
589 | 589 | return st.st_uid == os.getuid() |
|
590 | 590 | |
|
591 | 591 | |
|
592 | 592 | def findexe(command: bytes) -> Optional[bytes]: |
|
593 | 593 | """Find executable for command searching like which does. |
|
594 | 594 | If command is a basename then PATH is searched for command. |
|
595 | 595 | PATH isn't searched if command is an absolute or relative path. |
|
596 | 596 | If command isn't found None is returned.""" |
|
597 | 597 | if pycompat.sysplatform == b'OpenVMS': |
|
598 | 598 | return command |
|
599 | 599 | |
|
600 | 600 | def findexisting(executable: bytes) -> Optional[bytes]: |
|
601 | 601 | b'Return the executable if it is an existing file'
|
602 | 602 | if os.path.isfile(executable) and os.access(executable, os.X_OK): |
|
603 | 603 | return executable |
|
604 | 604 | return None |
|
605 | 605 | |
|
606 | 606 | if pycompat.ossep in command: |
|
607 | 607 | return findexisting(command) |
|
608 | 608 | |
|
609 | 609 | if pycompat.sysplatform == b'plan9': |
|
610 | 610 | return findexisting(os.path.join(b'/bin', command)) |
|
611 | 611 | |
|
612 | 612 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
613 | 613 | executable = findexisting(os.path.join(path, command)) |
|
614 | 614 | if executable is not None: |
|
615 | 615 | return executable |
|
616 | 616 | return None |
|
617 | 617 | |
|
618 | 618 | |
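Note: a small usage sketch for findexe() (hypothetical paths; the actual result depends on the local filesystem and $PATH):

    # findexe(b'sh')            -> e.g. b'/bin/sh' if an executable 'sh' is on $PATH, else None
    # findexe(b'./build.sh')    -> b'./build.sh' only if it exists and is executable
    # findexe(b'no-such-tool')  -> None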
|
619 | 619 | def setsignalhandler() -> None: |
|
620 | 620 | pass |
|
621 | 621 | |
|
622 | 622 | |
|
623 | 623 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
624 | 624 | |
|
625 | 625 | |
|
626 | 626 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: |
|
627 | 627 | """Stat each file in files. Yield each stat, or None if a file does not |
|
628 | 628 | exist or has a type we don't care about.""" |
|
629 | 629 | lstat = os.lstat |
|
630 | 630 | getkind = stat.S_IFMT |
|
631 | 631 | for nf in files: |
|
632 | 632 | try: |
|
633 | 633 | st = lstat(nf) |
|
634 | 634 | if getkind(st.st_mode) not in _wantedkinds: |
|
635 | 635 | st = None |
|
636 | 636 | except (FileNotFoundError, NotADirectoryError): |
|
637 | 637 | st = None |
|
638 | 638 | yield st |
|
639 | 639 | |
|
640 | 640 | |
|
641 | 641 | def getuser() -> bytes: |
|
642 | 642 | '''return name of current user''' |
|
643 | 643 | return pycompat.fsencode(getpass.getuser()) |
|
644 | 644 | |
|
645 | 645 | |
|
646 | 646 | def username(uid: Optional[int] = None) -> Optional[bytes]: |
|
647 | 647 | """Return the name of the user with the given uid. |
|
648 | 648 | |
|
649 | 649 | If uid is None, return the name of the current user.""" |
|
650 | 650 | |
|
651 | 651 | if uid is None: |
|
652 | 652 | uid = os.getuid() |
|
653 | 653 | try: |
|
654 | 654 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) |
|
655 | 655 | except KeyError: |
|
656 | 656 | return b'%d' % uid |
|
657 | 657 | |
|
658 | 658 | |
|
659 | 659 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: |
|
660 | 660 | """Return the name of the group with the given gid. |
|
661 | 661 | |
|
662 | 662 | If gid is None, return the name of the current group.""" |
|
663 | 663 | |
|
664 | 664 | if gid is None: |
|
665 | 665 | gid = os.getgid() |
|
666 | 666 | try: |
|
667 | 667 | return pycompat.fsencode(grp.getgrgid(gid)[0]) |
|
668 | 668 | except KeyError: |
|
669 | 669 | return pycompat.bytestr(gid) |
|
670 | 670 | |
|
671 | 671 | |
|
672 | 672 | def groupmembers(name: bytes) -> List[bytes]: |
|
673 | 673 | """Return the list of members of the group with the given |
|
674 | 674 | name, KeyError if the group does not exist. |
|
675 | 675 | """ |
|
676 | 676 | name = pycompat.fsdecode(name) |
|
677 | 677 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) |
|
678 | 678 | |
|
679 | 679 | |
|
680 | 680 | def spawndetached(args: List[bytes]) -> int: |
|
681 | 681 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) |
|
682 | 682 | |
|
683 | 683 | |
|
684 | 684 | def gethgcmd(): # TODO: convert to bytes, like on Windows? |
|
685 | 685 | return sys.argv[:1] |
|
686 | 686 | |
|
687 | 687 | |
|
688 | 688 | def makedir(path: bytes, notindexed: bool) -> None: |
|
689 | 689 | os.mkdir(path) |
|
690 | 690 | |
|
691 | 691 | |
|
692 | 692 | def lookupreg( |
|
693 | 693 | key: bytes, |
|
694 | 694 | name: Optional[bytes] = None, |
|
695 | 695 | scope: Optional[Union[int, Iterable[int]]] = None, |
|
696 | 696 | ) -> Optional[bytes]: |
|
697 | 697 | return None |
|
698 | 698 | |
|
699 | 699 | |
|
700 | 700 | def hidewindow() -> None: |
|
701 | 701 | """Hide current shell window. |
|
702 | 702 | |
|
703 | 703 | Used to hide the window opened when starting asynchronous |
|
704 | 704 | child process under Windows, unneeded on other systems. |
|
705 | 705 | """ |
|
706 | 706 | pass |
|
707 | 707 | |
|
708 | 708 | |
|
709 | 709 | class cachestat: |
|
710 | stat: os.stat_result | |
|
711 | ||
|
710 | 712 | def __init__(self, path: bytes) -> None: |
|
711 | 713 | self.stat = os.stat(path) |
|
712 | 714 | |
|
713 | 715 | def cacheable(self) -> bool: |
|
714 | 716 | return bool(self.stat.st_ino) |
|
715 | 717 | |
|
716 | 718 | __hash__ = object.__hash__ |
|
717 | 719 | |
|
718 | 720 | def __eq__(self, other: Any) -> bool: |
|
719 | 721 | try: |
|
720 | 722 | # Only dev, ino, size, mtime and atime are likely to change. Out |
|
721 | 723 | # of these, we shouldn't compare atime but should compare the |
|
722 | 724 | # rest. However, one of the other fields changing indicates |
|
723 | 725 | # something fishy going on, so return False if anything but atime |
|
724 | 726 | # changes. |
|
725 | 727 | return ( |
|
726 | 728 | self.stat.st_mode == other.stat.st_mode |
|
727 | 729 | and self.stat.st_ino == other.stat.st_ino |
|
728 | 730 | and self.stat.st_dev == other.stat.st_dev |
|
729 | 731 | and self.stat.st_nlink == other.stat.st_nlink |
|
730 | 732 | and self.stat.st_uid == other.stat.st_uid |
|
731 | 733 | and self.stat.st_gid == other.stat.st_gid |
|
732 | 734 | and self.stat.st_size == other.stat.st_size |
|
733 | 735 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] |
|
734 | 736 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] |
|
735 | 737 | ) |
|
736 | 738 | except AttributeError: |
|
737 | 739 | return False |
|
738 | 740 | |
|
739 | 741 | def __ne__(self, other: Any) -> bool: |
|
740 | 742 | return not self == other |
|
741 | 743 | |
|
742 | 744 | |
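Note: a hedged sketch of how cachestat might be used to detect that a file changed on disk (hypothetical caller, not part of this module):

    # before = cachestat(b'/etc/mercurial/hgrc')
    # ... later ...
    # if not before.cacheable() or cachestat(b'/etc/mercurial/hgrc') != before:
    #     pass  # reload: metadata other than atime changed (or stat is unusable)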
|
743 | 745 | def statislink(st: Optional[os.stat_result]) -> bool: |
|
744 | 746 | '''check whether a stat result is a symlink''' |
|
745 | 747 | return stat.S_ISLNK(st.st_mode) if st else False |
|
746 | 748 | |
|
747 | 749 | |
|
748 | 750 | def statisexec(st: Optional[os.stat_result]) -> bool: |
|
749 | 751 | '''check whether a stat result is an executable file''' |
|
750 | 752 | return (st.st_mode & 0o100 != 0) if st else False |
|
751 | 753 | |
|
752 | 754 | |
|
753 | 755 | def poll(fds): |
|
754 | 756 | """block until something happens on any file descriptor |
|
755 | 757 | |
|
756 | 758 | This is a generic helper that will check for any activity |
|
757 | 759 | (read, write, exception) and return the list of touched files.
|
758 | 760 | |
|
759 | 761 | In unsupported cases, it will raise a NotImplementedError""" |
|
760 | 762 | try: |
|
761 | 763 | res = select.select(fds, fds, fds) |
|
762 | 764 | except ValueError: # out of range file descriptor |
|
763 | 765 | raise NotImplementedError() |
|
764 | 766 | return sorted(list(set(sum(res, [])))) |
|
765 | 767 | |
|
766 | 768 | |
|
767 | 769 | def readpipe(pipe) -> bytes: |
|
768 | 770 | """Read all available data from a pipe.""" |
|
769 | 771 | # We can't fstat() a pipe because Linux will always report 0. |
|
770 | 772 | # So, we set the pipe to non-blocking mode and read everything |
|
771 | 773 | # that's available. |
|
772 | 774 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) |
|
773 | 775 | flags |= os.O_NONBLOCK |
|
774 | 776 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) |
|
775 | 777 | |
|
776 | 778 | try: |
|
777 | 779 | chunks = [] |
|
778 | 780 | while True: |
|
779 | 781 | try: |
|
780 | 782 | s = pipe.read() |
|
781 | 783 | if not s: |
|
782 | 784 | break |
|
783 | 785 | chunks.append(s) |
|
784 | 786 | except IOError: |
|
785 | 787 | break |
|
786 | 788 | |
|
787 | 789 | return b''.join(chunks) |
|
788 | 790 | finally: |
|
789 | 791 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
790 | 792 | |
|
791 | 793 | |
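Note: a minimal usage sketch for readpipe(), assuming a pipe produced elsewhere (for example the stdout of a subprocess.Popen started with stdout=subprocess.PIPE):

    # data = readpipe(proc.stdout)  # drains whatever bytes are currently available
    #                               # without blocking on an idle pipe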
|
792 | 794 | def bindunixsocket(sock, path: bytes) -> None: |
|
793 | 795 | """Bind the UNIX domain socket to the specified path""" |
|
794 | 796 | # use relative path instead of full path at bind() if possible, since |
|
795 | 797 | # AF_UNIX path has very small length limit (107 chars) on common |
|
796 | 798 | # platforms (see sys/un.h) |
|
797 | 799 | dirname, basename = os.path.split(path) |
|
798 | 800 | bakwdfd = None |
|
799 | 801 | |
|
800 | 802 | try: |
|
801 | 803 | if dirname: |
|
802 | 804 | bakwdfd = os.open(b'.', os.O_DIRECTORY) |
|
803 | 805 | os.chdir(dirname) |
|
804 | 806 | sock.bind(basename) |
|
805 | 807 | if bakwdfd: |
|
806 | 808 | os.fchdir(bakwdfd) |
|
807 | 809 | finally: |
|
808 | 810 | if bakwdfd: |
|
809 | 811 | os.close(bakwdfd) |
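Note: a usage sketch for bindunixsocket() with the standard socket module (hypothetical socket path):

    # import socket
    # sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # bindunixsocket(sock, b'/run/user/1000/hg/command-server.sock')
    # sock.listen(1)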
@@ -1,28 +1,41 | |||
|
1 | 1 | # typelib.py - type hint aliases and support |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2022 Matt Harbison <matt_harbison@yahoo.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | import typing |
|
9 | 9 | |
|
10 | 10 | # Note: this is slightly different from pycompat.TYPE_CHECKING, as using |
|
11 | 11 | # pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when |
|
12 | 12 | # used as the base class during a pytype run. |
|
13 | 13 | TYPE_CHECKING = typing.TYPE_CHECKING |
|
14 | 14 | |
|
15 | 15 | |
|
16 | 16 | # The BinaryIO class provides empty methods, which at runtime means that |
|
17 | 17 | # ``__getattr__`` on the proxy classes won't get called for the methods that |
|
18 | 18 | # should delegate to the internal object. So to avoid runtime changes because |
|
19 | 19 | # of the required typing inheritance, just use BinaryIO when typechecking, and |
|
20 | 20 | # ``object`` otherwise. |
|
21 | 21 | if TYPE_CHECKING: |
|
22 | 22 | from typing import ( |
|
23 | 23 | BinaryIO, |
|
24 | Union, | |
|
25 | ) | |
|
26 | ||
|
27 | from . import ( | |
|
28 | node, | |
|
29 | posix, | |
|
30 | windows, | |
|
24 | 31 | ) |
|
25 | 32 | |
|
26 | 33 | BinaryIO_Proxy = BinaryIO |
|
34 | CacheStat = Union[posix.cachestat, windows.cachestat] | |
|
35 | NodeConstants = node.sha1nodeconstants | |
|
27 | 36 | else: |
|
37 | from typing import Any | |
|
38 | ||
|
28 | 39 | BinaryIO_Proxy = object |
|
40 | CacheStat = Any | |
|
41 | NodeConstants = Any |
@@ -1,757 +1,759 | |||
|
1 | 1 | # windows.py - Windows utility function implementations for Mercurial |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | import errno |
|
10 | 10 | import getpass |
|
11 | 11 | import msvcrt # pytype: disable=import-error |
|
12 | 12 | import os |
|
13 | 13 | import re |
|
14 | 14 | import stat |
|
15 | 15 | import string |
|
16 | 16 | import sys |
|
17 | 17 | import typing |
|
18 | 18 | import winreg # pytype: disable=import-error |
|
19 | 19 | |
|
20 | 20 | from typing import ( |
|
21 | 21 | AnyStr, |
|
22 | 22 | BinaryIO, |
|
23 | 23 | Iterable, |
|
24 | 24 | Iterator, |
|
25 | 25 | List, |
|
26 | 26 | Mapping, |
|
27 | 27 | NoReturn, |
|
28 | 28 | Optional, |
|
29 | 29 | Pattern, |
|
30 | 30 | Sequence, |
|
31 | 31 | Tuple, |
|
32 | 32 | Union, |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | from .i18n import _ |
|
36 | 36 | from . import ( |
|
37 | 37 | encoding, |
|
38 | 38 | error, |
|
39 | 39 | policy, |
|
40 | 40 | pycompat, |
|
41 | 41 | typelib, |
|
42 | 42 | win32, |
|
43 | 43 | ) |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | osutil = policy.importmod('osutil') |
|
47 | 47 | |
|
48 | 48 | getfsmountpoint = win32.getvolumename |
|
49 | 49 | getfstype = win32.getfstype |
|
50 | 50 | getuser = win32.getuser |
|
51 | 51 | hidewindow = win32.hidewindow |
|
52 | 52 | makedir = win32.makedir |
|
53 | 53 | nlinks = win32.nlinks |
|
54 | 54 | oslink = win32.oslink |
|
55 | 55 | samedevice = win32.samedevice |
|
56 | 56 | samefile = win32.samefile |
|
57 | 57 | setsignalhandler = win32.setsignalhandler |
|
58 | 58 | spawndetached = win32.spawndetached |
|
59 | 59 | split = os.path.split |
|
60 | 60 | testpid = win32.testpid |
|
61 | 61 | unlink = win32.unlink |
|
62 | 62 | |
|
63 | 63 | if typing.TYPE_CHECKING: |
|
64 | 64 | |
|
65 | 65 | def split(p: bytes) -> Tuple[bytes, bytes]: |
|
66 | 66 | raise NotImplementedError |
|
67 | 67 | |
|
68 | 68 | |
|
69 | 69 | umask: int = 0o022 |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | class mixedfilemodewrapper: |
|
73 | 73 | """Wraps a file handle when it is opened in read/write mode. |
|
74 | 74 | |
|
75 | 75 | fopen() and fdopen() on Windows have a specific-to-Windows requirement |
|
76 | 76 | that files opened with mode r+, w+, or a+ make a call to a file positioning |
|
77 | 77 | function when switching between reads and writes. Without this extra call, |
|
78 | 78 | Python will raise a not very intuitive "IOError: [Errno 0] Error." |
|
79 | 79 | |
|
80 | 80 | This class wraps posixfile instances when the file is opened in read/write |
|
81 | 81 | mode and automatically adds checks or inserts appropriate file positioning |
|
82 | 82 | calls when necessary. |
|
83 | 83 | """ |
|
84 | 84 | |
|
85 | 85 | OPNONE = 0 |
|
86 | 86 | OPREAD = 1 |
|
87 | 87 | OPWRITE = 2 |
|
88 | 88 | |
|
89 | 89 | def __init__(self, fp): |
|
90 | 90 | object.__setattr__(self, '_fp', fp) |
|
91 | 91 | object.__setattr__(self, '_lastop', 0) |
|
92 | 92 | |
|
93 | 93 | def __enter__(self): |
|
94 | 94 | self._fp.__enter__() |
|
95 | 95 | return self |
|
96 | 96 | |
|
97 | 97 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
98 | 98 | self._fp.__exit__(exc_type, exc_val, exc_tb) |
|
99 | 99 | |
|
100 | 100 | def __getattr__(self, name): |
|
101 | 101 | return getattr(self._fp, name) |
|
102 | 102 | |
|
103 | 103 | def __setattr__(self, name, value): |
|
104 | 104 | return self._fp.__setattr__(name, value) |
|
105 | 105 | |
|
106 | 106 | def _noopseek(self): |
|
107 | 107 | self._fp.seek(0, os.SEEK_CUR) |
|
108 | 108 | |
|
109 | 109 | def seek(self, *args, **kwargs): |
|
110 | 110 | object.__setattr__(self, '_lastop', self.OPNONE) |
|
111 | 111 | return self._fp.seek(*args, **kwargs) |
|
112 | 112 | |
|
113 | 113 | def write(self, d): |
|
114 | 114 | if self._lastop == self.OPREAD: |
|
115 | 115 | self._noopseek() |
|
116 | 116 | |
|
117 | 117 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
118 | 118 | return self._fp.write(d) |
|
119 | 119 | |
|
120 | 120 | def writelines(self, *args, **kwargs): |
|
121 | 121 | if self._lastop == self.OPREAD: |
|
122 | 122 | self._noopseek()
|
123 | 123 | |
|
124 | 124 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
125 | 125 | return self._fp.writelines(*args, **kwargs) |
|
126 | 126 | |
|
127 | 127 | def read(self, *args, **kwargs): |
|
128 | 128 | if self._lastop == self.OPWRITE: |
|
129 | 129 | self._noopseek() |
|
130 | 130 | |
|
131 | 131 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
132 | 132 | return self._fp.read(*args, **kwargs) |
|
133 | 133 | |
|
134 | 134 | def readline(self, *args, **kwargs): |
|
135 | 135 | if self._lastop == self.OPWRITE: |
|
136 | 136 | self._noopseek() |
|
137 | 137 | |
|
138 | 138 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
139 | 139 | return self._fp.readline(*args, **kwargs) |
|
140 | 140 | |
|
141 | 141 | def readlines(self, *args, **kwargs): |
|
142 | 142 | if self._lastop == self.OPWRITE: |
|
143 | 143 | self._noopseek() |
|
144 | 144 | |
|
145 | 145 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
146 | 146 | return self._fp.readlines(*args, **kwargs) |
|
147 | 147 | |
|
148 | 148 | |
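Note: an illustrative sketch of the read/write switch this wrapper handles, assuming a file opened through posixfile() below in b'r+b' mode:

    # fp = posixfile(b'data.bin', b'r+b')  # '+' mode: wrapped in mixedfilemodewrapper
    # fp.read(4)                           # last operation recorded as OPREAD
    # fp.write(b'xyz')                     # wrapper issues seek(0, os.SEEK_CUR) first;
    #                                      # without it the C runtime reports
    #                                      # "IOError: [Errno 0] Error"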
|
149 | 149 | class fdproxy: |
|
150 | 150 | """Wraps osutil.posixfile() to override the name attribute to reflect the |
|
151 | 151 | underlying file name. |
|
152 | 152 | """ |
|
153 | 153 | |
|
154 | 154 | def __init__(self, name, fp): |
|
155 | 155 | self.name = name |
|
156 | 156 | self._fp = fp |
|
157 | 157 | |
|
158 | 158 | def __enter__(self): |
|
159 | 159 | self._fp.__enter__() |
|
160 | 160 | # Return this wrapper for the context manager so that the name is |
|
161 | 161 | # still available. |
|
162 | 162 | return self |
|
163 | 163 | |
|
164 | 164 | def __exit__(self, exc_type, exc_value, traceback): |
|
165 | 165 | self._fp.__exit__(exc_type, exc_value, traceback) |
|
166 | 166 | |
|
167 | 167 | def __iter__(self): |
|
168 | 168 | return iter(self._fp) |
|
169 | 169 | |
|
170 | 170 | def __getattr__(self, name): |
|
171 | 171 | return getattr(self._fp, name) |
|
172 | 172 | |
|
173 | 173 | |
|
174 | 174 | def posixfile(name, mode=b'r', buffering=-1): |
|
175 | 175 | '''Open a file with even more POSIX-like semantics''' |
|
176 | 176 | try: |
|
177 | 177 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError |
|
178 | 178 | |
|
179 | 179 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the |
|
180 | 180 | # underlying file descriptor. |
|
181 | 181 | fp = fdproxy(name, fp) |
|
182 | 182 | |
|
183 | 183 | # The position when opening in append mode is implementation defined, so |
|
184 | 184 | # make it consistent with other platforms, which position at EOF. |
|
185 | 185 | if b'a' in mode: |
|
186 | 186 | fp.seek(0, os.SEEK_END) |
|
187 | 187 | |
|
188 | 188 | if b'+' in mode: |
|
189 | 189 | return mixedfilemodewrapper(fp) |
|
190 | 190 | |
|
191 | 191 | return fp |
|
192 | 192 | except WindowsError as err: # pytype: disable=name-error |
|
193 | 193 | # convert to a friendlier exception |
|
194 | 194 | raise IOError( |
|
195 | 195 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) |
|
196 | 196 | ) |
|
197 | 197 | |
|
198 | 198 | |
|
199 | 199 | # may be wrapped by win32mbcs extension |
|
200 | 200 | listdir = osutil.listdir |
|
201 | 201 | |
|
202 | 202 | |
|
203 | 203 | def get_password() -> bytes: |
|
204 | 204 | """Prompt for password with echo off, using Windows getch(). |
|
205 | 205 | |
|
206 | 206 | This shouldn't be called directly- use ``ui.getpass()`` instead, which |
|
207 | 207 | checks if the session is interactive first. |
|
208 | 208 | """ |
|
209 | 209 | pw = u"" |
|
210 | 210 | while True: |
|
211 | 211 | c = msvcrt.getwch() # pytype: disable=module-attr |
|
212 | 212 | if c == u'\r' or c == u'\n': |
|
213 | 213 | break |
|
214 | 214 | if c == u'\003': |
|
215 | 215 | raise KeyboardInterrupt |
|
216 | 216 | if c == u'\b': |
|
217 | 217 | pw = pw[:-1] |
|
218 | 218 | else: |
|
219 | 219 | pw = pw + c |
|
220 | 220 | msvcrt.putwch(u'\r') # pytype: disable=module-attr |
|
221 | 221 | msvcrt.putwch(u'\n') # pytype: disable=module-attr |
|
222 | 222 | return encoding.unitolocal(pw) |
|
223 | 223 | |
|
224 | 224 | |
|
225 | 225 | class winstdout(typelib.BinaryIO_Proxy): |
|
226 | 226 | """Some files on Windows misbehave. |
|
227 | 227 | |
|
228 | 228 | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. |
|
229 | 229 | |
|
230 | 230 | When writing too many bytes to a console at the same time, a "Not enough space"
|
231 | 231 | error may happen. Python 3 already works around that. |
|
232 | 232 | """ |
|
233 | 233 | |
|
234 | 234 | def __init__(self, fp: BinaryIO): |
|
235 | 235 | self.fp = fp |
|
236 | 236 | |
|
237 | 237 | def __getattr__(self, key): |
|
238 | 238 | return getattr(self.fp, key) |
|
239 | 239 | |
|
240 | 240 | def close(self): |
|
241 | 241 | try: |
|
242 | 242 | self.fp.close() |
|
243 | 243 | except IOError: |
|
244 | 244 | pass |
|
245 | 245 | |
|
246 | 246 | def write(self, s): |
|
247 | 247 | try: |
|
248 | 248 | return self.fp.write(s) |
|
249 | 249 | except IOError as inst: |
|
250 | 250 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): |
|
251 | 251 | raise |
|
252 | 252 | self.close() |
|
253 | 253 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
254 | 254 | |
|
255 | 255 | def flush(self): |
|
256 | 256 | try: |
|
257 | 257 | return self.fp.flush() |
|
258 | 258 | except IOError as inst: |
|
259 | 259 | if not win32.lasterrorwaspipeerror(inst): |
|
260 | 260 | raise |
|
261 | 261 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
262 | 262 | |
|
263 | 263 | |
|
264 | 264 | def openhardlinks() -> bool: |
|
265 | 265 | return True |
|
266 | 266 | |
|
267 | 267 | |
|
268 | 268 | def parsepatchoutput(output_line: bytes) -> bytes: |
|
269 | 269 | """parses the output produced by patch and returns the filename""" |
|
270 | 270 | pf = output_line[14:] |
|
271 | 271 | if pf[0] == b'`': |
|
272 | 272 | pf = pf[1:-1] # Remove the quotes |
|
273 | 273 | return pf |
|
274 | 274 | |
|
275 | 275 | |
|
276 | 276 | def sshargs( |
|
277 | 277 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] |
|
278 | 278 | ) -> bytes: |
|
279 | 279 | '''Build argument list for ssh or Plink''' |
|
280 | 280 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' |
|
281 | 281 | args = user and (b"%s@%s" % (user, host)) or host |
|
282 | 282 | if args.startswith(b'-') or args.startswith(b'/'): |
|
283 | 283 | raise error.Abort( |
|
284 | 284 | _(b'illegal ssh hostname or username starting with - or /: %s') |
|
285 | 285 | % args |
|
286 | 286 | ) |
|
287 | 287 | args = shellquote(args) |
|
288 | 288 | if port: |
|
289 | 289 | args = b'%s %s %s' % (pflag, shellquote(port), args) |
|
290 | 290 | return args |
|
291 | 291 | |
|
292 | 292 | |
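Note: hedged examples of the argument strings built above (quoting comes from shellquote() defined later in this module):

    # sshargs(b'ssh', b'example.com', b'alice', b'2222')
    #     -> b'-p 2222 "alice@example.com"'
    # sshargs(b'plink.exe', b'example.com', None, None)
    #     -> b'example.com'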
|
293 | 293 | def setflags(f: bytes, l: bool, x: bool) -> None: |
|
294 | 294 | pass |
|
295 | 295 | |
|
296 | 296 | |
|
297 | 297 | def copymode( |
|
298 | 298 | src: bytes, |
|
299 | 299 | dst: bytes, |
|
300 | 300 | mode: Optional[bytes] = None, |
|
301 | 301 | enforcewritable: bool = False, |
|
302 | 302 | ) -> None: |
|
303 | 303 | pass |
|
304 | 304 | |
|
305 | 305 | |
|
306 | 306 | def checkexec(path: bytes) -> bool: |
|
307 | 307 | return False |
|
308 | 308 | |
|
309 | 309 | |
|
310 | 310 | def checklink(path: bytes) -> bool: |
|
311 | 311 | return False |
|
312 | 312 | |
|
313 | 313 | |
|
314 | 314 | def setbinary(fd) -> None: |
|
315 | 315 | # When run without console, pipes may expose invalid |
|
316 | 316 | # fileno(), usually set to -1. |
|
317 | 317 | fno = getattr(fd, 'fileno', None) |
|
318 | 318 | if fno is not None and fno() >= 0: |
|
319 | 319 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | def pconvert(path: bytes) -> bytes: |
|
323 | 323 | return path.replace(pycompat.ossep, b'/') |
|
324 | 324 | |
|
325 | 325 | |
|
326 | 326 | def localpath(path: bytes) -> bytes: |
|
327 | 327 | return path.replace(b'/', b'\\') |
|
328 | 328 | |
|
329 | 329 | |
|
330 | 330 | def normpath(path: bytes) -> bytes: |
|
331 | 331 | return pconvert(os.path.normpath(path)) |
|
332 | 332 | |
|
333 | 333 | |
|
334 | 334 | def normcase(path: bytes) -> bytes: |
|
335 | 335 | return encoding.upper(path) # NTFS compares via upper() |
|
336 | 336 | |
|
337 | 337 | |
|
338 | 338 | DRIVE_RE_B: Pattern[bytes] = re.compile(b'^[a-z]:') |
|
339 | 339 | DRIVE_RE_S: Pattern[str] = re.compile('^[a-z]:') |
|
340 | 340 | |
|
341 | 341 | |
|
342 | 342 | # TODO: why is this accepting str? |
|
343 | 343 | def abspath(path: AnyStr) -> AnyStr: |
|
344 | 344 | abs_path = os.path.abspath(path) # re-exports |
|
345 | 345 | # Python on Windows is inconsistent regarding the capitalization of drive |
|
346 | 346 | # letter and this causes issues with various path comparisons along the way.
|
347 | 347 | # So we normalize the drive letter to upper case here.
|
348 | 348 | # |
|
349 | 349 | # See https://bugs.python.org/issue40368 for an example of this hell.
|
350 | 350 | if isinstance(abs_path, bytes): |
|
351 | 351 | if DRIVE_RE_B.match(abs_path): |
|
352 | 352 | abs_path = abs_path[0:1].upper() + abs_path[1:] |
|
353 | 353 | elif DRIVE_RE_S.match(abs_path): |
|
354 | 354 | abs_path = abs_path[0:1].upper() + abs_path[1:] |
|
355 | 355 | return abs_path |
|
356 | 356 | |
|
357 | 357 | |
|
358 | 358 | # see posix.py for definitions |
|
359 | 359 | normcasespec: int = encoding.normcasespecs.upper |
|
360 | 360 | normcasefallback = encoding.upperfallback |
|
361 | 361 | |
|
362 | 362 | |
|
363 | 363 | def samestat(s1: os.stat_result, s2: os.stat_result) -> bool: |
|
364 | 364 | return False |
|
365 | 365 | |
|
366 | 366 | |
|
367 | 367 | def shelltocmdexe(path: bytes, env: Mapping[bytes, bytes]) -> bytes: |
|
368 | 368 | r"""Convert shell variables in the form $var and ${var} inside ``path`` |
|
369 | 369 | to %var% form. Existing Windows style variables are left unchanged. |
|
370 | 370 | |
|
371 | 371 | The variables are limited to the given environment. Unknown variables are |
|
372 | 372 | left unchanged. |
|
373 | 373 | |
|
374 | 374 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} |
|
375 | 375 | >>> # Only valid values are expanded |
|
376 | 376 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', |
|
377 | 377 | ... e) |
|
378 | 378 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' |
|
379 | 379 | >>> # Single quote prevents expansion, as does \$ escaping |
|
380 | 380 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) |
|
381 | 381 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' |
|
382 | 382 | >>> # $$ is not special. %% is not special either, but can be the end and |
|
383 | 383 | >>> # start of consecutive variables |
|
384 | 384 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) |
|
385 | 385 | 'cmd $$ %% %var1%%var2%' |
|
386 | 386 | >>> # No double substitution |
|
387 | 387 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) |
|
388 | 388 | '%var1% %var1%' |
|
389 | 389 | >>> # Tilde expansion |
|
390 | 390 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) |
|
391 | 391 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' |
|
392 | 392 | """ |
|
393 | 393 | if not any(c in path for c in b"$'~"): |
|
394 | 394 | return path |
|
395 | 395 | |
|
396 | 396 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' |
|
397 | 397 | |
|
398 | 398 | res = b'' |
|
399 | 399 | index = 0 |
|
400 | 400 | pathlen = len(path) |
|
401 | 401 | while index < pathlen: |
|
402 | 402 | c = path[index : index + 1] |
|
403 | 403 | if c == b'\'': # no expansion within single quotes |
|
404 | 404 | path = path[index + 1 :] |
|
405 | 405 | pathlen = len(path) |
|
406 | 406 | try: |
|
407 | 407 | index = path.index(b'\'') |
|
408 | 408 | res += b'"' + path[:index] + b'"' |
|
409 | 409 | except ValueError: |
|
410 | 410 | res += c + path |
|
411 | 411 | index = pathlen - 1 |
|
412 | 412 | elif c == b'%': # variable |
|
413 | 413 | path = path[index + 1 :] |
|
414 | 414 | pathlen = len(path) |
|
415 | 415 | try: |
|
416 | 416 | index = path.index(b'%') |
|
417 | 417 | except ValueError: |
|
418 | 418 | res += b'%' + path |
|
419 | 419 | index = pathlen - 1 |
|
420 | 420 | else: |
|
421 | 421 | var = path[:index] |
|
422 | 422 | res += b'%' + var + b'%' |
|
423 | 423 | elif c == b'$': # variable |
|
424 | 424 | if path[index + 1 : index + 2] == b'{': |
|
425 | 425 | path = path[index + 2 :] |
|
426 | 426 | pathlen = len(path) |
|
427 | 427 | try: |
|
428 | 428 | index = path.index(b'}') |
|
429 | 429 | var = path[:index] |
|
430 | 430 | |
|
431 | 431 | # See below for why empty variables are handled specially |
|
432 | 432 | if env.get(var, b'') != b'': |
|
433 | 433 | res += b'%' + var + b'%' |
|
434 | 434 | else: |
|
435 | 435 | res += b'${' + var + b'}' |
|
436 | 436 | except ValueError: |
|
437 | 437 | res += b'${' + path |
|
438 | 438 | index = pathlen - 1 |
|
439 | 439 | else: |
|
440 | 440 | var = b'' |
|
441 | 441 | index += 1 |
|
442 | 442 | c = path[index : index + 1] |
|
443 | 443 | while c != b'' and c in varchars: |
|
444 | 444 | var += c |
|
445 | 445 | index += 1 |
|
446 | 446 | c = path[index : index + 1] |
|
447 | 447 | # Some variables (like HG_OLDNODE) may be defined, but have an |
|
448 | 448 | # empty value. Those need to be skipped because when spawning |
|
449 | 449 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty |
|
450 | 450 | # VAR, and that really confuses things like revset expressions. |
|
451 | 451 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it |
|
452 | 452 | # will substitute to an empty string, and everything is happy. |
|
453 | 453 | if env.get(var, b'') != b'': |
|
454 | 454 | res += b'%' + var + b'%' |
|
455 | 455 | else: |
|
456 | 456 | res += b'$' + var |
|
457 | 457 | |
|
458 | 458 | if c != b'': |
|
459 | 459 | index -= 1 |
|
460 | 460 | elif ( |
|
461 | 461 | c == b'~' |
|
462 | 462 | and index + 1 < pathlen |
|
463 | 463 | and path[index + 1 : index + 2] in (b'\\', b'/') |
|
464 | 464 | ): |
|
465 | 465 | res += b"%USERPROFILE%" |
|
466 | 466 | elif ( |
|
467 | 467 | c == b'\\' |
|
468 | 468 | and index + 1 < pathlen |
|
469 | 469 | and path[index + 1 : index + 2] in (b'$', b'~') |
|
470 | 470 | ): |
|
471 | 471 | # Skip '\', but only if it is escaping $ or ~ |
|
472 | 472 | res += path[index + 1 : index + 2] |
|
473 | 473 | index += 1 |
|
474 | 474 | else: |
|
475 | 475 | res += c |
|
476 | 476 | |
|
477 | 477 | index += 1 |
|
478 | 478 | return res |
|
479 | 479 | |
|
480 | 480 | |
|
481 | 481 | # A sequence of backslashes is special iff it precedes a double quote: |
|
482 | 482 | # - if there's an even number of backslashes, the double quote is not |
|
483 | 483 | # quoted (i.e. it ends the quoted region) |
|
484 | 484 | # - if there's an odd number of backslashes, the double quote is quoted |
|
485 | 485 | # - in both cases, every pair of backslashes is unquoted into a single |
|
486 | 486 | # backslash |
|
487 | 487 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
488 | 488 | # So, to quote a string, we must surround it in double quotes, double |
|
489 | 489 | # the number of backslashes that precede double quotes and add another |
|
490 | 490 | # backslash before every double quote (being careful with the double |
|
491 | 491 | # quote we've appended to the end) |
|
492 | 492 | _quotere: Optional[Pattern[bytes]] = None |
|
493 | 493 | _needsshellquote = None |
|
494 | 494 | |
|
495 | 495 | |
|
496 | 496 | def shellquote(s: bytes) -> bytes: |
|
497 | 497 | r""" |
|
498 | 498 | >>> shellquote(br'C:\Users\xyz') |
|
499 | 499 | '"C:\\Users\\xyz"' |
|
500 | 500 | >>> shellquote(br'C:\Users\xyz/mixed') |
|
501 | 501 | '"C:\\Users\\xyz/mixed"' |
|
502 | 502 | >>> # Would be safe not to quote too, since it is all double backslashes |
|
503 | 503 | >>> shellquote(br'C:\\Users\\xyz') |
|
504 | 504 | '"C:\\\\Users\\\\xyz"' |
|
505 | 505 | >>> # But this must be quoted |
|
506 | 506 | >>> shellquote(br'C:\\Users\\xyz/abc') |
|
507 | 507 | '"C:\\\\Users\\\\xyz/abc"' |
|
508 | 508 | """ |
|
509 | 509 | global _quotere |
|
510 | 510 | if _quotere is None: |
|
511 | 511 | _quotere = re.compile(br'(\\*)("|\\$)') |
|
512 | 512 | global _needsshellquote |
|
513 | 513 | if _needsshellquote is None: |
|
514 | 514 | # ":" is also treated as "safe character", because it is used as a part |
|
515 | 515 | # of path name on Windows. "\" is also part of a path name, but isn't |
|
516 | 516 | # safe because shlex.split() (kind of) treats it as an escape char and |
|
517 | 517 | # drops it. It will leave the next character, even if it is another |
|
518 | 518 | # "\". |
|
519 | 519 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search |
|
520 | 520 | if s and not _needsshellquote(s) and not _quotere.search(s): |
|
521 | 521 | # "s" shouldn't have to be quoted |
|
522 | 522 | return s |
|
523 | 523 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) |
|
524 | 524 | |
|
525 | 525 | |
|
526 | 526 | def _unquote(s: bytes) -> bytes: |
|
527 | 527 | if s.startswith(b'"') and s.endswith(b'"'): |
|
528 | 528 | return s[1:-1] |
|
529 | 529 | return s |
|
530 | 530 | |
|
531 | 531 | |
|
532 | 532 | def shellsplit(s: bytes) -> List[bytes]: |
|
533 | 533 | """Parse a command string in cmd.exe way (best-effort)""" |
|
534 | 534 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) |
|
535 | 535 | |
|
536 | 536 | |
|
537 | 537 | # if you change this stub into a real check, please try to implement the |
|
538 | 538 | # username and groupname functions above, too. |
|
539 | 539 | def isowner(st: os.stat_result) -> bool: |
|
540 | 540 | return True |
|
541 | 541 | |
|
542 | 542 | |
|
543 | 543 | def findexe(command: bytes) -> Optional[bytes]: |
|
544 | 544 | """Find executable for command searching like cmd.exe does. |
|
545 | 545 | If command is a basename then PATH is searched for command. |
|
546 | 546 | PATH isn't searched if command is an absolute or relative path. |
|
547 | 547 | An extension from PATHEXT is found and added if not present. |
|
548 | 548 | If command isn't found None is returned.""" |
|
549 | 549 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') |
|
550 | 550 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] |
|
551 | 551 | if os.path.splitext(command)[1].lower() in pathexts: |
|
552 | 552 | pathexts = [b''] |
|
553 | 553 | |
|
554 | 554 | def findexisting(pathcommand: bytes) -> Optional[bytes]: |
|
555 | 555 | """Will append extension (if needed) and return existing file""" |
|
556 | 556 | for ext in pathexts: |
|
557 | 557 | executable = pathcommand + ext |
|
558 | 558 | if os.path.exists(executable): |
|
559 | 559 | return executable |
|
560 | 560 | return None |
|
561 | 561 | |
|
562 | 562 | if pycompat.ossep in command: |
|
563 | 563 | return findexisting(command) |
|
564 | 564 | |
|
565 | 565 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
566 | 566 | executable = findexisting(os.path.join(path, command)) |
|
567 | 567 | if executable is not None: |
|
568 | 568 | return executable |
|
569 | 569 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
570 | 570 | |
|
571 | 571 | |
|
572 | 572 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
573 | 573 | |
|
574 | 574 | |
|
575 | 575 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: |
|
576 | 576 | """Stat each file in files. Yield each stat, or None if a file |
|
577 | 577 | does not exist or has a type we don't care about. |
|
578 | 578 | |
|
579 | 579 | Cluster and cache stat per directory to minimize number of OS stat calls.""" |
|
580 | 580 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
581 | 581 | getkind = stat.S_IFMT |
|
582 | 582 | for nf in files: |
|
583 | 583 | nf = normcase(nf) |
|
584 | 584 | dir, base = os.path.split(nf) |
|
585 | 585 | if not dir: |
|
586 | 586 | dir = b'.' |
|
587 | 587 | cache = dircache.get(dir, None) |
|
588 | 588 | if cache is None: |
|
589 | 589 | try: |
|
590 | 590 | dmap = { |
|
591 | 591 | normcase(n): s |
|
592 | 592 | for n, k, s in listdir(dir, True) |
|
593 | 593 | if getkind(s.st_mode) in _wantedkinds |
|
594 | 594 | } |
|
595 | 595 | except (FileNotFoundError, NotADirectoryError): |
|
596 | 596 | dmap = {} |
|
597 | 597 | cache = dircache.setdefault(dir, dmap) |
|
598 | 598 | yield cache.get(base, None) |
|
599 | 599 | |
|
600 | 600 | |
|
601 | 601 | def username(uid: Optional[int] = None) -> Optional[bytes]: |
|
602 | 602 | """Return the name of the user with the given uid. |
|
603 | 603 | |
|
604 | 604 | If uid is None, return the name of the current user.""" |
|
605 | 605 | if not uid: |
|
606 | 606 | try: |
|
607 | 607 | return pycompat.fsencode(getpass.getuser()) |
|
608 | 608 | except ModuleNotFoundError: |
|
609 | 609 | # getpass.getuser() checks for a few environment variables first, |
|
610 | 610 | # but if those aren't set, imports pwd and calls getpwuid(), none of |
|
611 | 611 | # which exists on Windows. |
|
612 | 612 | pass |
|
613 | 613 | return None |
|
614 | 614 | |
|
615 | 615 | |
|
616 | 616 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: |
|
617 | 617 | """Return the name of the group with the given gid. |
|
618 | 618 | |
|
619 | 619 | If gid is None, return the name of the current group.""" |
|
620 | 620 | return None |
|
621 | 621 | |
|
622 | 622 | |
|
623 | 623 | def readlink(pathname: bytes) -> bytes: |
|
624 | 624 | path = pycompat.fsdecode(pathname) |
|
625 | 625 | try: |
|
626 | 626 | link = os.readlink(path) |
|
627 | 627 | except ValueError as e: |
|
628 | 628 | # On py2, os.readlink() raises an AttributeError since it is |
|
629 | 629 | # unsupported. On py3, reading a non-link raises a ValueError. Simply |
|
630 | 630 | # treat this as the error the locking code has been expecting up to now |
|
631 | 631 | # until an effort can be made to enable symlink support on Windows. |
|
632 | 632 | raise AttributeError(e) |
|
633 | 633 | return pycompat.fsencode(link) |
|
634 | 634 | |
|
635 | 635 | |
|
636 | 636 | def removedirs(name: bytes) -> None: |
|
637 | 637 | """special version of os.removedirs that does not remove symlinked |
|
638 | 638 | directories or junction points if they actually contain files""" |
|
639 | 639 | if listdir(name): |
|
640 | 640 | return |
|
641 | 641 | os.rmdir(name) |
|
642 | 642 | head, tail = os.path.split(name) |
|
643 | 643 | if not tail: |
|
644 | 644 | head, tail = os.path.split(head) |
|
645 | 645 | while head and tail: |
|
646 | 646 | try: |
|
647 | 647 | if listdir(head): |
|
648 | 648 | return |
|
649 | 649 | os.rmdir(head) |
|
650 | 650 | except (ValueError, OSError): |
|
651 | 651 | break |
|
652 | 652 | head, tail = os.path.split(head) |
|
653 | 653 | |
|
654 | 654 | |
|
655 | 655 | def rename(src: bytes, dst: bytes) -> None: |
|
656 | 656 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
657 | 657 | try: |
|
658 | 658 | os.rename(src, dst) |
|
659 | 659 | except FileExistsError: |
|
660 | 660 | unlink(dst) |
|
661 | 661 | os.rename(src, dst) |
|
662 | 662 | |
|
663 | 663 | |
|
664 | 664 | def gethgcmd() -> List[bytes]: |
|
665 | 665 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] |
|
666 | 666 | |
|
667 | 667 | |
|
668 | 668 | def groupmembers(name: bytes) -> List[bytes]: |
|
669 | 669 | # Don't support groups on Windows for now |
|
670 | 670 | raise KeyError |
|
671 | 671 | |
|
672 | 672 | |
|
673 | 673 | def isexec(f: bytes) -> bool: |
|
674 | 674 | return False |
|
675 | 675 | |
|
676 | 676 | |
|
677 | 677 | class cachestat: |
|
678 | stat: Optional[os.stat_result] | |
|
679 | ||
|
678 | 680 | def __init__(self, path: bytes) -> None: |
|
679 | pass | |
|
681 | self.stat = None | |
|
680 | 682 | |
|
681 | 683 | def cacheable(self) -> bool: |
|
682 | 684 | return False |
|
683 | 685 | |
|
684 | 686 | |
|
685 | 687 | def lookupreg( |
|
686 | 688 | key: bytes, |
|
687 | 689 | valname: Optional[bytes] = None, |
|
688 | 690 | scope: Optional[Union[int, Iterable[int]]] = None, |
|
689 | 691 | ) -> Optional[bytes]: |
|
690 | 692 | """Look up a key/value name in the Windows registry. |
|
691 | 693 | |
|
692 | 694 | valname: value name. If unspecified, the default value for the key |
|
693 | 695 | is used. |
|
694 | 696 | scope: optionally specify scope for registry lookup, this can be |
|
695 | 697 | a sequence of scopes to look up in order. Default (CURRENT_USER, |
|
696 | 698 | LOCAL_MACHINE). |
|
697 | 699 | """ |
|
698 | 700 | if scope is None: |
|
699 | 701 | # pytype: disable=module-attr |
|
700 | 702 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) |
|
701 | 703 | # pytype: enable=module-attr |
|
702 | 704 | elif not isinstance(scope, (list, tuple)): |
|
703 | 705 | scope = (scope,) |
|
704 | 706 | for s in scope: |
|
705 | 707 | try: |
|
706 | 708 | # pytype: disable=module-attr |
|
707 | 709 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: |
|
708 | 710 | # pytype: enable=module-attr |
|
709 | 711 | name = None |
|
710 | 712 | if valname is not None: |
|
711 | 713 | name = encoding.strfromlocal(valname) |
|
712 | 714 | # pytype: disable=module-attr |
|
713 | 715 | val = winreg.QueryValueEx(hkey, name)[0] |
|
714 | 716 | # pytype: enable=module-attr |
|
715 | 717 | |
|
716 | 718 | # never let a Unicode string escape into the wild |
|
717 | 719 | return encoding.unitolocal(val) |
|
718 | 720 | except EnvironmentError: |
|
719 | 721 | pass |
|
720 | 722 | |
|
721 | 723 | |
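Note: a usage sketch for lookupreg() (hypothetical registry keys; None is returned when the key or value is absent in every scope):

    # lookupreg(b'Software\\Mercurial', b'InstallDir')
    # lookupreg(b'Software\\Classes\\.hg', None, winreg.HKEY_LOCAL_MACHINE)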
|
722 | 724 | expandglobs: bool = True |
|
723 | 725 | |
|
724 | 726 | |
|
725 | 727 | def statislink(st: Optional[os.stat_result]) -> bool: |
|
726 | 728 | '''check whether a stat result is a symlink''' |
|
727 | 729 | return False |
|
728 | 730 | |
|
729 | 731 | |
|
730 | 732 | def statisexec(st: Optional[os.stat_result]) -> bool: |
|
731 | 733 | '''check whether a stat result is an executable file''' |
|
732 | 734 | return False |
|
733 | 735 | |
|
734 | 736 | |
|
735 | 737 | def poll(fds) -> List: |
|
736 | 738 | # see posix.py for description |
|
737 | 739 | raise NotImplementedError() |
|
738 | 740 | |
|
739 | 741 | |
|
740 | 742 | def readpipe(pipe) -> bytes: |
|
741 | 743 | """Read all available data from a pipe.""" |
|
742 | 744 | chunks = [] |
|
743 | 745 | while True: |
|
744 | 746 | size = win32.peekpipe(pipe) |
|
745 | 747 | if not size: |
|
746 | 748 | break |
|
747 | 749 | |
|
748 | 750 | s = pipe.read(size) |
|
749 | 751 | if not s: |
|
750 | 752 | break |
|
751 | 753 | chunks.append(s) |
|
752 | 754 | |
|
753 | 755 | return b''.join(chunks) |
|
754 | 756 | |
|
755 | 757 | |
|
756 | 758 | def bindunixsocket(sock, path: bytes) -> NoReturn: |
|
757 | 759 | raise NotImplementedError('unsupported platform') |