py3: replace os.sep with pycompat.ossep (part 2 of 4)...
Pulkit Goyal
r30614:cfe66dcf default
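This series moves Mercurial's path handling toward Python 3, where os.sep is a unicode str while Mercurial keeps filesystem paths as bytes; pycompat.ossep supplies a separator of the matching type. Below is a minimal sketch of the idea, not Mercurial's actual pycompat module; the helper name joinroot is illustrative, and the definition of ossep is an assumption about how pycompat exposes a bytes os.sep on Python 3.

# Minimal sketch of the motivation for this change, not Mercurial's code.
# On Python 3, os.sep is a unicode str while Mercurial works with bytes
# paths, so concatenating them with os.sep would raise TypeError.
import os

# assumption: pycompat.ossep is essentially a bytes version of os.sep on py3
if isinstance(os.sep, bytes):        # Python 2: already bytes
    ossep = os.sep
else:                                # Python 3: encode to bytes
    ossep = os.sep.encode('ascii')

def joinroot(root, name):
    """Join a bytes root and a bytes name with the platform separator."""
    if root.endswith(ossep):
        return root + name
    return root + ossep + name

print(joinroot(b'/repo', b'.hg'))    # b'/repo/.hg'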
@@ -1,1260 +1,1260 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import nullid
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 match as matchmod,
21 21 osutil,
22 22 parsers,
23 23 pathutil,
24 24 pycompat,
25 25 scmutil,
26 26 util,
27 27 )
28 28
29 29 propertycache = util.propertycache
30 30 filecache = scmutil.filecache
31 31 _rangemask = 0x7fffffff
32 32
33 33 dirstatetuple = parsers.dirstatetuple
34 34
35 35 class repocache(filecache):
36 36 """filecache for files in .hg/"""
37 37 def join(self, obj, fname):
38 38 return obj._opener.join(fname)
39 39
40 40 class rootcache(filecache):
41 41 """filecache for files in the repository root"""
42 42 def join(self, obj, fname):
43 43 return obj._join(fname)
44 44
45 45 def _getfsnow(vfs):
46 46 '''Get "now" timestamp on filesystem'''
47 47 tmpfd, tmpname = vfs.mkstemp()
48 48 try:
49 49 return os.fstat(tmpfd).st_mtime
50 50 finally:
51 51 os.close(tmpfd)
52 52 vfs.unlink(tmpname)
53 53
54 54 def nonnormalentries(dmap):
55 55 '''Compute the nonnormal dirstate entries from the dmap'''
56 56 try:
57 57 return parsers.nonnormalentries(dmap)
58 58 except AttributeError:
59 59 return set(fname for fname, e in dmap.iteritems()
60 60 if e[0] != 'n' or e[3] == -1)
61 61
62 62 def _trypending(root, vfs, filename):
63 63 '''Open file to be read according to HG_PENDING environment variable
64 64
65 65 This opens '.pending' of specified 'filename' only when HG_PENDING
66 66 is equal to 'root'.
67 67
68 68 This returns '(fp, is_pending_opened)' tuple.
69 69 '''
70 70 if root == os.environ.get('HG_PENDING'):
71 71 try:
72 72 return (vfs('%s.pending' % filename), True)
73 73 except IOError as inst:
74 74 if inst.errno != errno.ENOENT:
75 75 raise
76 76 return (vfs(filename), False)
77 77
78 78 class dirstate(object):
79 79
80 80 def __init__(self, opener, ui, root, validate):
81 81 '''Create a new dirstate object.
82 82
83 83 opener is an open()-like callable that can be used to open the
84 84 dirstate file; root is the root of the directory tracked by
85 85 the dirstate.
86 86 '''
87 87 self._opener = opener
88 88 self._validate = validate
89 89 self._root = root
90 90 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
91 91 # UNC path pointing to root share (issue4557)
92 92 self._rootdir = pathutil.normasprefix(root)
93 93 # internal config: ui.forcecwd
94 94 forcecwd = ui.config('ui', 'forcecwd')
95 95 if forcecwd:
96 96 self._cwd = forcecwd
97 97 self._dirty = False
98 98 self._dirtypl = False
99 99 self._lastnormaltime = 0
100 100 self._ui = ui
101 101 self._filecache = {}
102 102 self._parentwriters = 0
103 103 self._filename = 'dirstate'
104 104 self._pendingfilename = '%s.pending' % self._filename
105 105 self._plchangecallbacks = {}
106 106 self._origpl = None
107 107
108 108 # for consistent view between _pl() and _read() invocations
109 109 self._pendingmode = None
110 110
111 111 def beginparentchange(self):
112 112 '''Marks the beginning of a set of changes that involve changing
113 113 the dirstate parents. If there is an exception during this time,
114 114 the dirstate will not be written when the wlock is released. This
115 115 prevents writing an incoherent dirstate where the parent doesn't
116 116 match the contents.
117 117 '''
118 118 self._parentwriters += 1
119 119
120 120 def endparentchange(self):
121 121 '''Marks the end of a set of changes that involve changing the
122 122 dirstate parents. Once all parent changes have been marked done,
123 123 the wlock will be free to write the dirstate on release.
124 124 '''
125 125 if self._parentwriters > 0:
126 126 self._parentwriters -= 1
127 127
128 128 def pendingparentchange(self):
129 129 '''Returns true if the dirstate is in the middle of a set of changes
130 130 that modify the dirstate parent.
131 131 '''
132 132 return self._parentwriters > 0
133 133
134 134 @propertycache
135 135 def _map(self):
136 136 '''Return the dirstate contents as a map from filename to
137 137 (state, mode, size, time).'''
138 138 self._read()
139 139 return self._map
140 140
141 141 @propertycache
142 142 def _copymap(self):
143 143 self._read()
144 144 return self._copymap
145 145
146 146 @propertycache
147 147 def _nonnormalset(self):
148 148 return nonnormalentries(self._map)
149 149
150 150 @propertycache
151 151 def _filefoldmap(self):
152 152 try:
153 153 makefilefoldmap = parsers.make_file_foldmap
154 154 except AttributeError:
155 155 pass
156 156 else:
157 157 return makefilefoldmap(self._map, util.normcasespec,
158 158 util.normcasefallback)
159 159
160 160 f = {}
161 161 normcase = util.normcase
162 162 for name, s in self._map.iteritems():
163 163 if s[0] != 'r':
164 164 f[normcase(name)] = name
165 165 f['.'] = '.' # prevents useless util.fspath() invocation
166 166 return f
167 167
168 168 @propertycache
169 169 def _dirfoldmap(self):
170 170 f = {}
171 171 normcase = util.normcase
172 172 for name in self._dirs:
173 173 f[normcase(name)] = name
174 174 return f
175 175
176 176 @repocache('branch')
177 177 def _branch(self):
178 178 try:
179 179 return self._opener.read("branch").strip() or "default"
180 180 except IOError as inst:
181 181 if inst.errno != errno.ENOENT:
182 182 raise
183 183 return "default"
184 184
185 185 @propertycache
186 186 def _pl(self):
187 187 try:
188 188 fp = self._opendirstatefile()
189 189 st = fp.read(40)
190 190 fp.close()
191 191 l = len(st)
192 192 if l == 40:
193 193 return st[:20], st[20:40]
194 194 elif l > 0 and l < 40:
195 195 raise error.Abort(_('working directory state appears damaged!'))
196 196 except IOError as err:
197 197 if err.errno != errno.ENOENT:
198 198 raise
199 199 return [nullid, nullid]
200 200
201 201 @propertycache
202 202 def _dirs(self):
203 203 return util.dirs(self._map, 'r')
204 204
205 205 def dirs(self):
206 206 return self._dirs
207 207
208 208 @rootcache('.hgignore')
209 209 def _ignore(self):
210 210 files = self._ignorefiles()
211 211 if not files:
212 212 return util.never
213 213
214 214 pats = ['include:%s' % f for f in files]
215 215 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
216 216
217 217 @propertycache
218 218 def _slash(self):
219 219 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
220 220
221 221 @propertycache
222 222 def _checklink(self):
223 223 return util.checklink(self._root)
224 224
225 225 @propertycache
226 226 def _checkexec(self):
227 227 return util.checkexec(self._root)
228 228
229 229 @propertycache
230 230 def _checkcase(self):
231 231 return not util.fscasesensitive(self._join('.hg'))
232 232
233 233 def _join(self, f):
234 234 # much faster than os.path.join()
235 235 # it's safe because f is always a relative path
236 236 return self._rootdir + f
237 237
238 238 def flagfunc(self, buildfallback):
239 239 if self._checklink and self._checkexec:
240 240 def f(x):
241 241 try:
242 242 st = os.lstat(self._join(x))
243 243 if util.statislink(st):
244 244 return 'l'
245 245 if util.statisexec(st):
246 246 return 'x'
247 247 except OSError:
248 248 pass
249 249 return ''
250 250 return f
251 251
252 252 fallback = buildfallback()
253 253 if self._checklink:
254 254 def f(x):
255 255 if os.path.islink(self._join(x)):
256 256 return 'l'
257 257 if 'x' in fallback(x):
258 258 return 'x'
259 259 return ''
260 260 return f
261 261 if self._checkexec:
262 262 def f(x):
263 263 if 'l' in fallback(x):
264 264 return 'l'
265 265 if util.isexec(self._join(x)):
266 266 return 'x'
267 267 return ''
268 268 return f
269 269 else:
270 270 return fallback
271 271
272 272 @propertycache
273 273 def _cwd(self):
274 274 return pycompat.getcwd()
275 275
276 276 def getcwd(self):
277 277 '''Return the path from which a canonical path is calculated.
278 278
279 279 This path should be used to resolve file patterns or to convert
280 280 canonical paths back to file paths for display. It shouldn't be
281 281 used to get real file paths. Use vfs functions instead.
282 282 '''
283 283 cwd = self._cwd
284 284 if cwd == self._root:
285 285 return ''
286 286 # self._root ends with a path separator if self._root is '/' or 'C:\'
287 287 rootsep = self._root
288 288 if not util.endswithsep(rootsep):
289 rootsep += os.sep
289 rootsep += pycompat.ossep
290 290 if cwd.startswith(rootsep):
291 291 return cwd[len(rootsep):]
292 292 else:
293 293 # we're outside the repo. return an absolute path.
294 294 return cwd
295 295
296 296 def pathto(self, f, cwd=None):
297 297 if cwd is None:
298 298 cwd = self.getcwd()
299 299 path = util.pathto(self._root, cwd, f)
300 300 if self._slash:
301 301 return util.pconvert(path)
302 302 return path
303 303
304 304 def __getitem__(self, key):
305 305 '''Return the current state of key (a filename) in the dirstate.
306 306
307 307 States are:
308 308 n normal
309 309 m needs merging
310 310 r marked for removal
311 311 a marked for addition
312 312 ? not tracked
313 313 '''
314 314 return self._map.get(key, ("?",))[0]
315 315
316 316 def __contains__(self, key):
317 317 return key in self._map
318 318
319 319 def __iter__(self):
320 320 for x in sorted(self._map):
321 321 yield x
322 322
323 323 def iteritems(self):
324 324 return self._map.iteritems()
325 325
326 326 def parents(self):
327 327 return [self._validate(p) for p in self._pl]
328 328
329 329 def p1(self):
330 330 return self._validate(self._pl[0])
331 331
332 332 def p2(self):
333 333 return self._validate(self._pl[1])
334 334
335 335 def branch(self):
336 336 return encoding.tolocal(self._branch)
337 337
338 338 def setparents(self, p1, p2=nullid):
339 339 """Set dirstate parents to p1 and p2.
340 340
341 341 When moving from two parents to one, 'm' merged entries are
342 342 adjusted to normal and previous copy records are discarded and
343 343 returned by the call.
344 344
345 345 See localrepo.setparents()
346 346 """
347 347 if self._parentwriters == 0:
348 348 raise ValueError("cannot set dirstate parent without "
349 349 "calling dirstate.beginparentchange")
350 350
351 351 self._dirty = self._dirtypl = True
352 352 oldp2 = self._pl[1]
353 353 if self._origpl is None:
354 354 self._origpl = self._pl
355 355 self._pl = p1, p2
356 356 copies = {}
357 357 if oldp2 != nullid and p2 == nullid:
358 358 for f, s in self._map.iteritems():
359 359 # Discard 'm' markers when moving away from a merge state
360 360 if s[0] == 'm':
361 361 if f in self._copymap:
362 362 copies[f] = self._copymap[f]
363 363 self.normallookup(f)
364 364 # Also fix up otherparent markers
365 365 elif s[0] == 'n' and s[2] == -2:
366 366 if f in self._copymap:
367 367 copies[f] = self._copymap[f]
368 368 self.add(f)
369 369 return copies
370 370
371 371 def setbranch(self, branch):
372 372 self._branch = encoding.fromlocal(branch)
373 373 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
374 374 try:
375 375 f.write(self._branch + '\n')
376 376 f.close()
377 377
378 378 # make sure filecache has the correct stat info for _branch after
379 379 # replacing the underlying file
380 380 ce = self._filecache['_branch']
381 381 if ce:
382 382 ce.refresh()
383 383 except: # re-raises
384 384 f.discard()
385 385 raise
386 386
387 387 def _opendirstatefile(self):
388 388 fp, mode = _trypending(self._root, self._opener, self._filename)
389 389 if self._pendingmode is not None and self._pendingmode != mode:
390 390 fp.close()
391 391 raise error.Abort(_('working directory state may be '
392 392 'changed parallelly'))
393 393 self._pendingmode = mode
394 394 return fp
395 395
396 396 def _read(self):
397 397 self._map = {}
398 398 self._copymap = {}
399 399 try:
400 400 fp = self._opendirstatefile()
401 401 try:
402 402 st = fp.read()
403 403 finally:
404 404 fp.close()
405 405 except IOError as err:
406 406 if err.errno != errno.ENOENT:
407 407 raise
408 408 return
409 409 if not st:
410 410 return
411 411
412 412 if util.safehasattr(parsers, 'dict_new_presized'):
413 413 # Make an estimate of the number of files in the dirstate based on
414 414 # its size. From a linear regression on a set of real-world repos,
415 415 # all over 10,000 files, the size of a dirstate entry is 85
416 416 # bytes. The cost of resizing is significantly higher than the cost
417 417 # of filling in a larger presized dict, so subtract 20% from the
418 418 # size.
419 419 #
420 420 # This heuristic is imperfect in many ways, so in a future dirstate
421 421 # format update it makes sense to just record the number of entries
422 422 # on write.
423 423 self._map = parsers.dict_new_presized(len(st) / 71)
424 424
425 425 # Python's garbage collector triggers a GC each time a certain number
426 426 # of container objects (the number being defined by
427 427 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
428 428 # for each file in the dirstate. The C version then immediately marks
429 429 # them as not to be tracked by the collector. However, this has no
430 430 # effect on when GCs are triggered, only on what objects the GC looks
431 431 # into. This means that O(number of files) GCs are unavoidable.
432 432 # Depending on when in the process's lifetime the dirstate is parsed,
433 433 # this can get very expensive. As a workaround, disable GC while
434 434 # parsing the dirstate.
435 435 #
436 436 # (we cannot decorate the function directly since it is in a C module)
437 437 parse_dirstate = util.nogc(parsers.parse_dirstate)
438 438 p = parse_dirstate(self._map, self._copymap, st)
439 439 if not self._dirtypl:
440 440 self._pl = p
441 441
442 442 def invalidate(self):
443 443 for a in ("_map", "_copymap", "_filefoldmap", "_dirfoldmap", "_branch",
444 444 "_pl", "_dirs", "_ignore", "_nonnormalset"):
445 445 if a in self.__dict__:
446 446 delattr(self, a)
447 447 self._lastnormaltime = 0
448 448 self._dirty = False
449 449 self._parentwriters = 0
450 450 self._origpl = None
451 451
452 452 def copy(self, source, dest):
453 453 """Mark dest as a copy of source. Unmark dest if source is None."""
454 454 if source == dest:
455 455 return
456 456 self._dirty = True
457 457 if source is not None:
458 458 self._copymap[dest] = source
459 459 elif dest in self._copymap:
460 460 del self._copymap[dest]
461 461
462 462 def copied(self, file):
463 463 return self._copymap.get(file, None)
464 464
465 465 def copies(self):
466 466 return self._copymap
467 467
468 468 def _droppath(self, f):
469 469 if self[f] not in "?r" and "_dirs" in self.__dict__:
470 470 self._dirs.delpath(f)
471 471
472 472 if "_filefoldmap" in self.__dict__:
473 473 normed = util.normcase(f)
474 474 if normed in self._filefoldmap:
475 475 del self._filefoldmap[normed]
476 476
477 477 def _addpath(self, f, state, mode, size, mtime):
478 478 oldstate = self[f]
479 479 if state == 'a' or oldstate == 'r':
480 480 scmutil.checkfilename(f)
481 481 if f in self._dirs:
482 482 raise error.Abort(_('directory %r already in dirstate') % f)
483 483 # shadows
484 484 for d in util.finddirs(f):
485 485 if d in self._dirs:
486 486 break
487 487 if d in self._map and self[d] != 'r':
488 488 raise error.Abort(
489 489 _('file %r in dirstate clashes with %r') % (d, f))
490 490 if oldstate in "?r" and "_dirs" in self.__dict__:
491 491 self._dirs.addpath(f)
492 492 self._dirty = True
493 493 self._map[f] = dirstatetuple(state, mode, size, mtime)
494 494 if state != 'n' or mtime == -1:
495 495 self._nonnormalset.add(f)
496 496
497 497 def normal(self, f):
498 498 '''Mark a file normal and clean.'''
499 499 s = os.lstat(self._join(f))
500 500 mtime = s.st_mtime
501 501 self._addpath(f, 'n', s.st_mode,
502 502 s.st_size & _rangemask, mtime & _rangemask)
503 503 if f in self._copymap:
504 504 del self._copymap[f]
505 505 if f in self._nonnormalset:
506 506 self._nonnormalset.remove(f)
507 507 if mtime > self._lastnormaltime:
508 508 # Remember the most recent modification timeslot for status(),
509 509 # to make sure we won't miss future size-preserving file content
510 510 # modifications that happen within the same timeslot.
511 511 self._lastnormaltime = mtime
512 512
513 513 def normallookup(self, f):
514 514 '''Mark a file normal, but possibly dirty.'''
515 515 if self._pl[1] != nullid and f in self._map:
516 516 # if there is a merge going on and the file was either
517 517 # in state 'm' (-1) or coming from other parent (-2) before
518 518 # being removed, restore that state.
519 519 entry = self._map[f]
520 520 if entry[0] == 'r' and entry[2] in (-1, -2):
521 521 source = self._copymap.get(f)
522 522 if entry[2] == -1:
523 523 self.merge(f)
524 524 elif entry[2] == -2:
525 525 self.otherparent(f)
526 526 if source:
527 527 self.copy(source, f)
528 528 return
529 529 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
530 530 return
531 531 self._addpath(f, 'n', 0, -1, -1)
532 532 if f in self._copymap:
533 533 del self._copymap[f]
534 534 if f in self._nonnormalset:
535 535 self._nonnormalset.remove(f)
536 536
537 537 def otherparent(self, f):
538 538 '''Mark as coming from the other parent, always dirty.'''
539 539 if self._pl[1] == nullid:
540 540 raise error.Abort(_("setting %r to other parent "
541 541 "only allowed in merges") % f)
542 542 if f in self and self[f] == 'n':
543 543 # merge-like
544 544 self._addpath(f, 'm', 0, -2, -1)
545 545 else:
546 546 # add-like
547 547 self._addpath(f, 'n', 0, -2, -1)
548 548
549 549 if f in self._copymap:
550 550 del self._copymap[f]
551 551
552 552 def add(self, f):
553 553 '''Mark a file added.'''
554 554 self._addpath(f, 'a', 0, -1, -1)
555 555 if f in self._copymap:
556 556 del self._copymap[f]
557 557
558 558 def remove(self, f):
559 559 '''Mark a file removed.'''
560 560 self._dirty = True
561 561 self._droppath(f)
562 562 size = 0
563 563 if self._pl[1] != nullid and f in self._map:
564 564 # backup the previous state
565 565 entry = self._map[f]
566 566 if entry[0] == 'm': # merge
567 567 size = -1
568 568 elif entry[0] == 'n' and entry[2] == -2: # other parent
569 569 size = -2
570 570 self._map[f] = dirstatetuple('r', 0, size, 0)
571 571 self._nonnormalset.add(f)
572 572 if size == 0 and f in self._copymap:
573 573 del self._copymap[f]
574 574
575 575 def merge(self, f):
576 576 '''Mark a file merged.'''
577 577 if self._pl[1] == nullid:
578 578 return self.normallookup(f)
579 579 return self.otherparent(f)
580 580
581 581 def drop(self, f):
582 582 '''Drop a file from the dirstate'''
583 583 if f in self._map:
584 584 self._dirty = True
585 585 self._droppath(f)
586 586 del self._map[f]
587 587 if f in self._nonnormalset:
588 588 self._nonnormalset.remove(f)
589 589 if f in self._copymap:
590 590 del self._copymap[f]
591 591
592 592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 593 if exists is None:
594 594 exists = os.path.lexists(os.path.join(self._root, path))
595 595 if not exists:
596 596 # Maybe a path component exists
597 597 if not ignoremissing and '/' in path:
598 598 d, f = path.rsplit('/', 1)
599 599 d = self._normalize(d, False, ignoremissing, None)
600 600 folded = d + "/" + f
601 601 else:
602 602 # No path components, preserve original case
603 603 folded = path
604 604 else:
605 605 # recursively normalize leading directory components
606 606 # against dirstate
607 607 if '/' in normed:
608 608 d, f = normed.rsplit('/', 1)
609 609 d = self._normalize(d, False, ignoremissing, True)
610 610 r = self._root + "/" + d
611 611 folded = d + "/" + util.fspath(f, r)
612 612 else:
613 613 folded = util.fspath(normed, self._root)
614 614 storemap[normed] = folded
615 615
616 616 return folded
617 617
618 618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 619 normed = util.normcase(path)
620 620 folded = self._filefoldmap.get(normed, None)
621 621 if folded is None:
622 622 if isknown:
623 623 folded = path
624 624 else:
625 625 folded = self._discoverpath(path, normed, ignoremissing, exists,
626 626 self._filefoldmap)
627 627 return folded
628 628
629 629 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
630 630 normed = util.normcase(path)
631 631 folded = self._filefoldmap.get(normed, None)
632 632 if folded is None:
633 633 folded = self._dirfoldmap.get(normed, None)
634 634 if folded is None:
635 635 if isknown:
636 636 folded = path
637 637 else:
638 638 # store discovered result in dirfoldmap so that future
639 639 # normalizefile calls don't start matching directories
640 640 folded = self._discoverpath(path, normed, ignoremissing, exists,
641 641 self._dirfoldmap)
642 642 return folded
643 643
644 644 def normalize(self, path, isknown=False, ignoremissing=False):
645 645 '''
646 646 normalize the case of a pathname when on a casefolding filesystem
647 647
648 648 isknown specifies whether the filename came from walking the
649 649 disk, to avoid extra filesystem access.
650 650
651 651 If ignoremissing is True, missing paths are returned
652 652 unchanged. Otherwise, we try harder to normalize possibly
653 653 existing path components.
654 654
655 655 The normalized case is determined based on the following precedence:
656 656
657 657 - version of name already stored in the dirstate
658 658 - version of name stored on disk
659 659 - version provided via command arguments
660 660 '''
661 661
662 662 if self._checkcase:
663 663 return self._normalize(path, isknown, ignoremissing)
664 664 return path
665 665
666 666 def clear(self):
667 667 self._map = {}
668 668 self._nonnormalset = set()
669 669 if "_dirs" in self.__dict__:
670 670 delattr(self, "_dirs")
671 671 self._copymap = {}
672 672 self._pl = [nullid, nullid]
673 673 self._lastnormaltime = 0
674 674 self._dirty = True
675 675
676 676 def rebuild(self, parent, allfiles, changedfiles=None):
677 677 if changedfiles is None:
678 678 # Rebuild entire dirstate
679 679 changedfiles = allfiles
680 680 lastnormaltime = self._lastnormaltime
681 681 self.clear()
682 682 self._lastnormaltime = lastnormaltime
683 683
684 684 if self._origpl is None:
685 685 self._origpl = self._pl
686 686 self._pl = (parent, nullid)
687 687 for f in changedfiles:
688 688 if f in allfiles:
689 689 self.normallookup(f)
690 690 else:
691 691 self.drop(f)
692 692
693 693 self._dirty = True
694 694
695 695 def write(self, tr):
696 696 if not self._dirty:
697 697 return
698 698
699 699 filename = self._filename
700 700 if tr:
701 701 # 'dirstate.write()' is not only for writing in-memory
702 702 # changes out, but also for dropping ambiguous timestamps.
703 703 # Delayed writing would re-raise the "ambiguous timestamp issue".
704 704 # See also the wiki page below for details:
705 705 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
706 706
707 707 # emulate dropping timestamp in 'parsers.pack_dirstate'
708 708 now = _getfsnow(self._opener)
709 709 dmap = self._map
710 710 for f, e in dmap.iteritems():
711 711 if e[0] == 'n' and e[3] == now:
712 712 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
713 713 self._nonnormalset.add(f)
714 714
715 715 # emulate that all 'dirstate.normal' results are written out
716 716 self._lastnormaltime = 0
717 717
718 718 # delay writing in-memory changes out
719 719 tr.addfilegenerator('dirstate', (self._filename,),
720 720 self._writedirstate, location='plain')
721 721 return
722 722
723 723 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
724 724 self._writedirstate(st)
725 725
726 726 def addparentchangecallback(self, category, callback):
727 727 """add a callback to be called when the wd parents are changed
728 728
729 729 Callback will be called with the following arguments:
730 730 dirstate, (oldp1, oldp2), (newp1, newp2)
731 731
732 732 Category is a unique identifier to allow overwriting an old callback
733 733 with a newer callback.
734 734 """
735 735 self._plchangecallbacks[category] = callback
736 736
737 737 def _writedirstate(self, st):
738 738 # notify callbacks about parents change
739 739 if self._origpl is not None and self._origpl != self._pl:
740 740 for c, callback in sorted(self._plchangecallbacks.iteritems()):
741 741 callback(self, self._origpl, self._pl)
742 742 self._origpl = None
743 743 # use the modification time of the newly created temporary file as the
744 744 # filesystem's notion of 'now'
745 745 now = util.fstat(st).st_mtime & _rangemask
746 746
747 747 # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
748 748 # the timestamp of each entry in the dirstate, because of 'now > mtime'
749 749 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
750 750 if delaywrite > 0:
751 751 # do we have any files to delay for?
752 752 for f, e in self._map.iteritems():
753 753 if e[0] == 'n' and e[3] == now:
754 754 import time # to avoid useless import
755 755 # rather than sleep n seconds, sleep until the next
756 756 # multiple of n seconds
757 757 clock = time.time()
758 758 start = int(clock) - (int(clock) % delaywrite)
759 759 end = start + delaywrite
760 760 time.sleep(end - clock)
761 761 now = end # trust our estimate that the end is near now
762 762 break
763 763
764 764 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
765 765 self._nonnormalset = nonnormalentries(self._map)
766 766 st.close()
767 767 self._lastnormaltime = 0
768 768 self._dirty = self._dirtypl = False
769 769
770 770 def _dirignore(self, f):
771 771 if f == '.':
772 772 return False
773 773 if self._ignore(f):
774 774 return True
775 775 for p in util.finddirs(f):
776 776 if self._ignore(p):
777 777 return True
778 778 return False
779 779
780 780 def _ignorefiles(self):
781 781 files = []
782 782 if os.path.exists(self._join('.hgignore')):
783 783 files.append(self._join('.hgignore'))
784 784 for name, path in self._ui.configitems("ui"):
785 785 if name == 'ignore' or name.startswith('ignore.'):
786 786 # we need to use os.path.join here rather than self._join
787 787 # because path is arbitrary and user-specified
788 788 files.append(os.path.join(self._rootdir, util.expandpath(path)))
789 789 return files
790 790
791 791 def _ignorefileandline(self, f):
792 792 files = collections.deque(self._ignorefiles())
793 793 visited = set()
794 794 while files:
795 795 i = files.popleft()
796 796 patterns = matchmod.readpatternfile(i, self._ui.warn,
797 797 sourceinfo=True)
798 798 for pattern, lineno, line in patterns:
799 799 kind, p = matchmod._patsplit(pattern, 'glob')
800 800 if kind == "subinclude":
801 801 if p not in visited:
802 802 files.append(p)
803 803 continue
804 804 m = matchmod.match(self._root, '', [], [pattern],
805 805 warn=self._ui.warn)
806 806 if m(f):
807 807 return (i, lineno, line)
808 808 visited.add(i)
809 809 return (None, -1, "")
810 810
811 811 def _walkexplicit(self, match, subrepos):
812 812 '''Get stat data about the files explicitly specified by match.
813 813
814 814 Return a triple (results, dirsfound, dirsnotfound).
815 815 - results is a mapping from filename to stat result. It also contains
816 816 listings mapping subrepos and .hg to None.
817 817 - dirsfound is a list of files found to be directories.
818 818 - dirsnotfound is a list of files that the dirstate thinks are
819 819 directories and that were not found.'''
820 820
821 821 def badtype(mode):
822 822 kind = _('unknown')
823 823 if stat.S_ISCHR(mode):
824 824 kind = _('character device')
825 825 elif stat.S_ISBLK(mode):
826 826 kind = _('block device')
827 827 elif stat.S_ISFIFO(mode):
828 828 kind = _('fifo')
829 829 elif stat.S_ISSOCK(mode):
830 830 kind = _('socket')
831 831 elif stat.S_ISDIR(mode):
832 832 kind = _('directory')
833 833 return _('unsupported file type (type is %s)') % kind
834 834
835 835 matchedir = match.explicitdir
836 836 badfn = match.bad
837 837 dmap = self._map
838 838 lstat = os.lstat
839 839 getkind = stat.S_IFMT
840 840 dirkind = stat.S_IFDIR
841 841 regkind = stat.S_IFREG
842 842 lnkkind = stat.S_IFLNK
843 843 join = self._join
844 844 dirsfound = []
845 845 foundadd = dirsfound.append
846 846 dirsnotfound = []
847 847 notfoundadd = dirsnotfound.append
848 848
849 849 if not match.isexact() and self._checkcase:
850 850 normalize = self._normalize
851 851 else:
852 852 normalize = None
853 853
854 854 files = sorted(match.files())
855 855 subrepos.sort()
856 856 i, j = 0, 0
857 857 while i < len(files) and j < len(subrepos):
858 858 subpath = subrepos[j] + "/"
859 859 if files[i] < subpath:
860 860 i += 1
861 861 continue
862 862 while i < len(files) and files[i].startswith(subpath):
863 863 del files[i]
864 864 j += 1
865 865
866 866 if not files or '.' in files:
867 867 files = ['.']
868 868 results = dict.fromkeys(subrepos)
869 869 results['.hg'] = None
870 870
871 871 alldirs = None
872 872 for ff in files:
873 873 # constructing the foldmap is expensive, so don't do it for the
874 874 # common case where files is ['.']
875 875 if normalize and ff != '.':
876 876 nf = normalize(ff, False, True)
877 877 else:
878 878 nf = ff
879 879 if nf in results:
880 880 continue
881 881
882 882 try:
883 883 st = lstat(join(nf))
884 884 kind = getkind(st.st_mode)
885 885 if kind == dirkind:
886 886 if nf in dmap:
887 887 # file replaced by dir on disk but still in dirstate
888 888 results[nf] = None
889 889 if matchedir:
890 890 matchedir(nf)
891 891 foundadd((nf, ff))
892 892 elif kind == regkind or kind == lnkkind:
893 893 results[nf] = st
894 894 else:
895 895 badfn(ff, badtype(kind))
896 896 if nf in dmap:
897 897 results[nf] = None
898 898 except OSError as inst: # nf not found on disk - it is dirstate only
899 899 if nf in dmap: # does it exactly match a missing file?
900 900 results[nf] = None
901 901 else: # does it match a missing directory?
902 902 if alldirs is None:
903 903 alldirs = util.dirs(dmap)
904 904 if nf in alldirs:
905 905 if matchedir:
906 906 matchedir(nf)
907 907 notfoundadd(nf)
908 908 else:
909 909 badfn(ff, inst.strerror)
910 910
911 911 # Case insensitive filesystems cannot rely on lstat() failing to detect
912 912 # a case-only rename. Prune the stat object for any file that does not
913 913 # match the case in the filesystem, if there are multiple files that
914 914 # normalize to the same path.
915 915 if match.isexact() and self._checkcase:
916 916 normed = {}
917 917
918 918 for f, st in results.iteritems():
919 919 if st is None:
920 920 continue
921 921
922 922 nc = util.normcase(f)
923 923 paths = normed.get(nc)
924 924
925 925 if paths is None:
926 926 paths = set()
927 927 normed[nc] = paths
928 928
929 929 paths.add(f)
930 930
931 931 for norm, paths in normed.iteritems():
932 932 if len(paths) > 1:
933 933 for path in paths:
934 934 folded = self._discoverpath(path, norm, True, None,
935 935 self._dirfoldmap)
936 936 if path != folded:
937 937 results[path] = None
938 938
939 939 return results, dirsfound, dirsnotfound
940 940
941 941 def walk(self, match, subrepos, unknown, ignored, full=True):
942 942 '''
943 943 Walk recursively through the directory tree, finding all files
944 944 matched by match.
945 945
946 946 If full is False, maybe skip some known-clean files.
947 947
948 948 Return a dict mapping filename to stat-like object (either
949 949 mercurial.osutil.stat instance or return value of os.stat()).
950 950
951 951 '''
952 952 # full is a flag that extensions that hook into walk can use -- this
953 953 # implementation doesn't use it at all. This satisfies the contract
954 954 # because we only guarantee a "maybe".
955 955
956 956 if ignored:
957 957 ignore = util.never
958 958 dirignore = util.never
959 959 elif unknown:
960 960 ignore = self._ignore
961 961 dirignore = self._dirignore
962 962 else:
963 963 # if not unknown and not ignored, drop dir recursion and step 2
964 964 ignore = util.always
965 965 dirignore = util.always
966 966
967 967 matchfn = match.matchfn
968 968 matchalways = match.always()
969 969 matchtdir = match.traversedir
970 970 dmap = self._map
971 971 listdir = osutil.listdir
972 972 lstat = os.lstat
973 973 dirkind = stat.S_IFDIR
974 974 regkind = stat.S_IFREG
975 975 lnkkind = stat.S_IFLNK
976 976 join = self._join
977 977
978 978 exact = skipstep3 = False
979 979 if match.isexact(): # match.exact
980 980 exact = True
981 981 dirignore = util.always # skip step 2
982 982 elif match.prefix(): # match.match, no patterns
983 983 skipstep3 = True
984 984
985 985 if not exact and self._checkcase:
986 986 normalize = self._normalize
987 987 normalizefile = self._normalizefile
988 988 skipstep3 = False
989 989 else:
990 990 normalize = self._normalize
991 991 normalizefile = None
992 992
993 993 # step 1: find all explicit files
994 994 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
995 995
996 996 skipstep3 = skipstep3 and not (work or dirsnotfound)
997 997 work = [d for d in work if not dirignore(d[0])]
998 998
999 999 # step 2: visit subdirectories
1000 1000 def traverse(work, alreadynormed):
1001 1001 wadd = work.append
1002 1002 while work:
1003 1003 nd = work.pop()
1004 1004 skip = None
1005 1005 if nd == '.':
1006 1006 nd = ''
1007 1007 else:
1008 1008 skip = '.hg'
1009 1009 try:
1010 1010 entries = listdir(join(nd), stat=True, skip=skip)
1011 1011 except OSError as inst:
1012 1012 if inst.errno in (errno.EACCES, errno.ENOENT):
1013 1013 match.bad(self.pathto(nd), inst.strerror)
1014 1014 continue
1015 1015 raise
1016 1016 for f, kind, st in entries:
1017 1017 if normalizefile:
1018 1018 # even though f might be a directory, we're only
1019 1019 # interested in comparing it to files currently in the
1020 1020 # dmap -- therefore normalizefile is enough
1021 1021 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1022 1022 True)
1023 1023 else:
1024 1024 nf = nd and (nd + "/" + f) or f
1025 1025 if nf not in results:
1026 1026 if kind == dirkind:
1027 1027 if not ignore(nf):
1028 1028 if matchtdir:
1029 1029 matchtdir(nf)
1030 1030 wadd(nf)
1031 1031 if nf in dmap and (matchalways or matchfn(nf)):
1032 1032 results[nf] = None
1033 1033 elif kind == regkind or kind == lnkkind:
1034 1034 if nf in dmap:
1035 1035 if matchalways or matchfn(nf):
1036 1036 results[nf] = st
1037 1037 elif ((matchalways or matchfn(nf))
1038 1038 and not ignore(nf)):
1039 1039 # unknown file -- normalize if necessary
1040 1040 if not alreadynormed:
1041 1041 nf = normalize(nf, False, True)
1042 1042 results[nf] = st
1043 1043 elif nf in dmap and (matchalways or matchfn(nf)):
1044 1044 results[nf] = None
1045 1045
1046 1046 for nd, d in work:
1047 1047 # alreadynormed means that processwork doesn't have to do any
1048 1048 # expensive directory normalization
1049 1049 alreadynormed = not normalize or nd == d
1050 1050 traverse([d], alreadynormed)
1051 1051
1052 1052 for s in subrepos:
1053 1053 del results[s]
1054 1054 del results['.hg']
1055 1055
1056 1056 # step 3: visit remaining files from dmap
1057 1057 if not skipstep3 and not exact:
1058 1058 # If a dmap file is not in results yet, it was either
1059 1059 # a) not matching matchfn b) ignored, c) missing, or d) under a
1060 1060 # symlink directory.
1061 1061 if not results and matchalways:
1062 1062 visit = dmap.keys()
1063 1063 else:
1064 1064 visit = [f for f in dmap if f not in results and matchfn(f)]
1065 1065 visit.sort()
1066 1066
1067 1067 if unknown:
1068 1068 # unknown == True means we walked all dirs under the roots
1069 1069 # that weren't ignored, and everything that matched was stat'ed
1070 1070 # and is already in results.
1071 1071 # The rest must thus be ignored or under a symlink.
1072 1072 audit_path = pathutil.pathauditor(self._root)
1073 1073
1074 1074 for nf in iter(visit):
1075 1075 # If a stat for the same file was already added with a
1076 1076 # different case, don't add one for this, since that would
1077 1077 # make it appear as if the file exists under both names
1078 1078 # on disk.
1079 1079 if (normalizefile and
1080 1080 normalizefile(nf, True, True) in results):
1081 1081 results[nf] = None
1082 1082 # Report ignored items in the dmap as long as they are not
1083 1083 # under a symlink directory.
1084 1084 elif audit_path.check(nf):
1085 1085 try:
1086 1086 results[nf] = lstat(join(nf))
1087 1087 # file was just ignored, no links, and exists
1088 1088 except OSError:
1089 1089 # file doesn't exist
1090 1090 results[nf] = None
1091 1091 else:
1092 1092 # It's either missing or under a symlink directory
1093 1093 # which we in this case report as missing
1094 1094 results[nf] = None
1095 1095 else:
1096 1096 # We may not have walked the full directory tree above,
1097 1097 # so stat and check everything we missed.
1098 1098 nf = iter(visit).next
1099 1099 for st in util.statfiles([join(i) for i in visit]):
1100 1100 results[nf()] = st
1101 1101 return results
1102 1102
1103 1103 def status(self, match, subrepos, ignored, clean, unknown):
1104 1104 '''Determine the status of the working copy relative to the
1105 1105 dirstate and return a pair of (unsure, status), where status is of type
1106 1106 scmutil.status and:
1107 1107
1108 1108 unsure:
1109 1109 files that might have been modified since the dirstate was
1110 1110 written, but need to be read to be sure (size is the same
1111 1111 but mtime differs)
1112 1112 status.modified:
1113 1113 files that have definitely been modified since the dirstate
1114 1114 was written (different size or mode)
1115 1115 status.clean:
1116 1116 files that have definitely not been modified since the
1117 1117 dirstate was written
1118 1118 '''
1119 1119 listignored, listclean, listunknown = ignored, clean, unknown
1120 1120 lookup, modified, added, unknown, ignored = [], [], [], [], []
1121 1121 removed, deleted, clean = [], [], []
1122 1122
1123 1123 dmap = self._map
1124 1124 ladd = lookup.append # aka "unsure"
1125 1125 madd = modified.append
1126 1126 aadd = added.append
1127 1127 uadd = unknown.append
1128 1128 iadd = ignored.append
1129 1129 radd = removed.append
1130 1130 dadd = deleted.append
1131 1131 cadd = clean.append
1132 1132 mexact = match.exact
1133 1133 dirignore = self._dirignore
1134 1134 checkexec = self._checkexec
1135 1135 copymap = self._copymap
1136 1136 lastnormaltime = self._lastnormaltime
1137 1137
1138 1138 # We need to do full walks when either
1139 1139 # - we're listing all clean files, or
1140 1140 # - match.traversedir does something, because match.traversedir should
1141 1141 # be called for every dir in the working dir
1142 1142 full = listclean or match.traversedir is not None
1143 1143 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1144 1144 full=full).iteritems():
1145 1145 if fn not in dmap:
1146 1146 if (listignored or mexact(fn)) and dirignore(fn):
1147 1147 if listignored:
1148 1148 iadd(fn)
1149 1149 else:
1150 1150 uadd(fn)
1151 1151 continue
1152 1152
1153 1153 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1154 1154 # written like that for performance reasons. dmap[fn] is not a
1155 1155 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1156 1156 # opcode has fast paths when the value to be unpacked is a tuple or
1157 1157 # a list, but falls back to creating a full-fledged iterator in
1158 1158 # general. That is much slower than simply accessing and storing the
1159 1159 # tuple members one by one.
1160 1160 t = dmap[fn]
1161 1161 state = t[0]
1162 1162 mode = t[1]
1163 1163 size = t[2]
1164 1164 time = t[3]
1165 1165
1166 1166 if not st and state in "nma":
1167 1167 dadd(fn)
1168 1168 elif state == 'n':
1169 1169 if (size >= 0 and
1170 1170 ((size != st.st_size and size != st.st_size & _rangemask)
1171 1171 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1172 1172 or size == -2 # other parent
1173 1173 or fn in copymap):
1174 1174 madd(fn)
1175 1175 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1176 1176 ladd(fn)
1177 1177 elif st.st_mtime == lastnormaltime:
1178 1178 # fn may have just been marked as normal and it may have
1179 1179 # changed in the same second without changing its size.
1180 1180 # This can happen if we quickly do multiple commits.
1181 1181 # Force lookup, so we don't miss such a racy file change.
1182 1182 ladd(fn)
1183 1183 elif listclean:
1184 1184 cadd(fn)
1185 1185 elif state == 'm':
1186 1186 madd(fn)
1187 1187 elif state == 'a':
1188 1188 aadd(fn)
1189 1189 elif state == 'r':
1190 1190 radd(fn)
1191 1191
1192 1192 return (lookup, scmutil.status(modified, added, removed, deleted,
1193 1193 unknown, ignored, clean))
1194 1194
1195 1195 def matches(self, match):
1196 1196 '''
1197 1197 return files in the dirstate (in whatever state) filtered by match
1198 1198 '''
1199 1199 dmap = self._map
1200 1200 if match.always():
1201 1201 return dmap.keys()
1202 1202 files = match.files()
1203 1203 if match.isexact():
1204 1204 # fast path -- filter the other way around, since typically files is
1205 1205 # much smaller than dmap
1206 1206 return [f for f in files if f in dmap]
1207 1207 if match.prefix() and all(fn in dmap for fn in files):
1208 1208 # fast path -- all the values are known to be files, so just return
1209 1209 # that
1210 1210 return list(files)
1211 1211 return [f for f in dmap if match(f)]
1212 1212
1213 1213 def _actualfilename(self, tr):
1214 1214 if tr:
1215 1215 return self._pendingfilename
1216 1216 else:
1217 1217 return self._filename
1218 1218
1219 1219 def savebackup(self, tr, suffix='', prefix=''):
1220 1220 '''Save current dirstate into backup file with suffix'''
1221 1221 assert len(suffix) > 0 or len(prefix) > 0
1222 1222 filename = self._actualfilename(tr)
1223 1223
1224 1224 # use '_writedirstate' instead of 'write' to write changes certainly,
1225 1225 # because the latter omits writing out if transaction is running.
1226 1226 # output file will be used to create backup of dirstate at this point.
1227 1227 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1228 1228 checkambig=True))
1229 1229
1230 1230 if tr:
1231 1231 # ensure that subsequent tr.writepending returns True for
1232 1232 # changes written out above, even if dirstate is never
1233 1233 # changed after this
1234 1234 tr.addfilegenerator('dirstate', (self._filename,),
1235 1235 self._writedirstate, location='plain')
1236 1236
1237 1237 # ensure that pending file written above is unlinked at
1238 1238 # failure, even if tr.writepending isn't invoked until the
1239 1239 # end of this transaction
1240 1240 tr.registertmp(filename, location='plain')
1241 1241
1242 1242 self._opener.write(prefix + self._filename + suffix,
1243 1243 self._opener.tryread(filename))
1244 1244
1245 1245 def restorebackup(self, tr, suffix='', prefix=''):
1246 1246 '''Restore dirstate by backup file with suffix'''
1247 1247 assert len(suffix) > 0 or len(prefix) > 0
1248 1248 # this "invalidate()" prevents "wlock.release()" from writing
1249 1249 # changes of dirstate out after restoring from backup file
1250 1250 self.invalidate()
1251 1251 filename = self._actualfilename(tr)
1252 1252 # using self._filename to avoid having "pending" in the backup filename
1253 1253 self._opener.rename(prefix + self._filename + suffix, filename,
1254 1254 checkambig=True)
1255 1255
1256 1256 def clearbackup(self, tr, suffix='', prefix=''):
1257 1257 '''Clear backup file with suffix'''
1258 1258 assert len(suffix) > 0 or len(prefix) > 0
1259 1259 # using self._filename to avoid having "pending" in the backup filename
1260 1260 self._opener.unlink(prefix + self._filename + suffix)
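The only line changed in dirstate.py above is the separator used in getcwd() when stripping the repository root from the current directory. The following is a short standalone sketch of that prefix-stripping logic, not the real method; relativetoroot and the ossep definition are illustrative assumptions.

# Hypothetical standalone version of the getcwd() prefix stripping touched
# here: the root must end with the separator before startswith()/slicing,
# and that separator must be bytes to match Mercurial's bytes paths on py3.
import os

ossep = os.sep if isinstance(os.sep, bytes) else os.sep.encode('ascii')

def relativetoroot(cwd, root):
    """Return cwd relative to root, or cwd unchanged if outside the repo."""
    if cwd == root:
        return b''
    rootsep = root if root.endswith(ossep) else root + ossep
    if cwd.startswith(rootsep):
        return cwd[len(rootsep):]
    return cwd  # outside the repo: report the absolute path

print(relativetoroot(b'/repo/src/module', b'/repo'))  # b'src/module'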
@@ -1,214 +1,215 b''
1 1 from __future__ import absolute_import
2 2
3 3 import errno
4 4 import os
5 5 import posixpath
6 6 import stat
7 7
8 8 from .i18n import _
9 9 from . import (
10 10 encoding,
11 11 error,
12 pycompat,
12 13 util,
13 14 )
14 15
15 16 def _lowerclean(s):
16 17 return encoding.hfsignoreclean(s.lower())
17 18
18 19 class pathauditor(object):
19 20 '''ensure that a filesystem path contains no banned components.
20 21 the following properties of a path are checked:
21 22
22 23 - ends with a directory separator
23 24 - under top-level .hg
24 25 - starts at the root of a windows drive
25 26 - contains ".."
26 27
27 28 More checks are also done on the file system state:
28 29 - traverses a symlink (e.g. a/symlink_here/b)
29 30 - inside a nested repository (a callback can be used to approve
30 31 some nested repositories, e.g., subrepositories)
31 32
32 33 The file system checks are only done when 'realfs' is set to True (the
33 34 default). They should be disabled when we are auditing paths for operations on
34 35 stored history.
35 36 '''
36 37
37 38 def __init__(self, root, callback=None, realfs=True):
38 39 self.audited = set()
39 40 self.auditeddir = set()
40 41 self.root = root
41 42 self._realfs = realfs
42 43 self.callback = callback
43 44 if os.path.lexists(root) and not util.fscasesensitive(root):
44 45 self.normcase = util.normcase
45 46 else:
46 47 self.normcase = lambda x: x
47 48
48 49 def __call__(self, path):
49 50 '''Check the relative path.
50 51 path may contain a pattern (e.g. foodir/**.txt)'''
51 52
52 53 path = util.localpath(path)
53 54 normpath = self.normcase(path)
54 55 if normpath in self.audited:
55 56 return
56 57 # AIX ignores "/" at end of path, others raise EISDIR.
57 58 if util.endswithsep(path):
58 59 raise error.Abort(_("path ends in directory separator: %s") % path)
59 60 parts = util.splitpath(path)
60 61 if (os.path.splitdrive(path)[0]
61 62 or _lowerclean(parts[0]) in ('.hg', '.hg.', '')
62 63 or os.pardir in parts):
63 64 raise error.Abort(_("path contains illegal component: %s") % path)
64 65 # Windows shortname aliases
65 66 for p in parts:
66 67 if "~" in p:
67 68 first, last = p.split("~", 1)
68 69 if last.isdigit() and first.upper() in ["HG", "HG8B6C"]:
69 70 raise error.Abort(_("path contains illegal component: %s")
70 71 % path)
71 72 if '.hg' in _lowerclean(path):
72 73 lparts = [_lowerclean(p.lower()) for p in parts]
73 74 for p in '.hg', '.hg.':
74 75 if p in lparts[1:]:
75 76 pos = lparts.index(p)
76 77 base = os.path.join(*parts[:pos])
77 78 raise error.Abort(_("path '%s' is inside nested repo %r")
78 79 % (path, base))
79 80
80 81 normparts = util.splitpath(normpath)
81 82 assert len(parts) == len(normparts)
82 83
83 84 parts.pop()
84 85 normparts.pop()
85 86 prefixes = []
86 87 # It's important that we check the path parts starting from the root.
87 88 # This means we won't accidentally traverse a symlink into some other
88 89 # filesystem (which is potentially expensive to access).
89 90 for i in range(len(parts)):
90 prefix = os.sep.join(parts[:i + 1])
91 normprefix = os.sep.join(normparts[:i + 1])
91 prefix = pycompat.ossep.join(parts[:i + 1])
92 normprefix = pycompat.ossep.join(normparts[:i + 1])
92 93 if normprefix in self.auditeddir:
93 94 continue
94 95 if self._realfs:
95 96 self._checkfs(prefix, path)
96 97 prefixes.append(normprefix)
97 98
98 99 self.audited.add(normpath)
99 100 # only add prefixes to the cache after checking everything: we don't
100 101 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
101 102 self.auditeddir.update(prefixes)
102 103
103 104 def _checkfs(self, prefix, path):
104 105 """raise exception if a file system backed check fails"""
105 106 curpath = os.path.join(self.root, prefix)
106 107 try:
107 108 st = os.lstat(curpath)
108 109 except OSError as err:
109 110 # EINVAL can be raised for invalid path syntax under win32.
110 111 # Such errors must be ignored so that patterns can still be checked.
111 112 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
112 113 raise
113 114 else:
114 115 if stat.S_ISLNK(st.st_mode):
115 116 msg = _('path %r traverses symbolic link %r') % (path, prefix)
116 117 raise error.Abort(msg)
117 118 elif (stat.S_ISDIR(st.st_mode) and
118 119 os.path.isdir(os.path.join(curpath, '.hg'))):
119 120 if not self.callback or not self.callback(curpath):
120 121 msg = _("path '%s' is inside nested repo %r")
121 122 raise error.Abort(msg % (path, prefix))
122 123
123 124 def check(self, path):
124 125 try:
125 126 self(path)
126 127 return True
127 128 except (OSError, error.Abort):
128 129 return False
129 130
130 131 def canonpath(root, cwd, myname, auditor=None):
131 132 '''return the canonical path of myname, given cwd and root'''
132 133 if util.endswithsep(root):
133 134 rootsep = root
134 135 else:
135 rootsep = root + os.sep
136 rootsep = root + pycompat.ossep
136 137 name = myname
137 138 if not os.path.isabs(name):
138 139 name = os.path.join(root, cwd, name)
139 140 name = os.path.normpath(name)
140 141 if auditor is None:
141 142 auditor = pathauditor(root)
142 143 if name != rootsep and name.startswith(rootsep):
143 144 name = name[len(rootsep):]
144 145 auditor(name)
145 146 return util.pconvert(name)
146 147 elif name == root:
147 148 return ''
148 149 else:
149 150 # Determine whether `name' is in the hierarchy at or beneath `root',
150 151 # by iterating name=dirname(name) until that causes no change (can't
151 152 # check name == '/', because that doesn't work on windows). The list
152 153 # `rel' holds the reversed list of components making up the relative
153 154 # file name we want.
154 155 rel = []
155 156 while True:
156 157 try:
157 158 s = util.samefile(name, root)
158 159 except OSError:
159 160 s = False
160 161 if s:
161 162 if not rel:
162 163 # name was actually the same as root (maybe a symlink)
163 164 return ''
164 165 rel.reverse()
165 166 name = os.path.join(*rel)
166 167 auditor(name)
167 168 return util.pconvert(name)
168 169 dirname, basename = util.split(name)
169 170 rel.append(basename)
170 171 if dirname == name:
171 172 break
172 173 name = dirname
173 174
174 175 # A common mistake is to use -R, but specify a file relative to the repo
175 176 # instead of cwd. Detect that case, and provide a hint to the user.
176 177 hint = None
177 178 try:
178 179 if cwd != root:
179 180 canonpath(root, root, myname, auditor)
180 181 hint = (_("consider using '--cwd %s'")
181 182 % os.path.relpath(root, cwd))
182 183 except error.Abort:
183 184 pass
184 185
185 186 raise error.Abort(_("%s not under root '%s'") % (myname, root),
186 187 hint=hint)
187 188
188 189 def normasprefix(path):
189 190 '''normalize the specified path as path prefix
190 191
191 192 Returned value can be used safely for "p.startswith(prefix)",
192 193 "p[len(prefix):]", and so on.
193 194
194 195 For efficiency, this expects "path" argument to be already
195 196 normalized by "os.path.normpath", "os.path.realpath", and so on.
196 197
197 198 See also issue3033 for detail about need of this function.
198 199
199 200 >>> normasprefix('/foo/bar').replace(os.sep, '/')
200 201 '/foo/bar/'
201 202 >>> normasprefix('/').replace(os.sep, '/')
202 203 '/'
203 204 '''
204 205 d, p = os.path.splitdrive(path)
205 if len(p) != len(os.sep):
206 return path + os.sep
206 if len(p) != len(pycompat.ossep):
207 return path + pycompat.ossep
207 208 else:
208 209 return path
209 210
210 211 # forward two methods from posixpath that do what we need, but we'd
211 212 # rather not let our internals know that we're thinking in posix terms
212 213 # - instead we'll let them be oblivious.
213 214 join = posixpath.join
214 215 dirname = posixpath.dirname
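In pathutil.py the same substitution lands in pathauditor.__call__, which rebuilds every cumulative prefix of a path in order to audit it, and in canonpath/normasprefix. Here is a brief illustrative sketch of that cumulative-prefix walk with a bytes separator; walkprefixes and the ossep definition are assumed names, not Mercurial's code.

# Illustrative sketch of the cumulative prefix walk in pathauditor.__call__,
# using a bytes separator so it also works with bytes path parts on py3.
import os

ossep = os.sep if isinstance(os.sep, bytes) else os.sep.encode('ascii')

def walkprefixes(parts):
    """Yield every cumulative prefix of a split path, root first:
    [b'a', b'b', b'c'] -> b'a', b'a/b', b'a/b/c' (with the OS separator)."""
    for i in range(len(parts)):
        yield ossep.join(parts[:i + 1])

# usage: each prefix could then be checked against an audited-dir cache
print(list(walkprefixes([b'foo', b'bar', b'baz'])))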
@@ -1,652 +1,652 b''
1 1 # posix.py - Posix utility function implementations for Mercurial
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import fcntl
12 12 import getpass
13 13 import grp
14 14 import os
15 15 import pwd
16 16 import re
17 17 import select
18 18 import stat
19 19 import sys
20 20 import tempfile
21 21 import unicodedata
22 22
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 pycompat,
27 27 )
28 28
29 29 posixfile = open
30 30 normpath = os.path.normpath
31 31 samestat = os.path.samestat
32 32 try:
33 33 oslink = os.link
34 34 except AttributeError:
35 35 # Some platforms build Python without os.link on systems that are
36 36 # vaguely unix-like but don't have hardlink support. For those
37 37 # poor souls, just say we tried and that it failed so we fall back
38 38 # to copies.
39 39 def oslink(src, dst):
40 40 raise OSError(errno.EINVAL,
41 41 'hardlinks not supported: %s to %s' % (src, dst))
42 42 unlink = os.unlink
43 43 rename = os.rename
44 44 removedirs = os.removedirs
45 45 expandglobs = False
46 46
47 47 umask = os.umask(0)
48 48 os.umask(umask)
49 49
50 50 def split(p):
51 51 '''Same as posixpath.split, but faster
52 52
53 53 >>> import posixpath
54 54 >>> for f in ['/absolute/path/to/file',
55 55 ... 'relative/path/to/file',
56 56 ... 'file_alone',
57 57 ... 'path/to/directory/',
58 58 ... '/multiple/path//separators',
59 59 ... '/file_at_root',
60 60 ... '///multiple_leading_separators_at_root',
61 61 ... '']:
62 62 ... assert split(f) == posixpath.split(f), f
63 63 '''
64 64 ht = p.rsplit('/', 1)
65 65 if len(ht) == 1:
66 66 return '', p
67 67 nh = ht[0].rstrip('/')
68 68 if nh:
69 69 return nh, ht[1]
70 70 return ht[0] + '/', ht[1]
71 71
72 72 def openhardlinks():
73 73 '''return true if it is safe to hold open file handles to hardlinks'''
74 74 return True
75 75
76 76 def nlinks(name):
77 77 '''return number of hardlinks for the given file'''
78 78 return os.lstat(name).st_nlink
79 79
80 80 def parsepatchoutput(output_line):
81 81 """parses the output produced by patch and returns the filename"""
82 82 pf = output_line[14:]
83 83 if os.sys.platform == 'OpenVMS':
84 84 if pf[0] == '`':
85 85 pf = pf[1:-1] # Remove the quotes
86 86 else:
87 87 if pf.startswith("'") and pf.endswith("'") and " " in pf:
88 88 pf = pf[1:-1] # Remove the quotes
89 89 return pf
90 90
91 91 def sshargs(sshcmd, host, user, port):
92 92 '''Build argument list for ssh'''
93 93 args = user and ("%s@%s" % (user, host)) or host
94 94 return port and ("%s -p %s" % (args, port)) or args
95 95
96 96 def isexec(f):
97 97 """check whether a file is executable"""
98 98 return (os.lstat(f).st_mode & 0o100 != 0)
99 99
100 100 def setflags(f, l, x):
101 101 s = os.lstat(f).st_mode
102 102 if l:
103 103 if not stat.S_ISLNK(s):
104 104 # switch file to link
105 105 fp = open(f)
106 106 data = fp.read()
107 107 fp.close()
108 108 os.unlink(f)
109 109 try:
110 110 os.symlink(data, f)
111 111 except OSError:
112 112 # failed to make a link, rewrite file
113 113 fp = open(f, "w")
114 114 fp.write(data)
115 115 fp.close()
116 116 # no chmod needed at this point
117 117 return
118 118 if stat.S_ISLNK(s):
119 119 # switch link to file
120 120 data = os.readlink(f)
121 121 os.unlink(f)
122 122 fp = open(f, "w")
123 123 fp.write(data)
124 124 fp.close()
125 125 s = 0o666 & ~umask # avoid restatting for chmod
126 126
127 127 sx = s & 0o100
128 128 if x and not sx:
129 129 # Turn on +x for every +r bit when making a file executable
130 130 # and obey umask.
131 131 os.chmod(f, s | (s & 0o444) >> 2 & ~umask)
132 132 elif not x and sx:
133 133 # Turn off all +x bits
134 134 os.chmod(f, s & 0o666)
135 135
136 136 def copymode(src, dst, mode=None):
137 137 '''Copy the file mode from the file at path src to dst.
138 138 If src doesn't exist, we're using mode instead. If mode is None, we're
139 139 using umask.'''
140 140 try:
141 141 st_mode = os.lstat(src).st_mode & 0o777
142 142 except OSError as inst:
143 143 if inst.errno != errno.ENOENT:
144 144 raise
145 145 st_mode = mode
146 146 if st_mode is None:
147 147 st_mode = ~umask
148 148 st_mode &= 0o666
149 149 os.chmod(dst, st_mode)
150 150
151 151 def checkexec(path):
152 152 """
153 153 Check whether the given path is on a filesystem with UNIX-like exec flags
154 154
155 155 Requires a directory (like /foo/.hg)
156 156 """
157 157
158 158 # VFAT on some Linux versions can flip mode but it doesn't persist across
159 159 # a FS remount. Frequently we can detect it if files are created
160 160 # with exec bit on.
161 161
162 162 try:
163 163 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
164 164 cachedir = os.path.join(path, '.hg', 'cache')
165 165 if os.path.isdir(cachedir):
166 166 checkisexec = os.path.join(cachedir, 'checkisexec')
167 167 checknoexec = os.path.join(cachedir, 'checknoexec')
168 168
169 169 try:
170 170 m = os.stat(checkisexec).st_mode
171 171 except OSError as e:
172 172 if e.errno != errno.ENOENT:
173 173 raise
174 174 # checkisexec does not exist - fall through ...
175 175 else:
176 176 # checkisexec exists, check if it actually is exec
177 177 if m & EXECFLAGS != 0:
178 178 # ensure checkisexec exists, check it isn't exec
179 179 try:
180 180 m = os.stat(checknoexec).st_mode
181 181 except OSError as e:
182 182 if e.errno != errno.ENOENT:
183 183 raise
184 184 file(checknoexec, 'w').close() # might fail
185 185 m = os.stat(checknoexec).st_mode
186 186 if m & EXECFLAGS == 0:
187 187 # check-exec is exec and check-no-exec is not exec
188 188 return True
189 189 # checknoexec exists but is exec - delete it
190 190 os.unlink(checknoexec)
191 191 # checkisexec exists but is not exec - delete it
192 192 os.unlink(checkisexec)
193 193
194 194 # check using one file, leave it as checkisexec
195 195 checkdir = cachedir
196 196 else:
197 197 # check directly in path and don't leave checkisexec behind
198 198 checkdir = path
199 199 checkisexec = None
200 200 fh, fn = tempfile.mkstemp(dir=checkdir, prefix='hg-checkexec-')
201 201 try:
202 202 os.close(fh)
203 203 m = os.stat(fn).st_mode
204 204 if m & EXECFLAGS == 0:
205 205 os.chmod(fn, m & 0o777 | EXECFLAGS)
206 206 if os.stat(fn).st_mode & EXECFLAGS != 0:
207 207 if checkisexec is not None:
208 208 os.rename(fn, checkisexec)
209 209 fn = None
210 210 return True
211 211 finally:
212 212 if fn is not None:
213 213 os.unlink(fn)
214 214 except (IOError, OSError):
215 215 # we don't care, the user probably won't be able to commit anyway
216 216 return False
217 217
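In practice the probe above is invoked once per repository root; when .hg/cache exists it leaves a checkisexec marker behind so later runs can take the fast stat-only path. A hedged sketch of a caller (the path is hypothetical):

    if not checkexec('/path/to/repo'):
        # e.g. VFAT or NTFS mounts: the exec bit can't be trusted, so a
        # caller would ignore mode differences that only involve 0o100
        pass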
218 218 def checklink(path):
219 219 """check whether the given path is on a symlink-capable filesystem"""
220 220 # mktemp is not racy because symlink creation will fail if the
221 221 # file already exists
222 222 while True:
223 223 cachedir = os.path.join(path, '.hg', 'cache')
224 224 checklink = os.path.join(cachedir, 'checklink')
225 225 # try fast path, read only
226 226 if os.path.islink(checklink):
227 227 return True
228 228 if os.path.isdir(cachedir):
229 229 checkdir = cachedir
230 230 else:
231 231 checkdir = path
232 232 cachedir = None
233 233 name = tempfile.mktemp(dir=checkdir, prefix='checklink-')
234 234 try:
235 235 fd = None
236 236 if cachedir is None:
237 237 fd = tempfile.NamedTemporaryFile(dir=checkdir,
238 238 prefix='hg-checklink-')
239 239 target = os.path.basename(fd.name)
240 240 else:
241 241 # create a fixed file to link to; doesn't matter if it
242 242 # already exists.
243 243 target = 'checklink-target'
244 244 open(os.path.join(cachedir, target), 'w').close()
245 245 try:
246 246 os.symlink(target, name)
247 247 if cachedir is None:
248 248 os.unlink(name)
249 249 else:
250 250 try:
251 251 os.rename(name, checklink)
252 252 except OSError:
253 253 os.unlink(name)
254 254 return True
255 255 except OSError as inst:
256 256 # link creation might race, try again
257 257 if inst[0] == errno.EEXIST:
258 258 continue
259 259 raise
260 260 finally:
261 261 if fd is not None:
262 262 fd.close()
263 263 except AttributeError:
264 264 return False
265 265 except OSError as inst:
266 266 # sshfs might report failure while successfully creating the link
267 267 if inst[0] == errno.EIO and os.path.exists(name):
268 268 os.unlink(name)
269 269 return False
270 270
271 271 def checkosfilename(path):
272 272 '''Check that the base-relative path is a valid filename on this platform.
273 273 Returns None if the path is ok, or a UI string describing the problem.'''
274 274 pass # on posix platforms, every path is ok
275 275
276 276 def setbinary(fd):
277 277 pass
278 278
279 279 def pconvert(path):
280 280 return path
281 281
282 282 def localpath(path):
283 283 return path
284 284
285 285 def samefile(fpath1, fpath2):
286 286 """Returns whether path1 and path2 refer to the same file. This is only
287 287 guaranteed to work for files, not directories."""
288 288 return os.path.samefile(fpath1, fpath2)
289 289
290 290 def samedevice(fpath1, fpath2):
291 291 """Returns whether fpath1 and fpath2 are on the same device. This is only
292 292 guaranteed to work for files, not directories."""
293 293 st1 = os.lstat(fpath1)
294 294 st2 = os.lstat(fpath2)
295 295 return st1.st_dev == st2.st_dev
296 296
297 297 # os.path.normcase is a no-op, which doesn't help us on non-native filesystems
298 298 def normcase(path):
299 299 return path.lower()
300 300
301 301 # what normcase does to ASCII strings
302 302 normcasespec = encoding.normcasespecs.lower
303 303 # fallback normcase function for non-ASCII strings
304 304 normcasefallback = normcase
305 305
306 306 if sys.platform == 'darwin':
307 307
308 308 def normcase(path):
309 309 '''
310 310 Normalize a filename for OS X-compatible comparison:
311 311 - escape-encode invalid characters
312 312 - decompose to NFD
313 313 - lowercase
314 314 - omit ignored characters [200c-200f, 202a-202e, 206a-206f, feff]
315 315
316 316 >>> normcase('UPPER')
317 317 'upper'
318 318 >>> normcase('Caf\xc3\xa9')
319 319 'cafe\\xcc\\x81'
320 320 >>> normcase('\xc3\x89')
321 321 'e\\xcc\\x81'
322 322 >>> normcase('\xb8\xca\xc3\xca\xbe\xc8.JPG') # issue3918
323 323 '%b8%ca%c3\\xca\\xbe%c8.jpg'
324 324 '''
325 325
326 326 try:
327 327 return encoding.asciilower(path) # exception for non-ASCII
328 328 except UnicodeDecodeError:
329 329 return normcasefallback(path)
330 330
331 331 normcasespec = encoding.normcasespecs.lower
332 332
333 333 def normcasefallback(path):
334 334 try:
335 335 u = path.decode('utf-8')
336 336 except UnicodeDecodeError:
337 337 # OS X percent-encodes any bytes that aren't valid utf-8
338 338 s = ''
339 339 pos = 0
340 340 l = len(path)
341 341 while pos < l:
342 342 try:
343 343 c = encoding.getutf8char(path, pos)
344 344 pos += len(c)
345 345 except ValueError:
346 346 c = '%%%02X' % ord(path[pos])
347 347 pos += 1
348 348 s += c
349 349
350 350 u = s.decode('utf-8')
351 351
352 352 # Decompose then lowercase (HFS+ technote specifies lower)
353 353 enc = unicodedata.normalize('NFD', u).lower().encode('utf-8')
354 354 # drop HFS+ ignored characters
355 355 return encoding.hfsignoreclean(enc)
356 356
357 357 if sys.platform == 'cygwin':
358 358 # workaround for cygwin, in which mount point part of path is
359 359 # treated as case sensitive, even though underlying NTFS is case
360 360 # insensitive.
361 361
362 362 # default mount points
363 363 cygwinmountpoints = sorted([
364 364 "/usr/bin",
365 365 "/usr/lib",
366 366 "/cygdrive",
367 367 ], reverse=True)
368 368
369 369 # use uppercasing for normcase, the same as the NTFS workaround
370 370 def normcase(path):
371 371 pathlen = len(path)
372 if (pathlen == 0) or (path[0] != os.sep):
372 if (pathlen == 0) or (path[0] != pycompat.ossep):
373 373 # treat as relative
374 374 return encoding.upper(path)
375 375
376 376 # to preserve case of mountpoint part
377 377 for mp in cygwinmountpoints:
378 378 if not path.startswith(mp):
379 379 continue
380 380
381 381 mplen = len(mp)
382 382 if mplen == pathlen: # mount point itself
383 383 return mp
384 if path[mplen] == os.sep:
384 if path[mplen] == pycompat.ossep:
385 385 return mp + encoding.upper(path[mplen:])
386 386
387 387 return encoding.upper(path)
388 388
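A hedged illustration of the mountpoint handling above (paths are examples only): the matched mount prefix keeps its case while the rest of the path is uppercased.

    normcase('/cygdrive/c/Program Files')   # -> '/cygdrive/C/PROGRAM FILES'
    normcase('/usr/bin/hg')                 # -> '/usr/bin/HG'
    normcase('Makefile')                    # relative path -> 'MAKEFILE'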
389 389 normcasespec = encoding.normcasespecs.other
390 390 normcasefallback = normcase
391 391
392 392 # Cygwin translates native ACLs to POSIX permissions,
393 393 # but these translations are not supported by native
394 394 # tools, so the exec bit tends to be set erroneously.
395 395 # Therefore, disable executable bit access on Cygwin.
396 396 def checkexec(path):
397 397 return False
398 398
399 399 # Similarly, Cygwin's symlink emulation is likely to create
400 400 # problems when Mercurial is used from both Cygwin and native
401 401 # Windows, with other native tools, or on shared volumes
402 402 def checklink(path):
403 403 return False
404 404
405 405 _needsshellquote = None
406 406 def shellquote(s):
407 407 if os.sys.platform == 'OpenVMS':
408 408 return '"%s"' % s
409 409 global _needsshellquote
410 410 if _needsshellquote is None:
411 411 _needsshellquote = re.compile(r'[^a-zA-Z0-9._/+-]').search
412 412 if s and not _needsshellquote(s):
413 413 # "s" shouldn't have to be quoted
414 414 return s
415 415 else:
416 416 return "'%s'" % s.replace("'", "'\\''")
417 417
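Some illustrative inputs and outputs for the quoting above (only characters outside [a-zA-Z0-9._/+-] trigger quoting):

    shellquote('rev-set.txt')    # -> 'rev-set.txt' (returned as-is)
    shellquote("it's here")      # -> 'it'\''s here' (embedded quote escaped for sh)
    shellquote('')               # -> '' wrapped in quotes, i.e. a quoted empty string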
418 418 def quotecommand(cmd):
419 419 return cmd
420 420
421 421 def popen(command, mode='r'):
422 422 return os.popen(command, mode)
423 423
424 424 def testpid(pid):
425 425 '''return False if pid dead, True if running or not sure'''
426 426 if os.sys.platform == 'OpenVMS':
427 427 return True
428 428 try:
429 429 os.kill(pid, 0)
430 430 return True
431 431 except OSError as inst:
432 432 return inst.errno != errno.ESRCH
433 433
434 434 def explainexit(code):
435 435 """return a 2-tuple (desc, code) describing a subprocess status
436 436 (codes from kill are negative - not os.system/wait encoding)"""
437 437 if code >= 0:
438 438 return _("exited with status %d") % code, code
439 439 return _("killed by signal %d") % -code, -code
440 440
441 441 def isowner(st):
442 442 """Return True if the stat object st is from the current user."""
443 443 return st.st_uid == os.getuid()
444 444
445 445 def findexe(command):
446 446 '''Find executable for command searching like which does.
447 447 If command is a basename then PATH is searched for command.
448 448 PATH isn't searched if command is an absolute or relative path.
449 449 If command isn't found None is returned.'''
450 450 if sys.platform == 'OpenVMS':
451 451 return command
452 452
453 453 def findexisting(executable):
454 454 'Will return executable if it is an existing, executable file'
455 455 if os.path.isfile(executable) and os.access(executable, os.X_OK):
456 456 return executable
457 457 return None
458 458
459 if os.sep in command:
459 if pycompat.ossep in command:
460 460 return findexisting(command)
461 461
462 462 if sys.platform == 'plan9':
463 463 return findexisting(os.path.join('/bin', command))
464 464
465 465 for path in os.environ.get('PATH', '').split(pycompat.ospathsep):
466 466 executable = findexisting(os.path.join(path, command))
467 467 if executable is not None:
468 468 return executable
469 469 return None
470 470
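A hedged sketch of typical lookups (paths are hypothetical):

    findexe('hg')            # walks $PATH, e.g. -> '/usr/bin/hg'
    findexe('./build/hg')    # contains a separator, so only that path is checked
    findexe('no-such-tool')  # -> None when nothing on $PATH matches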
471 471 def setsignalhandler():
472 472 pass
473 473
474 474 _wantedkinds = set([stat.S_IFREG, stat.S_IFLNK])
475 475
476 476 def statfiles(files):
477 477 '''Stat each file in files. Yield each stat, or None if a file does not
478 478 exist or has a type we don't care about.'''
479 479 lstat = os.lstat
480 480 getkind = stat.S_IFMT
481 481 for nf in files:
482 482 try:
483 483 st = lstat(nf)
484 484 if getkind(st.st_mode) not in _wantedkinds:
485 485 st = None
486 486 except OSError as err:
487 487 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
488 488 raise
489 489 st = None
490 490 yield st
491 491
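A small sketch of the generator above (file names hypothetical): each input position yields either an os.stat_result or None.

    results = list(statfiles(['.hgignore', 'missing', 'somedir']))
    # results[0] is an os.stat_result for the regular file,
    # results[1] is None (ENOENT), and results[2] is None as well,
    # because directories are not one of the wanted kinds (S_IFREG, S_IFLNK)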
492 492 def getuser():
493 493 '''return name of current user'''
494 494 return getpass.getuser()
495 495
496 496 def username(uid=None):
497 497 """Return the name of the user with the given uid.
498 498
499 499 If uid is None, return the name of the current user."""
500 500
501 501 if uid is None:
502 502 uid = os.getuid()
503 503 try:
504 504 return pwd.getpwuid(uid)[0]
505 505 except KeyError:
506 506 return str(uid)
507 507
508 508 def groupname(gid=None):
509 509 """Return the name of the group with the given gid.
510 510
511 511 If gid is None, return the name of the current group."""
512 512
513 513 if gid is None:
514 514 gid = os.getgid()
515 515 try:
516 516 return grp.getgrgid(gid)[0]
517 517 except KeyError:
518 518 return str(gid)
519 519
520 520 def groupmembers(name):
521 521 """Return the list of members of the group with the given
522 522 name, KeyError if the group does not exist.
523 523 """
524 524 return list(grp.getgrnam(name).gr_mem)
525 525
526 526 def spawndetached(args):
527 527 return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
528 528 args[0], args)
529 529
530 530 def gethgcmd():
531 531 return sys.argv[:1]
532 532
533 533 def makedir(path, notindexed):
534 534 os.mkdir(path)
535 535
536 536 def unlinkpath(f, ignoremissing=False):
537 537 """unlink and remove the directory if it is empty"""
538 538 try:
539 539 os.unlink(f)
540 540 except OSError as e:
541 541 if not (ignoremissing and e.errno == errno.ENOENT):
542 542 raise
543 543 # try removing directories that might now be empty
544 544 try:
545 545 os.removedirs(os.path.dirname(f))
546 546 except OSError:
547 547 pass
548 548
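A hedged walk-through with a hypothetical layout where repo/a/b/c.txt is the only file under repo/a:

    unlinkpath('repo/a/b/c.txt')
    # c.txt is removed, then os.removedirs() prunes repo/a/b and repo/a
    # because they became empty; 'repo' survives since it is not empty
    unlinkpath('repo/gone.txt', ignoremissing=True)   # missing file: no error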
549 549 def lookupreg(key, name=None, scope=None):
550 550 return None
551 551
552 552 def hidewindow():
553 553 """Hide current shell window.
554 554
555 555 Used to hide the window opened when starting asynchronous
556 556 child process under Windows, unneeded on other systems.
557 557 """
558 558 pass
559 559
560 560 class cachestat(object):
561 561 def __init__(self, path):
562 562 self.stat = os.stat(path)
563 563
564 564 def cacheable(self):
565 565 return bool(self.stat.st_ino)
566 566
567 567 __hash__ = object.__hash__
568 568
569 569 def __eq__(self, other):
570 570 try:
571 571 # Only dev, ino, size, mtime and atime are likely to change. Out
572 572 # of these, we shouldn't compare atime but should compare the
573 573 # rest. However, one of the other fields changing indicates
574 574 # something fishy going on, so return False if anything but atime
575 575 # changes.
576 576 return (self.stat.st_mode == other.stat.st_mode and
577 577 self.stat.st_ino == other.stat.st_ino and
578 578 self.stat.st_dev == other.stat.st_dev and
579 579 self.stat.st_nlink == other.stat.st_nlink and
580 580 self.stat.st_uid == other.stat.st_uid and
581 581 self.stat.st_gid == other.stat.st_gid and
582 582 self.stat.st_size == other.stat.st_size and
583 583 self.stat.st_mtime == other.stat.st_mtime and
584 584 self.stat.st_ctime == other.stat.st_ctime)
585 585 except AttributeError:
586 586 return False
587 587
588 588 def __ne__(self, other):
589 589 return not self == other
590 590
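A sketch of the intended cache-invalidation pattern (expensive_parse is a placeholder, not a real API): snapshot the stat before doing work, then compare later to decide whether to redo it.

    before = cachestat('.hg/bookmarks')
    data = expensive_parse('.hg/bookmarks')
    # ... later ...
    if cachestat('.hg/bookmarks') != before:
        data = expensive_parse('.hg/bookmarks')   # file changed underneath us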
591 591 def executablepath():
592 592 return None # available on Windows only
593 593
594 594 def statislink(st):
595 595 '''check whether a stat result is a symlink'''
596 596 return st and stat.S_ISLNK(st.st_mode)
597 597
598 598 def statisexec(st):
599 599 '''check whether a stat result is an executable file'''
600 600 return st and (st.st_mode & 0o100 != 0)
601 601
602 602 def poll(fds):
603 603 """block until something happens on any file descriptor
604 604
605 605 This is a generic helper that will check for any activity
606 606 (read, write, exception) and return the list of touched files.
607 607
608 608 In unsupported cases, it will raise a NotImplementedError"""
609 609 try:
610 610 res = select.select(fds, fds, fds)
611 611 except ValueError: # out of range file descriptor
612 612 raise NotImplementedError()
613 613 return sorted(list(set(sum(res, []))))
614 614
615 615 def readpipe(pipe):
616 616 """Read all available data from a pipe."""
617 617 # We can't fstat() a pipe because Linux will always report 0.
618 618 # So, we set the pipe to non-blocking mode and read everything
619 619 # that's available.
620 620 flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
621 621 flags |= os.O_NONBLOCK
622 622 oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags)
623 623
624 624 try:
625 625 chunks = []
626 626 while True:
627 627 try:
628 628 s = pipe.read()
629 629 if not s:
630 630 break
631 631 chunks.append(s)
632 632 except IOError:
633 633 break
634 634
635 635 return ''.join(chunks)
636 636 finally:
637 637 fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags)
638 638
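A hedged usage sketch: drain whatever a finished child process left in its stdout pipe (the command is purely illustrative):

    import subprocess
    p = subprocess.Popen(['hg', 'version'], stdout=subprocess.PIPE)
    p.wait()
    out = readpipe(p.stdout)   # everything buffered in the pipe, '' if nothing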
639 639 def bindunixsocket(sock, path):
640 640 """Bind the UNIX domain socket to the specified path"""
641 641 # use relative path instead of full path at bind() if possible, since
642 642 # AF_UNIX path has very small length limit (107 chars) on common
643 643 # platforms (see sys/un.h)
644 644 dirname, basename = os.path.split(path)
645 645 bakwdfd = None
646 646 if dirname:
647 647 bakwdfd = os.open('.', os.O_DIRECTORY)
648 648 os.chdir(dirname)
649 649 sock.bind(basename)
650 650 if bakwdfd:
651 651 os.fchdir(bakwdfd)
652 652 os.close(bakwdfd)
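A minimal usage sketch (the socket path is hypothetical and its directory must already exist):

    import socket
    sock = socket.socket(socket.AF_UNIX)
    bindunixsocket(sock, '/some/deeply/nested/workdir/.hg/server.sock')
    sock.listen(1)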