filestat: move __init__ to frompath constructor...
Siddharth Agarwal
r32772:7ad95626 default
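The change itself is a one-call-site rename in this file: dirstate._read() below switches from util.filestat(path) to util.filestat.frompath(path). A minimal sketch of the constructor pattern being introduced (the real filestat class in util.py carries additional comparison logic not shown here):

    import errno
    import os

    class filestat(object):
        """Help detect changes of a file exactly (sketch only)."""

        def __init__(self, stat):
            # __init__ now accepts an os.stat() result (or None) directly
            self.stat = stat

        @classmethod
        def frompath(cls, path):
            # the path-based construction moves into this alternative
            # constructor, as the commit subject describes
            try:
                st = os.stat(path)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
                st = None  # missing file; avoids a separate exists() check
            return cls(st)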
mercurial/dirstate.py
@@ -1,1335 +1,1336
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import contextlib
12 12 import errno
13 13 import os
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import nullid
18 18 from . import (
19 19 encoding,
20 20 error,
21 21 match as matchmod,
22 22 pathutil,
23 23 policy,
24 24 pycompat,
25 25 scmutil,
26 26 txnutil,
27 27 util,
28 28 )
29 29
30 30 parsers = policy.importmod(r'parsers')
31 31
32 32 propertycache = util.propertycache
33 33 filecache = scmutil.filecache
34 34 _rangemask = 0x7fffffff
35 35
36 36 dirstatetuple = parsers.dirstatetuple
37 37
38 38 class repocache(filecache):
39 39 """filecache for files in .hg/"""
40 40 def join(self, obj, fname):
41 41 return obj._opener.join(fname)
42 42
43 43 class rootcache(filecache):
44 44 """filecache for files in the repository root"""
45 45 def join(self, obj, fname):
46 46 return obj._join(fname)
47 47
48 48 def _getfsnow(vfs):
49 49 '''Get "now" timestamp on filesystem'''
50 50 tmpfd, tmpname = vfs.mkstemp()
51 51 try:
52 52 return os.fstat(tmpfd).st_mtime
53 53 finally:
54 54 os.close(tmpfd)
55 55 vfs.unlink(tmpname)
56 56
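_getfsnow() exists because dirstate timestamps must be compared against the clock of the filesystem that holds .hg, which can disagree with the local system clock (network mounts, clock skew). A hypothetical minimal vfs, just enough surface for the call above:

    import os
    import tempfile

    class fakevfs(object):
        # illustration only: mkstemp/unlink mirror the vfs API used above
        def mkstemp(self):
            return tempfile.mkstemp()
        def unlink(self, name):
            os.unlink(name)

    # _getfsnow(fakevfs()) creates a temp file, fstats it, and returns
    # st_mtime as the filesystem's notion of "now".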
57 57 def nonnormalentries(dmap):
58 58 '''Compute the nonnormal dirstate entries from the dmap'''
59 59 try:
60 60 return parsers.nonnormalotherparententries(dmap)
61 61 except AttributeError:
62 62 nonnorm = set()
63 63 otherparent = set()
64 64 for fname, e in dmap.iteritems():
65 65 if e[0] != 'n' or e[3] == -1:
66 66 nonnorm.add(fname)
67 67 if e[0] == 'n' and e[2] == -2:
68 68 otherparent.add(fname)
69 69 return nonnorm, otherparent
70 70
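Worked on a toy dmap (values illustrative): entries whose state is not 'n' or whose recorded mtime is -1 are "nonnormal"; 'n' entries recorded with size -2 came from the other merge parent.

    dmap = {
        'clean':  ('n', 0o644, 12, 1000),  # normal, known mtime
        'lookup': ('n', 0o644, 12, -1),    # normal, mtime unknown
        'added':  ('a', 0, -1, -1),        # state is not 'n'
        'otherp': ('n', 0, -2, -1),        # size -2: other parent
    }
    # the pure-Python fallback above yields:
    #   nonnorm     == {'lookup', 'added', 'otherp'}
    #   otherparent == {'otherp'}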
71 71 class dirstate(object):
72 72
73 73 def __init__(self, opener, ui, root, validate):
74 74 '''Create a new dirstate object.
75 75
76 76 opener is an open()-like callable that can be used to open the
77 77 dirstate file; root is the root of the directory tracked by
78 78 the dirstate.
79 79 '''
80 80 self._opener = opener
81 81 self._validate = validate
82 82 self._root = root
 83  83         # ntpath.join(root, '') of Python 2.7.9 does not add a sep if root is a
 84  84         # UNC path pointing to a root share (issue4557)
85 85 self._rootdir = pathutil.normasprefix(root)
86 86 # internal config: ui.forcecwd
87 87 forcecwd = ui.config('ui', 'forcecwd')
88 88 if forcecwd:
89 89 self._cwd = forcecwd
90 90 self._dirty = False
91 91 self._dirtypl = False
92 92 self._lastnormaltime = 0
93 93 self._ui = ui
94 94 self._filecache = {}
95 95 self._parentwriters = 0
96 96 self._filename = 'dirstate'
97 97 self._pendingfilename = '%s.pending' % self._filename
98 98 self._plchangecallbacks = {}
99 99 self._origpl = None
100 100 self._updatedfiles = set()
101 101
102 102 # for consistent view between _pl() and _read() invocations
103 103 self._pendingmode = None
104 104
105 105 @contextlib.contextmanager
106 106 def parentchange(self):
107 107 '''Context manager for handling dirstate parents.
108 108
109 109 If an exception occurs in the scope of the context manager,
110 110 the incoherent dirstate won't be written when wlock is
111 111 released.
112 112 '''
113 113 self._parentwriters += 1
114 114 yield
115 115 # Typically we want the "undo" step of a context manager in a
116 116 # finally block so it happens even when an exception
117 117 # occurs. In this case, however, we only want to decrement
118 118 # parentwriters if the code in the with statement exits
119 119 # normally, so we don't have a try/finally here on purpose.
120 120 self._parentwriters -= 1
121 121
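Typical use wraps any parent-changing operation so that an exception leaves _parentwriters raised and the incoherent dirstate is discarded rather than written when the wlock is released. A hedged sketch (repo stands in for a real localrepo object):

    # with repo.wlock():
    #     with repo.dirstate.parentchange():
    #         repo.dirstate.setparents(newnode)
    #         ...adjust individual dirstate entries here...
    # if the body raises, _parentwriters stays > 0, which suppresses
    # the dirstate write on wlock release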
122 122 def beginparentchange(self):
123 123 '''Marks the beginning of a set of changes that involve changing
124 124 the dirstate parents. If there is an exception during this time,
125 125 the dirstate will not be written when the wlock is released. This
126 126 prevents writing an incoherent dirstate where the parent doesn't
127 127 match the contents.
128 128 '''
129 129 self._ui.deprecwarn('beginparentchange is obsoleted by the '
130 130 'parentchange context manager.', '4.3')
131 131 self._parentwriters += 1
132 132
133 133 def endparentchange(self):
134 134 '''Marks the end of a set of changes that involve changing the
135 135 dirstate parents. Once all parent changes have been marked done,
136 136 the wlock will be free to write the dirstate on release.
137 137 '''
138 138 self._ui.deprecwarn('endparentchange is obsoleted by the '
139 139 'parentchange context manager.', '4.3')
140 140 if self._parentwriters > 0:
141 141 self._parentwriters -= 1
142 142
143 143 def pendingparentchange(self):
144 144 '''Returns true if the dirstate is in the middle of a set of changes
145 145 that modify the dirstate parent.
146 146 '''
147 147 return self._parentwriters > 0
148 148
149 149 @propertycache
150 150 def _map(self):
151 151 '''Return the dirstate contents as a map from filename to
152 152 (state, mode, size, time).'''
153 153 self._read()
154 154 return self._map
155 155
156 156 @propertycache
157 157 def _copymap(self):
158 158 self._read()
159 159 return self._copymap
160 160
161 161 @propertycache
162 162 def _identity(self):
163 163 self._read()
164 164 return self._identity
165 165
166 166 @propertycache
167 167 def _nonnormalset(self):
168 168 nonnorm, otherparents = nonnormalentries(self._map)
169 169 self._otherparentset = otherparents
170 170 return nonnorm
171 171
172 172 @propertycache
173 173 def _otherparentset(self):
174 174 nonnorm, otherparents = nonnormalentries(self._map)
175 175 self._nonnormalset = nonnorm
176 176 return otherparents
177 177
178 178 @propertycache
179 179 def _filefoldmap(self):
180 180 try:
181 181 makefilefoldmap = parsers.make_file_foldmap
182 182 except AttributeError:
183 183 pass
184 184 else:
185 185 return makefilefoldmap(self._map, util.normcasespec,
186 186 util.normcasefallback)
187 187
188 188 f = {}
189 189 normcase = util.normcase
190 190 for name, s in self._map.iteritems():
191 191 if s[0] != 'r':
192 192 f[normcase(name)] = name
193 193 f['.'] = '.' # prevents useless util.fspath() invocation
194 194 return f
195 195
196 196 @propertycache
197 197 def _dirfoldmap(self):
198 198 f = {}
199 199 normcase = util.normcase
200 200 for name in self._dirs:
201 201 f[normcase(name)] = name
202 202 return f
203 203
204 204 @repocache('branch')
205 205 def _branch(self):
206 206 try:
207 207 return self._opener.read("branch").strip() or "default"
208 208 except IOError as inst:
209 209 if inst.errno != errno.ENOENT:
210 210 raise
211 211 return "default"
212 212
213 213 @propertycache
214 214 def _pl(self):
215 215 try:
216 216 fp = self._opendirstatefile()
217 217 st = fp.read(40)
218 218 fp.close()
219 219 l = len(st)
220 220 if l == 40:
221 221 return st[:20], st[20:40]
222 222 elif l > 0 and l < 40:
223 223 raise error.Abort(_('working directory state appears damaged!'))
224 224 except IOError as err:
225 225 if err.errno != errno.ENOENT:
226 226 raise
227 227 return [nullid, nullid]
228 228
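The parents live in the first 40 bytes of the dirstate file: two binary 20-byte nodes back to back. A condensed restatement of the logic above, for illustration only:

    nullid = b'\x00' * 20  # as in mercurial.node

    with open('.hg/dirstate', 'rb') as fp:  # assumes a repo at hand
        st = fp.read(40)
    if len(st) == 40:
        p1, p2 = st[:20], st[20:40]
    elif st:
        raise RuntimeError('damaged dirstate')  # 0 < len < 40
    else:
        p1, p2 = nullid, nullid
    # p2 == nullid except during an uncommitted merge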
229 229 @propertycache
230 230 def _dirs(self):
231 231 return util.dirs(self._map, 'r')
232 232
233 233 def dirs(self):
234 234 return self._dirs
235 235
236 236 @rootcache('.hgignore')
237 237 def _ignore(self):
238 238 files = self._ignorefiles()
239 239 if not files:
240 240 return matchmod.never(self._root, '')
241 241
242 242 pats = ['include:%s' % f for f in files]
243 243 return matchmod.match(self._root, '', [], pats, warn=self._ui.warn)
244 244
245 245 @propertycache
246 246 def _slash(self):
247 247 return self._ui.configbool('ui', 'slash') and pycompat.ossep != '/'
248 248
249 249 @propertycache
250 250 def _checklink(self):
251 251 return util.checklink(self._root)
252 252
253 253 @propertycache
254 254 def _checkexec(self):
255 255 return util.checkexec(self._root)
256 256
257 257 @propertycache
258 258 def _checkcase(self):
259 259 return not util.fscasesensitive(self._join('.hg'))
260 260
261 261 def _join(self, f):
262 262 # much faster than os.path.join()
263 263 # it's safe because f is always a relative path
264 264 return self._rootdir + f
265 265
266 266 def flagfunc(self, buildfallback):
267 267 if self._checklink and self._checkexec:
268 268 def f(x):
269 269 try:
270 270 st = os.lstat(self._join(x))
271 271 if util.statislink(st):
272 272 return 'l'
273 273 if util.statisexec(st):
274 274 return 'x'
275 275 except OSError:
276 276 pass
277 277 return ''
278 278 return f
279 279
280 280 fallback = buildfallback()
281 281 if self._checklink:
282 282 def f(x):
283 283 if os.path.islink(self._join(x)):
284 284 return 'l'
285 285 if 'x' in fallback(x):
286 286 return 'x'
287 287 return ''
288 288 return f
289 289 if self._checkexec:
290 290 def f(x):
291 291 if 'l' in fallback(x):
292 292 return 'l'
293 293 if util.isexec(self._join(x)):
294 294 return 'x'
295 295 return ''
296 296 return f
297 297 else:
298 298 return fallback
299 299
300 300 @propertycache
301 301 def _cwd(self):
302 302 return pycompat.getcwd()
303 303
304 304 def getcwd(self):
305 305 '''Return the path from which a canonical path is calculated.
306 306
307 307 This path should be used to resolve file patterns or to convert
308 308 canonical paths back to file paths for display. It shouldn't be
309 309 used to get real file paths. Use vfs functions instead.
310 310 '''
311 311 cwd = self._cwd
312 312 if cwd == self._root:
313 313 return ''
314 314 # self._root ends with a path separator if self._root is '/' or 'C:\'
315 315 rootsep = self._root
316 316 if not util.endswithsep(rootsep):
317 317 rootsep += pycompat.ossep
318 318 if cwd.startswith(rootsep):
319 319 return cwd[len(rootsep):]
320 320 else:
321 321 # we're outside the repo. return an absolute path.
322 322 return cwd
323 323
324 324 def pathto(self, f, cwd=None):
325 325 if cwd is None:
326 326 cwd = self.getcwd()
327 327 path = util.pathto(self._root, cwd, f)
328 328 if self._slash:
329 329 return util.pconvert(path)
330 330 return path
331 331
332 332 def __getitem__(self, key):
333 333 '''Return the current state of key (a filename) in the dirstate.
334 334
335 335 States are:
336 336 n normal
337 337 m needs merging
338 338 r marked for removal
339 339 a marked for addition
340 340 ? not tracked
341 341 '''
342 342 return self._map.get(key, ("?",))[0]
343 343
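Querying states, mirroring the table above (ds stands in for repo.dirstate):

    # ds['tracked.py']  -> 'n'  normal
    # ds['merged.py']   -> 'm'  needs merging
    # ds['removed.py']  -> 'r'  marked for removal
    # ds['new.py']      -> 'a'  marked for addition
    # ds['scratch.tmp'] -> '?'  not tracked (the .get() default)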
344 344 def __contains__(self, key):
345 345 return key in self._map
346 346
347 347 def __iter__(self):
348 348 for x in sorted(self._map):
349 349 yield x
350 350
351 351 def items(self):
352 352 return self._map.iteritems()
353 353
354 354 iteritems = items
355 355
356 356 def parents(self):
357 357 return [self._validate(p) for p in self._pl]
358 358
359 359 def p1(self):
360 360 return self._validate(self._pl[0])
361 361
362 362 def p2(self):
363 363 return self._validate(self._pl[1])
364 364
365 365 def branch(self):
366 366 return encoding.tolocal(self._branch)
367 367
368 368 def setparents(self, p1, p2=nullid):
369 369 """Set dirstate parents to p1 and p2.
370 370
 371  371         When moving from two parents to one, 'm' merged entries are
 372  372         adjusted to normal and previous copy records are discarded and
 373  373         returned by the call.
374 374
375 375 See localrepo.setparents()
376 376 """
377 377 if self._parentwriters == 0:
378 378 raise ValueError("cannot set dirstate parent without "
379 379 "calling dirstate.beginparentchange")
380 380
381 381 self._dirty = self._dirtypl = True
382 382 oldp2 = self._pl[1]
383 383 if self._origpl is None:
384 384 self._origpl = self._pl
385 385 self._pl = p1, p2
386 386 copies = {}
387 387 if oldp2 != nullid and p2 == nullid:
388 388 candidatefiles = self._nonnormalset.union(self._otherparentset)
389 389 for f in candidatefiles:
390 390 s = self._map.get(f)
391 391 if s is None:
392 392 continue
393 393
394 394 # Discard 'm' markers when moving away from a merge state
395 395 if s[0] == 'm':
396 396 if f in self._copymap:
397 397 copies[f] = self._copymap[f]
398 398 self.normallookup(f)
399 399 # Also fix up otherparent markers
400 400 elif s[0] == 'n' and s[2] == -2:
401 401 if f in self._copymap:
402 402 copies[f] = self._copymap[f]
403 403 self.add(f)
404 404 return copies
405 405
406 406 def setbranch(self, branch):
407 407 self._branch = encoding.fromlocal(branch)
408 408 f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
409 409 try:
410 410 f.write(self._branch + '\n')
411 411 f.close()
412 412
413 413 # make sure filecache has the correct stat info for _branch after
414 414 # replacing the underlying file
415 415 ce = self._filecache['_branch']
416 416 if ce:
417 417 ce.refresh()
418 418 except: # re-raises
419 419 f.discard()
420 420 raise
421 421
422 422 def _opendirstatefile(self):
423 423 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
424 424 if self._pendingmode is not None and self._pendingmode != mode:
425 425 fp.close()
 426  426             raise error.Abort(_('working directory state may be '
 427  427                                 'changed in parallel'))
428 428 self._pendingmode = mode
429 429 return fp
430 430
431 431 def _read(self):
432 432 self._map = {}
433 433 self._copymap = {}
434 434 # ignore HG_PENDING because identity is used only for writing
435 self._identity = util.filestat(self._opener.join(self._filename))
435 self._identity = util.filestat.frompath(
436 self._opener.join(self._filename))
436 437 try:
437 438 fp = self._opendirstatefile()
438 439 try:
439 440 st = fp.read()
440 441 finally:
441 442 fp.close()
442 443 except IOError as err:
443 444 if err.errno != errno.ENOENT:
444 445 raise
445 446 return
446 447 if not st:
447 448 return
448 449
449 450 if util.safehasattr(parsers, 'dict_new_presized'):
450 451 # Make an estimate of the number of files in the dirstate based on
451 452 # its size. From a linear regression on a set of real-world repos,
452 453 # all over 10,000 files, the size of a dirstate entry is 85
453 454 # bytes. The cost of resizing is significantly higher than the cost
454 455 # of filling in a larger presized dict, so subtract 20% from the
455 456 # size.
456 457 #
457 458 # This heuristic is imperfect in many ways, so in a future dirstate
458 459 # format update it makes sense to just record the number of entries
459 460 # on write.
460 461 self._map = parsers.dict_new_presized(len(st) / 71)
461 462
462 463 # Python's garbage collector triggers a GC each time a certain number
463 464 # of container objects (the number being defined by
464 465 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
465 466 # for each file in the dirstate. The C version then immediately marks
466 467 # them as not to be tracked by the collector. However, this has no
467 468 # effect on when GCs are triggered, only on what objects the GC looks
468 469 # into. This means that O(number of files) GCs are unavoidable.
469 470 # Depending on when in the process's lifetime the dirstate is parsed,
470 471 # this can get very expensive. As a workaround, disable GC while
471 472 # parsing the dirstate.
472 473 #
473 474 # (we cannot decorate the function directly since it is in a C module)
474 475 parse_dirstate = util.nogc(parsers.parse_dirstate)
475 476 p = parse_dirstate(self._map, self._copymap, st)
476 477 if not self._dirtypl:
477 478 self._pl = p
478 479
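util.nogc (defined in util.py, outside this hunk) is essentially a disable/re-enable wrapper around the cyclic collector; a sketch consistent with the comment above:

    import gc

    def nogc(func):
        # run func with the cyclic GC off, restoring the previous state
        def wrapper(*args, **kwargs):
            gcenabled = gc.isenabled()
            gc.disable()
            try:
                return func(*args, **kwargs)
            finally:
                if gcenabled:
                    gc.enable()
        return wrapper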
479 480 def invalidate(self):
480 481 '''Causes the next access to reread the dirstate.
481 482
482 483 This is different from localrepo.invalidatedirstate() because it always
483 484 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
484 485 check whether the dirstate has changed before rereading it.'''
485 486
486 487 for a in ("_map", "_copymap", "_identity",
487 488 "_filefoldmap", "_dirfoldmap", "_branch",
488 489 "_pl", "_dirs", "_ignore", "_nonnormalset",
489 490 "_otherparentset"):
490 491 if a in self.__dict__:
491 492 delattr(self, a)
492 493 self._lastnormaltime = 0
493 494 self._dirty = False
494 495 self._updatedfiles.clear()
495 496 self._parentwriters = 0
496 497 self._origpl = None
497 498
498 499 def copy(self, source, dest):
499 500 """Mark dest as a copy of source. Unmark dest if source is None."""
500 501 if source == dest:
501 502 return
502 503 self._dirty = True
503 504 if source is not None:
504 505 self._copymap[dest] = source
505 506 self._updatedfiles.add(source)
506 507 self._updatedfiles.add(dest)
507 508 elif dest in self._copymap:
508 509 del self._copymap[dest]
509 510 self._updatedfiles.add(dest)
510 511
511 512 def copied(self, file):
512 513 return self._copymap.get(file, None)
513 514
514 515 def copies(self):
515 516 return self._copymap
516 517
517 518 def _droppath(self, f):
518 519 if self[f] not in "?r" and "_dirs" in self.__dict__:
519 520 self._dirs.delpath(f)
520 521
521 522 if "_filefoldmap" in self.__dict__:
522 523 normed = util.normcase(f)
523 524 if normed in self._filefoldmap:
524 525 del self._filefoldmap[normed]
525 526
526 527 self._updatedfiles.add(f)
527 528
528 529 def _addpath(self, f, state, mode, size, mtime):
529 530 oldstate = self[f]
530 531 if state == 'a' or oldstate == 'r':
531 532 scmutil.checkfilename(f)
532 533 if f in self._dirs:
533 534 raise error.Abort(_('directory %r already in dirstate') % f)
534 535 # shadows
535 536 for d in util.finddirs(f):
536 537 if d in self._dirs:
537 538 break
538 539 if d in self._map and self[d] != 'r':
539 540 raise error.Abort(
540 541 _('file %r in dirstate clashes with %r') % (d, f))
541 542 if oldstate in "?r" and "_dirs" in self.__dict__:
542 543 self._dirs.addpath(f)
543 544 self._dirty = True
544 545 self._updatedfiles.add(f)
545 546 self._map[f] = dirstatetuple(state, mode, size, mtime)
546 547 if state != 'n' or mtime == -1:
547 548 self._nonnormalset.add(f)
548 549 if size == -2:
549 550 self._otherparentset.add(f)
550 551
551 552 def normal(self, f):
552 553 '''Mark a file normal and clean.'''
553 554 s = os.lstat(self._join(f))
554 555 mtime = s.st_mtime
555 556 self._addpath(f, 'n', s.st_mode,
556 557 s.st_size & _rangemask, mtime & _rangemask)
557 558 if f in self._copymap:
558 559 del self._copymap[f]
559 560 if f in self._nonnormalset:
560 561 self._nonnormalset.remove(f)
561 562 if mtime > self._lastnormaltime:
562 563 # Remember the most recent modification timeslot for status(),
563 564 # to make sure we won't miss future size-preserving file content
564 565 # modifications that happen within the same timeslot.
565 566 self._lastnormaltime = mtime
566 567
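The race that _lastnormaltime guards against, step by step: a file marked normal and then modified within the same mtime timeslot, without a size change, is invisible to the size+mtime comparison. Remembering the timeslot forces a content lookup in status():

    # timeslot granularity: 1 second
    #   10:00:00.1  file written, marked normal   -> mtime 1000
    #   10:00:00.7  file edited again, same size  -> mtime 1000
    # size and mtime both match the recorded entry, so only
    # st.st_mtime == lastnormaltime (see status() below) tells
    # Mercurial to re-read the file contents to be sure.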
567 568 def normallookup(self, f):
568 569 '''Mark a file normal, but possibly dirty.'''
569 570 if self._pl[1] != nullid and f in self._map:
570 571 # if there is a merge going on and the file was either
571 572 # in state 'm' (-1) or coming from other parent (-2) before
572 573 # being removed, restore that state.
573 574 entry = self._map[f]
574 575 if entry[0] == 'r' and entry[2] in (-1, -2):
575 576 source = self._copymap.get(f)
576 577 if entry[2] == -1:
577 578 self.merge(f)
578 579 elif entry[2] == -2:
579 580 self.otherparent(f)
580 581 if source:
581 582 self.copy(source, f)
582 583 return
583 584 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
584 585 return
585 586 self._addpath(f, 'n', 0, -1, -1)
586 587 if f in self._copymap:
587 588 del self._copymap[f]
588 589 if f in self._nonnormalset:
589 590 self._nonnormalset.remove(f)
590 591
591 592 def otherparent(self, f):
592 593 '''Mark as coming from the other parent, always dirty.'''
593 594 if self._pl[1] == nullid:
594 595 raise error.Abort(_("setting %r to other parent "
595 596 "only allowed in merges") % f)
596 597 if f in self and self[f] == 'n':
597 598 # merge-like
598 599 self._addpath(f, 'm', 0, -2, -1)
599 600 else:
600 601 # add-like
601 602 self._addpath(f, 'n', 0, -2, -1)
602 603
603 604 if f in self._copymap:
604 605 del self._copymap[f]
605 606
606 607 def add(self, f):
607 608 '''Mark a file added.'''
608 609 self._addpath(f, 'a', 0, -1, -1)
609 610 if f in self._copymap:
610 611 del self._copymap[f]
611 612
612 613 def remove(self, f):
613 614 '''Mark a file removed.'''
614 615 self._dirty = True
615 616 self._droppath(f)
616 617 size = 0
617 618 if self._pl[1] != nullid and f in self._map:
618 619 # backup the previous state
619 620 entry = self._map[f]
620 621 if entry[0] == 'm': # merge
621 622 size = -1
622 623 elif entry[0] == 'n' and entry[2] == -2: # other parent
623 624 size = -2
624 625 self._otherparentset.add(f)
625 626 self._map[f] = dirstatetuple('r', 0, size, 0)
626 627 self._nonnormalset.add(f)
627 628 if size == 0 and f in self._copymap:
628 629 del self._copymap[f]
629 630
630 631 def merge(self, f):
631 632 '''Mark a file merged.'''
632 633 if self._pl[1] == nullid:
633 634 return self.normallookup(f)
634 635 return self.otherparent(f)
635 636
636 637 def drop(self, f):
637 638 '''Drop a file from the dirstate'''
638 639 if f in self._map:
639 640 self._dirty = True
640 641 self._droppath(f)
641 642 del self._map[f]
642 643 if f in self._nonnormalset:
643 644 self._nonnormalset.remove(f)
644 645 if f in self._copymap:
645 646 del self._copymap[f]
646 647
647 648 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
648 649 if exists is None:
649 650 exists = os.path.lexists(os.path.join(self._root, path))
650 651 if not exists:
651 652 # Maybe a path component exists
652 653 if not ignoremissing and '/' in path:
653 654 d, f = path.rsplit('/', 1)
654 655 d = self._normalize(d, False, ignoremissing, None)
655 656 folded = d + "/" + f
656 657 else:
657 658 # No path components, preserve original case
658 659 folded = path
659 660 else:
660 661 # recursively normalize leading directory components
661 662 # against dirstate
662 663 if '/' in normed:
663 664 d, f = normed.rsplit('/', 1)
664 665 d = self._normalize(d, False, ignoremissing, True)
665 666 r = self._root + "/" + d
666 667 folded = d + "/" + util.fspath(f, r)
667 668 else:
668 669 folded = util.fspath(normed, self._root)
669 670 storemap[normed] = folded
670 671
671 672 return folded
672 673
673 674 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
674 675 normed = util.normcase(path)
675 676 folded = self._filefoldmap.get(normed, None)
676 677 if folded is None:
677 678 if isknown:
678 679 folded = path
679 680 else:
680 681 folded = self._discoverpath(path, normed, ignoremissing, exists,
681 682 self._filefoldmap)
682 683 return folded
683 684
684 685 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
685 686 normed = util.normcase(path)
686 687 folded = self._filefoldmap.get(normed, None)
687 688 if folded is None:
688 689 folded = self._dirfoldmap.get(normed, None)
689 690 if folded is None:
690 691 if isknown:
691 692 folded = path
692 693 else:
693 694 # store discovered result in dirfoldmap so that future
694 695 # normalizefile calls don't start matching directories
695 696 folded = self._discoverpath(path, normed, ignoremissing, exists,
696 697 self._dirfoldmap)
697 698 return folded
698 699
699 700 def normalize(self, path, isknown=False, ignoremissing=False):
700 701 '''
701 702 normalize the case of a pathname when on a casefolding filesystem
702 703
703 704 isknown specifies whether the filename came from walking the
704 705 disk, to avoid extra filesystem access.
705 706
 706  707         If ignoremissing is True, missing paths are returned
707 708 unchanged. Otherwise, we try harder to normalize possibly
708 709 existing path components.
709 710
710 711 The normalized case is determined based on the following precedence:
711 712
712 713 - version of name already stored in the dirstate
713 714 - version of name stored on disk
714 715 - version provided via command arguments
715 716 '''
716 717
717 718 if self._checkcase:
718 719 return self._normalize(path, isknown, ignoremissing)
719 720 return path
720 721
721 722 def clear(self):
722 723 self._map = {}
723 724 self._nonnormalset = set()
724 725 self._otherparentset = set()
725 726 if "_dirs" in self.__dict__:
726 727 delattr(self, "_dirs")
727 728 self._copymap = {}
728 729 self._pl = [nullid, nullid]
729 730 self._lastnormaltime = 0
730 731 self._updatedfiles.clear()
731 732 self._dirty = True
732 733
733 734 def rebuild(self, parent, allfiles, changedfiles=None):
734 735 if changedfiles is None:
735 736 # Rebuild entire dirstate
736 737 changedfiles = allfiles
737 738 lastnormaltime = self._lastnormaltime
738 739 self.clear()
739 740 self._lastnormaltime = lastnormaltime
740 741
741 742 if self._origpl is None:
742 743 self._origpl = self._pl
743 744 self._pl = (parent, nullid)
744 745 for f in changedfiles:
745 746 if f in allfiles:
746 747 self.normallookup(f)
747 748 else:
748 749 self.drop(f)
749 750
750 751 self._dirty = True
751 752
752 753 def identity(self):
753 754 '''Return identity of dirstate itself to detect changing in storage
754 755
755 756 If identity of previous dirstate is equal to this, writing
756 757 changes based on the former dirstate out can keep consistency.
757 758 '''
758 759 return self._identity
759 760
760 761 def write(self, tr):
761 762 if not self._dirty:
762 763 return
763 764
764 765 filename = self._filename
765 766 if tr:
 766  767             # 'dirstate.write()' is not only for writing in-memory
 767  768             # changes out, but also for dropping ambiguous timestamps.
 768  769             # Delayed writing would re-raise the "ambiguous timestamp issue".
 769  770             # See also the wiki page below for details:
770 771 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
771 772
772 773 # emulate dropping timestamp in 'parsers.pack_dirstate'
773 774 now = _getfsnow(self._opener)
774 775 dmap = self._map
775 776 for f in self._updatedfiles:
776 777 e = dmap.get(f)
777 778 if e is not None and e[0] == 'n' and e[3] == now:
778 779 dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
779 780 self._nonnormalset.add(f)
780 781
781 782 # emulate that all 'dirstate.normal' results are written out
782 783 self._lastnormaltime = 0
783 784 self._updatedfiles.clear()
784 785
785 786 # delay writing in-memory changes out
786 787 tr.addfilegenerator('dirstate', (self._filename,),
787 788 self._writedirstate, location='plain')
788 789 return
789 790
790 791 st = self._opener(filename, "w", atomictemp=True, checkambig=True)
791 792 self._writedirstate(st)
792 793
793 794 def addparentchangecallback(self, category, callback):
794 795 """add a callback to be called when the wd parents are changed
795 796
796 797 Callback will be called with the following arguments:
797 798 dirstate, (oldp1, oldp2), (newp1, newp2)
798 799
799 800 Category is a unique identifier to allow overwriting an old callback
800 801 with a newer callback.
801 802 """
802 803 self._plchangecallbacks[category] = callback
803 804
804 805 def _writedirstate(self, st):
805 806 # notify callbacks about parents change
806 807 if self._origpl is not None and self._origpl != self._pl:
807 808 for c, callback in sorted(self._plchangecallbacks.iteritems()):
808 809 callback(self, self._origpl, self._pl)
809 810 self._origpl = None
810 811 # use the modification time of the newly created temporary file as the
811 812 # filesystem's notion of 'now'
812 813 now = util.fstat(st).st_mtime & _rangemask
813 814
 814  815         # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
 815  816         # the timestamp of each entry in the dirstate, because of 'now > mtime'
816 817 delaywrite = self._ui.configint('debug', 'dirstate.delaywrite', 0)
817 818 if delaywrite > 0:
818 819 # do we have any files to delay for?
819 820 for f, e in self._map.iteritems():
820 821 if e[0] == 'n' and e[3] == now:
821 822 import time # to avoid useless import
822 823 # rather than sleep n seconds, sleep until the next
823 824 # multiple of n seconds
824 825 clock = time.time()
825 826 start = int(clock) - (int(clock) % delaywrite)
826 827 end = start + delaywrite
827 828 time.sleep(end - clock)
828 829 now = end # trust our estimate that the end is near now
829 830 break
830 831
831 832 st.write(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
832 833 self._nonnormalset, self._otherparentset = nonnormalentries(self._map)
833 834 st.close()
834 835 self._lastnormaltime = 0
835 836 self._dirty = self._dirtypl = False
836 837
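The sleep arithmetic in the delaywrite branch above rounds "now" up to the next multiple of delaywrite, so the write lands strictly after every timestamp it just compared against. For example:

    # delaywrite = 2, clock = 103.4 at write time:
    #   start = int(103.4) - (103 % 2) = 102
    #   end   = start + 2              = 104
    #   sleep(104 - 103.4)             # 0.6s
    #   now   = 104                    # trusted estimate of "the end"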
837 838 def _dirignore(self, f):
838 839 if f == '.':
839 840 return False
840 841 if self._ignore(f):
841 842 return True
842 843 for p in util.finddirs(f):
843 844 if self._ignore(p):
844 845 return True
845 846 return False
846 847
847 848 def _ignorefiles(self):
848 849 files = []
849 850 if os.path.exists(self._join('.hgignore')):
850 851 files.append(self._join('.hgignore'))
851 852 for name, path in self._ui.configitems("ui"):
852 853 if name == 'ignore' or name.startswith('ignore.'):
853 854 # we need to use os.path.join here rather than self._join
854 855 # because path is arbitrary and user-specified
855 856 files.append(os.path.join(self._rootdir, util.expandpath(path)))
856 857 return files
857 858
858 859 def _ignorefileandline(self, f):
859 860 files = collections.deque(self._ignorefiles())
860 861 visited = set()
861 862 while files:
862 863 i = files.popleft()
863 864 patterns = matchmod.readpatternfile(i, self._ui.warn,
864 865 sourceinfo=True)
865 866 for pattern, lineno, line in patterns:
866 867 kind, p = matchmod._patsplit(pattern, 'glob')
867 868 if kind == "subinclude":
868 869 if p not in visited:
869 870 files.append(p)
870 871 continue
871 872 m = matchmod.match(self._root, '', [], [pattern],
872 873 warn=self._ui.warn)
873 874 if m(f):
874 875 return (i, lineno, line)
875 876 visited.add(i)
876 877 return (None, -1, "")
877 878
878 879 def _walkexplicit(self, match, subrepos):
879 880 '''Get stat data about the files explicitly specified by match.
880 881
881 882 Return a triple (results, dirsfound, dirsnotfound).
882 883 - results is a mapping from filename to stat result. It also contains
883 884 listings mapping subrepos and .hg to None.
884 885 - dirsfound is a list of files found to be directories.
885 886 - dirsnotfound is a list of files that the dirstate thinks are
886 887 directories and that were not found.'''
887 888
888 889 def badtype(mode):
889 890 kind = _('unknown')
890 891 if stat.S_ISCHR(mode):
891 892 kind = _('character device')
892 893 elif stat.S_ISBLK(mode):
893 894 kind = _('block device')
894 895 elif stat.S_ISFIFO(mode):
895 896 kind = _('fifo')
896 897 elif stat.S_ISSOCK(mode):
897 898 kind = _('socket')
898 899 elif stat.S_ISDIR(mode):
899 900 kind = _('directory')
900 901 return _('unsupported file type (type is %s)') % kind
901 902
902 903 matchedir = match.explicitdir
903 904 badfn = match.bad
904 905 dmap = self._map
905 906 lstat = os.lstat
906 907 getkind = stat.S_IFMT
907 908 dirkind = stat.S_IFDIR
908 909 regkind = stat.S_IFREG
909 910 lnkkind = stat.S_IFLNK
910 911 join = self._join
911 912 dirsfound = []
912 913 foundadd = dirsfound.append
913 914 dirsnotfound = []
914 915 notfoundadd = dirsnotfound.append
915 916
916 917 if not match.isexact() and self._checkcase:
917 918 normalize = self._normalize
918 919 else:
919 920 normalize = None
920 921
921 922 files = sorted(match.files())
922 923 subrepos.sort()
923 924 i, j = 0, 0
924 925 while i < len(files) and j < len(subrepos):
925 926 subpath = subrepos[j] + "/"
926 927 if files[i] < subpath:
927 928 i += 1
928 929 continue
929 930 while i < len(files) and files[i].startswith(subpath):
930 931 del files[i]
931 932 j += 1
932 933
933 934 if not files or '.' in files:
934 935 files = ['.']
935 936 results = dict.fromkeys(subrepos)
936 937 results['.hg'] = None
937 938
938 939 alldirs = None
939 940 for ff in files:
940 941 # constructing the foldmap is expensive, so don't do it for the
941 942 # common case where files is ['.']
942 943 if normalize and ff != '.':
943 944 nf = normalize(ff, False, True)
944 945 else:
945 946 nf = ff
946 947 if nf in results:
947 948 continue
948 949
949 950 try:
950 951 st = lstat(join(nf))
951 952 kind = getkind(st.st_mode)
952 953 if kind == dirkind:
953 954 if nf in dmap:
954 955 # file replaced by dir on disk but still in dirstate
955 956 results[nf] = None
956 957 if matchedir:
957 958 matchedir(nf)
958 959 foundadd((nf, ff))
959 960 elif kind == regkind or kind == lnkkind:
960 961 results[nf] = st
961 962 else:
962 963 badfn(ff, badtype(kind))
963 964 if nf in dmap:
964 965 results[nf] = None
965 966 except OSError as inst: # nf not found on disk - it is dirstate only
966 967 if nf in dmap: # does it exactly match a missing file?
967 968 results[nf] = None
968 969 else: # does it match a missing directory?
969 970 if alldirs is None:
970 971 alldirs = util.dirs(dmap)
971 972 if nf in alldirs:
972 973 if matchedir:
973 974 matchedir(nf)
974 975 notfoundadd(nf)
975 976 else:
976 977 badfn(ff, inst.strerror)
977 978
978 979 # Case insensitive filesystems cannot rely on lstat() failing to detect
979 980 # a case-only rename. Prune the stat object for any file that does not
980 981 # match the case in the filesystem, if there are multiple files that
981 982 # normalize to the same path.
982 983 if match.isexact() and self._checkcase:
983 984 normed = {}
984 985
985 986 for f, st in results.iteritems():
986 987 if st is None:
987 988 continue
988 989
989 990 nc = util.normcase(f)
990 991 paths = normed.get(nc)
991 992
992 993 if paths is None:
993 994 paths = set()
994 995 normed[nc] = paths
995 996
996 997 paths.add(f)
997 998
998 999 for norm, paths in normed.iteritems():
999 1000 if len(paths) > 1:
1000 1001 for path in paths:
1001 1002 folded = self._discoverpath(path, norm, True, None,
1002 1003 self._dirfoldmap)
1003 1004 if path != folded:
1004 1005 results[path] = None
1005 1006
1006 1007 return results, dirsfound, dirsnotfound
1007 1008
1008 1009 def walk(self, match, subrepos, unknown, ignored, full=True):
1009 1010 '''
1010 1011 Walk recursively through the directory tree, finding all files
1011 1012 matched by match.
1012 1013
1013 1014 If full is False, maybe skip some known-clean files.
1014 1015
1015 1016 Return a dict mapping filename to stat-like object (either
1016 1017 mercurial.osutil.stat instance or return value of os.stat()).
1017 1018
1018 1019 '''
1019 1020 # full is a flag that extensions that hook into walk can use -- this
1020 1021 # implementation doesn't use it at all. This satisfies the contract
1021 1022 # because we only guarantee a "maybe".
1022 1023
1023 1024 if ignored:
1024 1025 ignore = util.never
1025 1026 dirignore = util.never
1026 1027 elif unknown:
1027 1028 ignore = self._ignore
1028 1029 dirignore = self._dirignore
1029 1030 else:
1030 1031 # if not unknown and not ignored, drop dir recursion and step 2
1031 1032 ignore = util.always
1032 1033 dirignore = util.always
1033 1034
1034 1035 matchfn = match.matchfn
1035 1036 matchalways = match.always()
1036 1037 matchtdir = match.traversedir
1037 1038 dmap = self._map
1038 1039 listdir = util.listdir
1039 1040 lstat = os.lstat
1040 1041 dirkind = stat.S_IFDIR
1041 1042 regkind = stat.S_IFREG
1042 1043 lnkkind = stat.S_IFLNK
1043 1044 join = self._join
1044 1045
1045 1046 exact = skipstep3 = False
1046 1047 if match.isexact(): # match.exact
1047 1048 exact = True
1048 1049 dirignore = util.always # skip step 2
1049 1050 elif match.prefix(): # match.match, no patterns
1050 1051 skipstep3 = True
1051 1052
1052 1053 if not exact and self._checkcase:
1053 1054 normalize = self._normalize
1054 1055 normalizefile = self._normalizefile
1055 1056 skipstep3 = False
1056 1057 else:
1057 1058 normalize = self._normalize
1058 1059 normalizefile = None
1059 1060
1060 1061 # step 1: find all explicit files
1061 1062 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1062 1063
1063 1064 skipstep3 = skipstep3 and not (work or dirsnotfound)
1064 1065 work = [d for d in work if not dirignore(d[0])]
1065 1066
1066 1067 # step 2: visit subdirectories
1067 1068 def traverse(work, alreadynormed):
1068 1069 wadd = work.append
1069 1070 while work:
1070 1071 nd = work.pop()
1071 1072 if not match.visitdir(nd):
1072 1073 continue
1073 1074 skip = None
1074 1075 if nd == '.':
1075 1076 nd = ''
1076 1077 else:
1077 1078 skip = '.hg'
1078 1079 try:
1079 1080 entries = listdir(join(nd), stat=True, skip=skip)
1080 1081 except OSError as inst:
1081 1082 if inst.errno in (errno.EACCES, errno.ENOENT):
1082 1083 match.bad(self.pathto(nd), inst.strerror)
1083 1084 continue
1084 1085 raise
1085 1086 for f, kind, st in entries:
1086 1087 if normalizefile:
1087 1088 # even though f might be a directory, we're only
1088 1089 # interested in comparing it to files currently in the
1089 1090 # dmap -- therefore normalizefile is enough
1090 1091 nf = normalizefile(nd and (nd + "/" + f) or f, True,
1091 1092 True)
1092 1093 else:
1093 1094 nf = nd and (nd + "/" + f) or f
1094 1095 if nf not in results:
1095 1096 if kind == dirkind:
1096 1097 if not ignore(nf):
1097 1098 if matchtdir:
1098 1099 matchtdir(nf)
1099 1100 wadd(nf)
1100 1101 if nf in dmap and (matchalways or matchfn(nf)):
1101 1102 results[nf] = None
1102 1103 elif kind == regkind or kind == lnkkind:
1103 1104 if nf in dmap:
1104 1105 if matchalways or matchfn(nf):
1105 1106 results[nf] = st
1106 1107 elif ((matchalways or matchfn(nf))
1107 1108 and not ignore(nf)):
1108 1109 # unknown file -- normalize if necessary
1109 1110 if not alreadynormed:
1110 1111 nf = normalize(nf, False, True)
1111 1112 results[nf] = st
1112 1113 elif nf in dmap and (matchalways or matchfn(nf)):
1113 1114 results[nf] = None
1114 1115
1115 1116 for nd, d in work:
1116 1117 # alreadynormed means that processwork doesn't have to do any
1117 1118 # expensive directory normalization
1118 1119 alreadynormed = not normalize or nd == d
1119 1120 traverse([d], alreadynormed)
1120 1121
1121 1122 for s in subrepos:
1122 1123 del results[s]
1123 1124 del results['.hg']
1124 1125
1125 1126 # step 3: visit remaining files from dmap
1126 1127 if not skipstep3 and not exact:
1127 1128 # If a dmap file is not in results yet, it was either
 1128  1129             # a) not matching matchfn, b) ignored, c) missing, or d) under a
1129 1130 # symlink directory.
1130 1131 if not results and matchalways:
1131 1132 visit = [f for f in dmap]
1132 1133 else:
1133 1134 visit = [f for f in dmap if f not in results and matchfn(f)]
1134 1135 visit.sort()
1135 1136
1136 1137 if unknown:
1137 1138 # unknown == True means we walked all dirs under the roots
 1138  1139                 # that weren't ignored, and everything that matched was stat'ed
1139 1140 # and is already in results.
1140 1141 # The rest must thus be ignored or under a symlink.
1141 1142 audit_path = pathutil.pathauditor(self._root)
1142 1143
1143 1144 for nf in iter(visit):
1144 1145 # If a stat for the same file was already added with a
1145 1146 # different case, don't add one for this, since that would
1146 1147 # make it appear as if the file exists under both names
1147 1148 # on disk.
1148 1149 if (normalizefile and
1149 1150 normalizefile(nf, True, True) in results):
1150 1151 results[nf] = None
1151 1152 # Report ignored items in the dmap as long as they are not
1152 1153 # under a symlink directory.
1153 1154 elif audit_path.check(nf):
1154 1155 try:
1155 1156 results[nf] = lstat(join(nf))
1156 1157 # file was just ignored, no links, and exists
1157 1158 except OSError:
1158 1159 # file doesn't exist
1159 1160 results[nf] = None
1160 1161 else:
1161 1162 # It's either missing or under a symlink directory
1162 1163 # which we in this case report as missing
1163 1164 results[nf] = None
1164 1165 else:
1165 1166 # We may not have walked the full directory tree above,
1166 1167 # so stat and check everything we missed.
1167 1168 iv = iter(visit)
1168 1169 for st in util.statfiles([join(i) for i in visit]):
1169 1170 results[next(iv)] = st
1170 1171 return results
1171 1172
1172 1173 def status(self, match, subrepos, ignored, clean, unknown):
1173 1174 '''Determine the status of the working copy relative to the
1174 1175 dirstate and return a pair of (unsure, status), where status is of type
1175 1176 scmutil.status and:
1176 1177
1177 1178 unsure:
1178 1179 files that might have been modified since the dirstate was
1179 1180 written, but need to be read to be sure (size is the same
1180 1181 but mtime differs)
1181 1182 status.modified:
1182 1183 files that have definitely been modified since the dirstate
1183 1184 was written (different size or mode)
1184 1185 status.clean:
1185 1186 files that have definitely not been modified since the
1186 1187 dirstate was written
1187 1188 '''
1188 1189 listignored, listclean, listunknown = ignored, clean, unknown
1189 1190 lookup, modified, added, unknown, ignored = [], [], [], [], []
1190 1191 removed, deleted, clean = [], [], []
1191 1192
1192 1193 dmap = self._map
1193 1194 ladd = lookup.append # aka "unsure"
1194 1195 madd = modified.append
1195 1196 aadd = added.append
1196 1197 uadd = unknown.append
1197 1198 iadd = ignored.append
1198 1199 radd = removed.append
1199 1200 dadd = deleted.append
1200 1201 cadd = clean.append
1201 1202 mexact = match.exact
1202 1203 dirignore = self._dirignore
1203 1204 checkexec = self._checkexec
1204 1205 copymap = self._copymap
1205 1206 lastnormaltime = self._lastnormaltime
1206 1207
1207 1208 # We need to do full walks when either
1208 1209 # - we're listing all clean files, or
1209 1210 # - match.traversedir does something, because match.traversedir should
1210 1211 # be called for every dir in the working dir
1211 1212 full = listclean or match.traversedir is not None
1212 1213 for fn, st in self.walk(match, subrepos, listunknown, listignored,
1213 1214 full=full).iteritems():
1214 1215 if fn not in dmap:
1215 1216 if (listignored or mexact(fn)) and dirignore(fn):
1216 1217 if listignored:
1217 1218 iadd(fn)
1218 1219 else:
1219 1220 uadd(fn)
1220 1221 continue
1221 1222
1222 1223 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1223 1224 # written like that for performance reasons. dmap[fn] is not a
1224 1225 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1225 1226 # opcode has fast paths when the value to be unpacked is a tuple or
1226 1227 # a list, but falls back to creating a full-fledged iterator in
1227 1228 # general. That is much slower than simply accessing and storing the
1228 1229 # tuple members one by one.
1229 1230 t = dmap[fn]
1230 1231 state = t[0]
1231 1232 mode = t[1]
1232 1233 size = t[2]
1233 1234 time = t[3]
1234 1235
1235 1236 if not st and state in "nma":
1236 1237 dadd(fn)
1237 1238 elif state == 'n':
1238 1239 if (size >= 0 and
1239 1240 ((size != st.st_size and size != st.st_size & _rangemask)
1240 1241 or ((mode ^ st.st_mode) & 0o100 and checkexec))
1241 1242 or size == -2 # other parent
1242 1243 or fn in copymap):
1243 1244 madd(fn)
1244 1245 elif time != st.st_mtime and time != st.st_mtime & _rangemask:
1245 1246 ladd(fn)
1246 1247 elif st.st_mtime == lastnormaltime:
1247 1248 # fn may have just been marked as normal and it may have
1248 1249 # changed in the same second without changing its size.
1249 1250 # This can happen if we quickly do multiple commits.
1250 1251 # Force lookup, so we don't miss such a racy file change.
1251 1252 ladd(fn)
1252 1253 elif listclean:
1253 1254 cadd(fn)
1254 1255 elif state == 'm':
1255 1256 madd(fn)
1256 1257 elif state == 'a':
1257 1258 aadd(fn)
1258 1259 elif state == 'r':
1259 1260 radd(fn)
1260 1261
1261 1262 return (lookup, scmutil.status(modified, added, removed, deleted,
1262 1263 unknown, ignored, clean))
1263 1264
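Interpreting the return value (a hedged sketch; match construction omitted):

    # lookup, st = repo.dirstate.status(match, [], ignored=False,
    #                                   clean=False, unknown=True)
    # st.modified -> definitely changed (size or mode differs)
    # st.added, st.removed, st.deleted, st.unknown -> as named
    # lookup      -> same size but different mtime: the caller must
    #                read the file contents to decide ("unsure")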
1264 1265 def matches(self, match):
1265 1266 '''
1266 1267 return files in the dirstate (in whatever state) filtered by match
1267 1268 '''
1268 1269 dmap = self._map
1269 1270 if match.always():
1270 1271 return dmap.keys()
1271 1272 files = match.files()
1272 1273 if match.isexact():
1273 1274 # fast path -- filter the other way around, since typically files is
1274 1275 # much smaller than dmap
1275 1276 return [f for f in files if f in dmap]
1276 1277 if match.prefix() and all(fn in dmap for fn in files):
1277 1278 # fast path -- all the values are known to be files, so just return
1278 1279 # that
1279 1280 return list(files)
1280 1281 return [f for f in dmap if match(f)]
1281 1282
1282 1283 def _actualfilename(self, tr):
1283 1284 if tr:
1284 1285 return self._pendingfilename
1285 1286 else:
1286 1287 return self._filename
1287 1288
1288 1289 def savebackup(self, tr, suffix='', prefix=''):
1289 1290 '''Save current dirstate into backup file with suffix'''
1290 1291 assert len(suffix) > 0 or len(prefix) > 0
1291 1292 filename = self._actualfilename(tr)
1292 1293
 1293  1294         # use '_writedirstate' instead of 'write' to make certain the changes
 1294  1295         # are written out, because the latter skips writing while a transaction
 1295  1296         # is running. The output file is used to create a dirstate backup here.
1296 1297 if self._dirty or not self._opener.exists(filename):
1297 1298 self._writedirstate(self._opener(filename, "w", atomictemp=True,
1298 1299 checkambig=True))
1299 1300
1300 1301 if tr:
1301 1302 # ensure that subsequent tr.writepending returns True for
1302 1303 # changes written out above, even if dirstate is never
1303 1304 # changed after this
1304 1305 tr.addfilegenerator('dirstate', (self._filename,),
1305 1306 self._writedirstate, location='plain')
1306 1307
1307 1308 # ensure that pending file written above is unlinked at
1308 1309 # failure, even if tr.writepending isn't invoked until the
1309 1310 # end of this transaction
1310 1311 tr.registertmp(filename, location='plain')
1311 1312
1312 1313 backupname = prefix + self._filename + suffix
1313 1314 assert backupname != filename
1314 1315 self._opener.tryunlink(backupname)
1315 1316 # hardlink backup is okay because _writedirstate is always called
1316 1317 # with an "atomictemp=True" file.
1317 1318 util.copyfile(self._opener.join(filename),
1318 1319 self._opener.join(backupname), hardlink=True)
1319 1320
1320 1321 def restorebackup(self, tr, suffix='', prefix=''):
1321 1322 '''Restore dirstate by backup file with suffix'''
1322 1323 assert len(suffix) > 0 or len(prefix) > 0
1323 1324 # this "invalidate()" prevents "wlock.release()" from writing
1324 1325 # changes of dirstate out after restoring from backup file
1325 1326 self.invalidate()
1326 1327 filename = self._actualfilename(tr)
1327 1328 # using self._filename to avoid having "pending" in the backup filename
1328 1329 self._opener.rename(prefix + self._filename + suffix, filename,
1329 1330 checkambig=True)
1330 1331
1331 1332 def clearbackup(self, tr, suffix='', prefix=''):
1332 1333 '''Clear backup file with suffix'''
1333 1334 assert len(suffix) > 0 or len(prefix) > 0
1334 1335 # using self._filename to avoid having "pending" in the backup filename
1335 1336 self._opener.unlink(prefix + self._filename + suffix)
mercurial/util.py
@@ -1,3742 +1,3747
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import codecs
21 21 import collections
22 22 import datetime
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import signal
32 32 import socket
33 33 import stat
34 34 import string
35 35 import subprocess
36 36 import sys
37 37 import tempfile
38 38 import textwrap
39 39 import time
40 40 import traceback
41 41 import warnings
42 42 import zlib
43 43
44 44 from . import (
45 45 encoding,
46 46 error,
47 47 i18n,
48 48 policy,
49 49 pycompat,
50 50 )
51 51
52 52 base85 = policy.importmod(r'base85')
53 53 osutil = policy.importmod(r'osutil')
54 54 parsers = policy.importmod(r'parsers')
55 55
56 56 b85decode = base85.b85decode
57 57 b85encode = base85.b85encode
58 58
59 59 cookielib = pycompat.cookielib
60 60 empty = pycompat.empty
61 61 httplib = pycompat.httplib
62 62 httpserver = pycompat.httpserver
63 63 pickle = pycompat.pickle
64 64 queue = pycompat.queue
65 65 socketserver = pycompat.socketserver
66 66 stderr = pycompat.stderr
67 67 stdin = pycompat.stdin
68 68 stdout = pycompat.stdout
69 69 stringio = pycompat.stringio
70 70 urlerr = pycompat.urlerr
71 71 urlreq = pycompat.urlreq
72 72 xmlrpclib = pycompat.xmlrpclib
73 73
74 74 # workaround for win32mbcs
75 75 _filenamebytestr = pycompat.bytestr
76 76
77 77 def isatty(fp):
78 78 try:
79 79 return fp.isatty()
80 80 except AttributeError:
81 81 return False
82 82
83 83 # glibc determines buffering on first write to stdout - if we replace a TTY
84 84 # destined stdout with a pipe destined stdout (e.g. pager), we want line
85 85 # buffering
86 86 if isatty(stdout):
87 87 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
88 88
89 89 if pycompat.osname == 'nt':
90 90 from . import windows as platform
91 91 stdout = platform.winstdout(stdout)
92 92 else:
93 93 from . import posix as platform
94 94
95 95 _ = i18n._
96 96
97 97 bindunixsocket = platform.bindunixsocket
98 98 cachestat = platform.cachestat
99 99 checkexec = platform.checkexec
100 100 checklink = platform.checklink
101 101 copymode = platform.copymode
102 102 executablepath = platform.executablepath
103 103 expandglobs = platform.expandglobs
104 104 explainexit = platform.explainexit
105 105 findexe = platform.findexe
106 106 gethgcmd = platform.gethgcmd
107 107 getuser = platform.getuser
108 108 getpid = os.getpid
109 109 groupmembers = platform.groupmembers
110 110 groupname = platform.groupname
111 111 hidewindow = platform.hidewindow
112 112 isexec = platform.isexec
113 113 isowner = platform.isowner
114 114 listdir = osutil.listdir
115 115 localpath = platform.localpath
116 116 lookupreg = platform.lookupreg
117 117 makedir = platform.makedir
118 118 nlinks = platform.nlinks
119 119 normpath = platform.normpath
120 120 normcase = platform.normcase
121 121 normcasespec = platform.normcasespec
122 122 normcasefallback = platform.normcasefallback
123 123 openhardlinks = platform.openhardlinks
124 124 oslink = platform.oslink
125 125 parsepatchoutput = platform.parsepatchoutput
126 126 pconvert = platform.pconvert
127 127 poll = platform.poll
128 128 popen = platform.popen
129 129 posixfile = platform.posixfile
130 130 quotecommand = platform.quotecommand
131 131 readpipe = platform.readpipe
132 132 rename = platform.rename
133 133 removedirs = platform.removedirs
134 134 samedevice = platform.samedevice
135 135 samefile = platform.samefile
136 136 samestat = platform.samestat
137 137 setbinary = platform.setbinary
138 138 setflags = platform.setflags
139 139 setsignalhandler = platform.setsignalhandler
140 140 shellquote = platform.shellquote
141 141 spawndetached = platform.spawndetached
142 142 split = platform.split
143 143 sshargs = platform.sshargs
144 144 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
145 145 statisexec = platform.statisexec
146 146 statislink = platform.statislink
147 147 testpid = platform.testpid
148 148 umask = platform.umask
149 149 unlink = platform.unlink
150 150 username = platform.username
151 151
152 152 try:
153 153 recvfds = osutil.recvfds
154 154 except AttributeError:
155 155 pass
156 156 try:
157 157 setprocname = osutil.setprocname
158 158 except AttributeError:
159 159 pass
160 160
161 161 # Python compatibility
162 162
163 163 _notset = object()
164 164
165 165 # disable Python's problematic floating point timestamps (issue4836)
166 166 # (Python hypocritically says you shouldn't change this behavior in
167 167 # libraries, and sure enough Mercurial is not a library.)
168 168 os.stat_float_times(False)
169 169
170 170 def safehasattr(thing, attr):
171 171 return getattr(thing, attr, _notset) is not _notset
172 172
173 173 def bitsfrom(container):
174 174 bits = 0
175 175 for bit in container:
176 176 bits |= bit
177 177 return bits
178 178
 179  179 # python 2.6 still has deprecation warnings enabled by default. We do not want
 180  180 # to display anything to the standard user, so detect if we are running tests
 181  181 # and only use python deprecation warnings in this case.
182 182 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
183 183 if _dowarn:
184 184 # explicitly unfilter our warning for python 2.7
185 185 #
186 186 # The option of setting PYTHONWARNINGS in the test runner was investigated.
187 187 # However, module name set through PYTHONWARNINGS was exactly matched, so
188 188 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
189 189 # makes the whole PYTHONWARNINGS thing useless for our usecase.
190 190 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
191 191 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
192 192 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
193 193
194 194 def nouideprecwarn(msg, version, stacklevel=1):
195 195 """Issue an python native deprecation warning
196 196
197 197 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
198 198 """
199 199 if _dowarn:
200 200 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
201 201 " update your code.)") % version
202 202 warnings.warn(msg, DeprecationWarning, stacklevel + 1)
203 203
204 204 DIGESTS = {
205 205 'md5': hashlib.md5,
206 206 'sha1': hashlib.sha1,
207 207 'sha512': hashlib.sha512,
208 208 }
209 209 # List of digest types from strongest to weakest
210 210 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
211 211
212 212 for k in DIGESTS_BY_STRENGTH:
213 213 assert k in DIGESTS
214 214
215 215 class digester(object):
216 216 """helper to compute digests.
217 217
218 218 This helper can be used to compute one or more digests given their name.
219 219
220 220 >>> d = digester(['md5', 'sha1'])
221 221 >>> d.update('foo')
222 222 >>> [k for k in sorted(d)]
223 223 ['md5', 'sha1']
224 224 >>> d['md5']
225 225 'acbd18db4cc2f85cedef654fccc4a4d8'
226 226 >>> d['sha1']
227 227 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
228 228 >>> digester.preferred(['md5', 'sha1'])
229 229 'sha1'
230 230 """
231 231
232 232 def __init__(self, digests, s=''):
233 233 self._hashes = {}
234 234 for k in digests:
235 235 if k not in DIGESTS:
236 236 raise Abort(_('unknown digest type: %s') % k)
237 237 self._hashes[k] = DIGESTS[k]()
238 238 if s:
239 239 self.update(s)
240 240
241 241 def update(self, data):
242 242 for h in self._hashes.values():
243 243 h.update(data)
244 244
245 245 def __getitem__(self, key):
246 246 if key not in DIGESTS:
 247  247             raise Abort(_('unknown digest type: %s') % key)
248 248 return self._hashes[key].hexdigest()
249 249
250 250 def __iter__(self):
251 251 return iter(self._hashes)
252 252
253 253 @staticmethod
254 254 def preferred(supported):
255 255 """returns the strongest digest type in both supported and DIGESTS."""
256 256
257 257 for k in DIGESTS_BY_STRENGTH:
258 258 if k in supported:
259 259 return k
260 260 return None
261 261
262 262 class digestchecker(object):
263 263 """file handle wrapper that additionally checks content against a given
264 264 size and digests.
265 265
266 266 d = digestchecker(fh, size, {'md5': '...'})
267 267
268 268 When multiple digests are given, all of them are validated.
269 269 """
270 270
271 271 def __init__(self, fh, size, digests):
272 272 self._fh = fh
273 273 self._size = size
274 274 self._got = 0
275 275 self._digests = dict(digests)
276 276 self._digester = digester(self._digests.keys())
277 277
278 278 def read(self, length=-1):
279 279 content = self._fh.read(length)
280 280 self._digester.update(content)
281 281 self._got += len(content)
282 282 return content
283 283
284 284 def validate(self):
285 285 if self._size != self._got:
286 286 raise Abort(_('size mismatch: expected %d, got %d') %
287 287 (self._size, self._got))
288 288 for k, v in self._digests.items():
289 289 if v != self._digester[k]:
290 290 # i18n: first parameter is a digest name
291 291 raise Abort(_('%s mismatch: expected %s, got %s') %
292 292 (k, v, self._digester[k]))
293 293
294 294 try:
295 295 buffer = buffer
296 296 except NameError:
297 297 if not pycompat.ispy3:
298 298 def buffer(sliceable, offset=0, length=None):
299 299 if length is not None:
300 300 return sliceable[offset:offset + length]
301 301 return sliceable[offset:]
302 302 else:
303 303 def buffer(sliceable, offset=0, length=None):
304 304 if length is not None:
305 305 return memoryview(sliceable)[offset:offset + length]
306 306 return memoryview(sliceable)[offset:]
307 307
308 308 closefds = pycompat.osname == 'posix'
309 309
310 310 _chunksize = 4096
311 311
312 312 class bufferedinputpipe(object):
313 313 """a manually buffered input pipe
314 314
315 315 Python will not let us use buffered IO and lazy reading with 'polling' at
316 316 the same time. We cannot probe the buffer state and select will not detect
317 317 that data are ready to read if they are already buffered.
318 318
319 319 This class lets us work around that by implementing its own buffering
320 320 (allowing efficient readline) while offering a way to know if the buffer is
321 321 empty from the outside (allowing the buffer to collaborate with polling).
322 322
323 323 This class lives in the 'util' module because it makes use of the 'os'
324 324 module from the python stdlib.
325 325 """
326 326
327 327 def __init__(self, input):
328 328 self._input = input
329 329 self._buffer = []
330 330 self._eof = False
331 331 self._lenbuf = 0
332 332
333 333 @property
334 334 def hasbuffer(self):
335 335 """True is any data is currently buffered
336 336
337 337 This will be used externally a pre-step for polling IO. If there is
338 338 already data then no polling should be set in place."""
339 339 return bool(self._buffer)
340 340
341 341 @property
342 342 def closed(self):
343 343 return self._input.closed
344 344
345 345 def fileno(self):
346 346 return self._input.fileno()
347 347
348 348 def close(self):
349 349 return self._input.close()
350 350
351 351 def read(self, size):
352 352 while (not self._eof) and (self._lenbuf < size):
353 353 self._fillbuffer()
354 354 return self._frombuffer(size)
355 355
356 356 def readline(self, *args, **kwargs):
357 357 if 1 < len(self._buffer):
358 358 # this should not happen because both read and readline end with a
359 359 # _frombuffer call that collapses it.
360 360 self._buffer = [''.join(self._buffer)]
361 361 self._lenbuf = len(self._buffer[0])
362 362 lfi = -1
363 363 if self._buffer:
364 364 lfi = self._buffer[-1].find('\n')
365 365 while (not self._eof) and lfi < 0:
366 366 self._fillbuffer()
367 367 if self._buffer:
368 368 lfi = self._buffer[-1].find('\n')
369 369 size = lfi + 1
370 370 if lfi < 0: # end of file
371 371 size = self._lenbuf
372 372 elif 1 < len(self._buffer):
373 373 # we need to take previous chunks into account
374 374 size += self._lenbuf - len(self._buffer[-1])
375 375 return self._frombuffer(size)
376 376
377 377 def _frombuffer(self, size):
378 378 """return at most 'size' data from the buffer
379 379
380 380 The data are removed from the buffer."""
381 381 if size == 0 or not self._buffer:
382 382 return ''
383 383 buf = self._buffer[0]
384 384 if 1 < len(self._buffer):
385 385 buf = ''.join(self._buffer)
386 386
387 387 data = buf[:size]
388 388 buf = buf[len(data):]
389 389 if buf:
390 390 self._buffer = [buf]
391 391 self._lenbuf = len(buf)
392 392 else:
393 393 self._buffer = []
394 394 self._lenbuf = 0
395 395 return data
396 396
397 397 def _fillbuffer(self):
398 398 """read data to the buffer"""
399 399 data = os.read(self._input.fileno(), _chunksize)
400 400 if not data:
401 401 self._eof = True
402 402 else:
403 403 self._lenbuf += len(data)
404 404 self._buffer.append(data)
405 405
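# Illustrative sketch (POSIX only; 'proc' is a hypothetical Popen): consult
# the Python-side buffer before blocking in select(), so already-buffered
# data is never missed:
#
#   import select
#   pipe = bufferedinputpipe(proc.stdout)
#   if pipe.hasbuffer or select.select([pipe], [], [], 0)[0]:
#       line = pipe.readline()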
406 406 def popen2(cmd, env=None, newlines=False):
407 407 # Setting bufsize to -1 lets the system decide the buffer size.
408 408 # The default for bufsize is 0, meaning unbuffered. This leads to
409 409 # poor performance on Mac OS X: http://bugs.python.org/issue4194
410 410 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
411 411 close_fds=closefds,
412 412 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
413 413 universal_newlines=newlines,
414 414 env=env)
415 415 return p.stdin, p.stdout
416 416
417 417 def popen3(cmd, env=None, newlines=False):
418 418 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
419 419 return stdin, stdout, stderr
420 420
421 421 def popen4(cmd, env=None, newlines=False, bufsize=-1):
422 422 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
423 423 close_fds=closefds,
424 424 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
425 425 stderr=subprocess.PIPE,
426 426 universal_newlines=newlines,
427 427 env=env)
428 428 return p.stdin, p.stdout, p.stderr, p
429 429
430 430 def version():
431 431 """Return version information if available."""
432 432 try:
433 433 from . import __version__
434 434 return __version__.version
435 435 except ImportError:
436 436 return 'unknown'
437 437
438 438 def versiontuple(v=None, n=4):
439 439 """Parses a Mercurial version string into an N-tuple.
440 440
441 441 The version string to be parsed is specified with the ``v`` argument.
442 442 If it isn't defined, the current Mercurial version string will be parsed.
443 443
444 444 ``n`` can be 2, 3, or 4. Here is how some version strings map to
445 445 returned values:
446 446
447 447 >>> v = '3.6.1+190-df9b73d2d444'
448 448 >>> versiontuple(v, 2)
449 449 (3, 6)
450 450 >>> versiontuple(v, 3)
451 451 (3, 6, 1)
452 452 >>> versiontuple(v, 4)
453 453 (3, 6, 1, '190-df9b73d2d444')
454 454
455 455 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
456 456 (3, 6, 1, '190-df9b73d2d444+20151118')
457 457
458 458 >>> v = '3.6'
459 459 >>> versiontuple(v, 2)
460 460 (3, 6)
461 461 >>> versiontuple(v, 3)
462 462 (3, 6, None)
463 463 >>> versiontuple(v, 4)
464 464 (3, 6, None, None)
465 465
466 466 >>> v = '3.9-rc'
467 467 >>> versiontuple(v, 2)
468 468 (3, 9)
469 469 >>> versiontuple(v, 3)
470 470 (3, 9, None)
471 471 >>> versiontuple(v, 4)
472 472 (3, 9, None, 'rc')
473 473
474 474 >>> v = '3.9-rc+2-02a8fea4289b'
475 475 >>> versiontuple(v, 2)
476 476 (3, 9)
477 477 >>> versiontuple(v, 3)
478 478 (3, 9, None)
479 479 >>> versiontuple(v, 4)
480 480 (3, 9, None, 'rc+2-02a8fea4289b')
481 481 """
482 482 if not v:
483 483 v = version()
484 484 parts = remod.split(r'[\+-]', v, 1)
485 485 if len(parts) == 1:
486 486 vparts, extra = parts[0], None
487 487 else:
488 488 vparts, extra = parts
489 489
490 490 vints = []
491 491 for i in vparts.split('.'):
492 492 try:
493 493 vints.append(int(i))
494 494 except ValueError:
495 495 break
496 496 # (3, 6) -> (3, 6, None)
497 497 while len(vints) < 3:
498 498 vints.append(None)
499 499
500 500 if n == 2:
501 501 return (vints[0], vints[1])
502 502 if n == 3:
503 503 return (vints[0], vints[1], vints[2])
504 504 if n == 4:
505 505 return (vints[0], vints[1], vints[2], extra)
506 506
507 507 # used by parsedate
508 508 defaultdateformats = (
509 509 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
510 510 '%Y-%m-%dT%H:%M', # without seconds
511 511 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
512 512 '%Y-%m-%dT%H%M', # without seconds
513 513 '%Y-%m-%d %H:%M:%S', # our common legal variant
514 514 '%Y-%m-%d %H:%M', # without seconds
515 515 '%Y-%m-%d %H%M%S', # without :
516 516 '%Y-%m-%d %H%M', # without seconds
517 517 '%Y-%m-%d %I:%M:%S%p',
518 518 '%Y-%m-%d %H:%M',
519 519 '%Y-%m-%d %I:%M%p',
520 520 '%Y-%m-%d',
521 521 '%m-%d',
522 522 '%m/%d',
523 523 '%m/%d/%y',
524 524 '%m/%d/%Y',
525 525 '%a %b %d %H:%M:%S %Y',
526 526 '%a %b %d %I:%M:%S%p %Y',
527 527 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
528 528 '%b %d %H:%M:%S %Y',
529 529 '%b %d %I:%M:%S%p %Y',
530 530 '%b %d %H:%M:%S',
531 531 '%b %d %I:%M:%S%p',
532 532 '%b %d %H:%M',
533 533 '%b %d %I:%M%p',
534 534 '%b %d %Y',
535 535 '%b %d',
536 536 '%H:%M:%S',
537 537 '%I:%M:%S%p',
538 538 '%H:%M',
539 539 '%I:%M%p',
540 540 )
541 541
542 542 extendeddateformats = defaultdateformats + (
543 543 "%Y",
544 544 "%Y-%m",
545 545 "%b",
546 546 "%b %Y",
547 547 )
548 548
549 549 def cachefunc(func):
550 550 '''cache the result of function calls'''
551 551 # XXX doesn't handle keyword args
552 552 if func.__code__.co_argcount == 0:
553 553 cache = []
554 554 def f():
555 555 if len(cache) == 0:
556 556 cache.append(func())
557 557 return cache[0]
558 558 return f
559 559 cache = {}
560 560 if func.__code__.co_argcount == 1:
561 561 # we gain a small amount of time because
562 562 # we don't need to pack/unpack the list
563 563 def f(arg):
564 564 if arg not in cache:
565 565 cache[arg] = func(arg)
566 566 return cache[arg]
567 567 else:
568 568 def f(*args):
569 569 if args not in cache:
570 570 cache[args] = func(*args)
571 571 return cache[args]
572 572
573 573 return f
574 574
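# Illustrative sketch (not from the original source): memoizing a pure
# one-argument function; repeated calls hit the cache:
#
# >>> calls = []
# >>> def square(x):
# ...     calls.append(x)
# ...     return x * x
# >>> square = cachefunc(square)
# >>> square(3), square(3)
# (9, 9)
# >>> calls
# [3]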
575 575 class sortdict(collections.OrderedDict):
576 576 '''a simple sorted dictionary
577 577
578 578 >>> d1 = sortdict([('a', 0), ('b', 1)])
579 579 >>> d2 = d1.copy()
580 580 >>> d2
581 581 sortdict([('a', 0), ('b', 1)])
582 582 >>> d2.update([('a', 2)])
583 583 >>> d2.keys() # should still be in last-set order
584 584 ['b', 'a']
585 585 '''
586 586
587 587 def __setitem__(self, key, value):
588 588 if key in self:
589 589 del self[key]
590 590 super(sortdict, self).__setitem__(key, value)
591 591
592 592 class _lrucachenode(object):
593 593 """A node in a doubly linked list.
594 594
595 595 Holds a reference to nodes on either side as well as a key-value
596 596 pair for the dictionary entry.
597 597 """
598 598 __slots__ = (u'next', u'prev', u'key', u'value')
599 599
600 600 def __init__(self):
601 601 self.next = None
602 602 self.prev = None
603 603
604 604 self.key = _notset
605 605 self.value = None
606 606
607 607 def markempty(self):
608 608 """Mark the node as emptied."""
609 609 self.key = _notset
610 610
611 611 class lrucachedict(object):
612 612 """Dict that caches most recent accesses and sets.
613 613
614 614 The dict consists of an actual backing dict - indexed by original
615 615 key - and a doubly linked circular list defining the order of entries in
616 616 the cache.
617 617
618 618 The head node is the newest entry in the cache. If the cache is full,
619 619 we recycle head.prev and make it the new head. Cache accesses result in
620 620 the node being moved to before the existing head and being marked as the
621 621 new head node.
622 622 """
623 623 def __init__(self, max):
624 624 self._cache = {}
625 625
626 626 self._head = head = _lrucachenode()
627 627 head.prev = head
628 628 head.next = head
629 629 self._size = 1
630 630 self._capacity = max
631 631
632 632 def __len__(self):
633 633 return len(self._cache)
634 634
635 635 def __contains__(self, k):
636 636 return k in self._cache
637 637
638 638 def __iter__(self):
639 639 # We don't have to iterate in cache order, but why not.
640 640 n = self._head
641 641 for i in range(len(self._cache)):
642 642 yield n.key
643 643 n = n.next
644 644
645 645 def __getitem__(self, k):
646 646 node = self._cache[k]
647 647 self._movetohead(node)
648 648 return node.value
649 649
650 650 def __setitem__(self, k, v):
651 651 node = self._cache.get(k)
652 652 # Replace existing value and mark as newest.
653 653 if node is not None:
654 654 node.value = v
655 655 self._movetohead(node)
656 656 return
657 657
658 658 if self._size < self._capacity:
659 659 node = self._addcapacity()
660 660 else:
661 661 # Grab the last/oldest item.
662 662 node = self._head.prev
663 663
664 664 # At capacity. Kill the old entry.
665 665 if node.key is not _notset:
666 666 del self._cache[node.key]
667 667
668 668 node.key = k
669 669 node.value = v
670 670 self._cache[k] = node
671 671 # And mark it as newest entry. No need to adjust order since it
672 672 # is already self._head.prev.
673 673 self._head = node
674 674
675 675 def __delitem__(self, k):
676 676 node = self._cache.pop(k)
677 677 node.markempty()
678 678
679 679 # Temporarily mark as newest item before re-adjusting head to make
680 680 # this node the oldest item.
681 681 self._movetohead(node)
682 682 self._head = node.next
683 683
684 684 # Additional dict methods.
685 685
686 686 def get(self, k, default=None):
687 687 try:
688 688 return self._cache[k].value
689 689 except KeyError:
690 690 return default
691 691
692 692 def clear(self):
693 693 n = self._head
694 694 while n.key is not _notset:
695 695 n.markempty()
696 696 n = n.next
697 697
698 698 self._cache.clear()
699 699
700 700 def copy(self):
701 701 result = lrucachedict(self._capacity)
702 702 n = self._head.prev
703 703 # Iterate in oldest-to-newest order, so the copy has the right ordering
704 704 for i in range(len(self._cache)):
705 705 result[n.key] = n.value
706 706 n = n.prev
707 707 return result
708 708
709 709 def _movetohead(self, node):
710 710 """Mark a node as the newest, making it the new head.
711 711
712 712 When a node is accessed, it becomes the freshest entry in the LRU
713 713 list, which is denoted by self._head.
714 714
715 715 Visually, let's make ``N`` the new head node (* denotes head):
716 716
717 717 previous/oldest <-> head <-> next/next newest
718 718
719 719 ----<->--- A* ---<->-----
720 720 | |
721 721 E <-> D <-> N <-> C <-> B
722 722
723 723 To:
724 724
725 725 ----<->--- N* ---<->-----
726 726 | |
727 727 E <-> D <-> C <-> B <-> A
728 728
729 729 This requires the following moves:
730 730
731 731 C.next = D (node.prev.next = node.next)
732 732 D.prev = C (node.next.prev = node.prev)
733 733 E.next = N (head.prev.next = node)
734 734 N.prev = E (node.prev = head.prev)
735 735 N.next = A (node.next = head)
736 736 A.prev = N (head.prev = node)
737 737 """
738 738 head = self._head
739 739 # C.next = D
740 740 node.prev.next = node.next
741 741 # D.prev = C
742 742 node.next.prev = node.prev
743 743 # N.prev = E
744 744 node.prev = head.prev
745 745 # N.next = A
746 746 # It is tempting to do just "head" here, however if node is
747 747 # adjacent to head, this will do bad things.
748 748 node.next = head.prev.next
749 749 # E.next = N
750 750 node.next.prev = node
751 751 # A.prev = N
752 752 node.prev.next = node
753 753
754 754 self._head = node
755 755
756 756 def _addcapacity(self):
757 757 """Add a node to the circular linked list.
758 758
759 759 The new node is inserted before the head node.
760 760 """
761 761 head = self._head
762 762 node = _lrucachenode()
763 763 head.prev.next = node
764 764 node.prev = head.prev
765 765 node.next = head
766 766 head.prev = node
767 767 self._size += 1
768 768 return node
769 769
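# Illustrative sketch (not from the original source): with capacity 2, the
# least recently used key is evicted on overflow:
#
# >>> d = lrucachedict(2)
# >>> d['a'] = 1
# >>> d['b'] = 2
# >>> d['a'] # touch 'a' so 'b' becomes the oldest entry
# 1
# >>> d['c'] = 3 # evicts 'b'
# >>> 'b' in d
# False
# >>> 'a' in d and 'c' in d
# True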
770 770 def lrucachefunc(func):
771 771 '''cache most recent results of function calls'''
772 772 cache = {}
773 773 order = collections.deque()
774 774 if func.__code__.co_argcount == 1:
775 775 def f(arg):
776 776 if arg not in cache:
777 777 if len(cache) > 20:
778 778 del cache[order.popleft()]
779 779 cache[arg] = func(arg)
780 780 else:
781 781 order.remove(arg)
782 782 order.append(arg)
783 783 return cache[arg]
784 784 else:
785 785 def f(*args):
786 786 if args not in cache:
787 787 if len(cache) > 20:
788 788 del cache[order.popleft()]
789 789 cache[args] = func(*args)
790 790 else:
791 791 order.remove(args)
792 792 order.append(args)
793 793 return cache[args]
794 794
795 795 return f
796 796
797 797 class propertycache(object):
798 798 def __init__(self, func):
799 799 self.func = func
800 800 self.name = func.__name__
801 801 def __get__(self, obj, type=None):
802 802 result = self.func(obj)
803 803 self.cachevalue(obj, result)
804 804 return result
805 805
806 806 def cachevalue(self, obj, value):
807 807 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
808 808 obj.__dict__[self.name] = value
809 809
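# Illustrative sketch (not from the original source): the wrapped method runs
# once per instance; cachevalue() then shadows the descriptor via __dict__:
#
# >>> class numbers(object):
# ...     @propertycache
# ...     def answer(self):
# ...         print('computing')
# ...         return 42
# >>> n = numbers()
# >>> n.answer
# computing
# 42
# >>> n.answer # cached; no recompute
# 42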
810 810 def pipefilter(s, cmd):
811 811 '''filter string S through command CMD, returning its output'''
812 812 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
813 813 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
814 814 pout, perr = p.communicate(s)
815 815 return pout
816 816
817 817 def tempfilter(s, cmd):
818 818 '''filter string S through a pair of temporary files with CMD.
819 819 CMD is used as a template to create the real command to be run,
820 820 with the strings INFILE and OUTFILE replaced by the real names of
821 821 the temporary files generated.'''
822 822 inname, outname = None, None
823 823 try:
824 824 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
825 825 fp = os.fdopen(infd, pycompat.sysstr('wb'))
826 826 fp.write(s)
827 827 fp.close()
828 828 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
829 829 os.close(outfd)
830 830 cmd = cmd.replace('INFILE', inname)
831 831 cmd = cmd.replace('OUTFILE', outname)
832 832 code = os.system(cmd)
833 833 if pycompat.sysplatform == 'OpenVMS' and code & 1:
834 834 code = 0
835 835 if code:
836 836 raise Abort(_("command '%s' failed: %s") %
837 837 (cmd, explainexit(code)))
838 838 return readfile(outname)
839 839 finally:
840 840 try:
841 841 if inname:
842 842 os.unlink(inname)
843 843 except OSError:
844 844 pass
845 845 try:
846 846 if outname:
847 847 os.unlink(outname)
848 848 except OSError:
849 849 pass
850 850
851 851 filtertable = {
852 852 'tempfile:': tempfilter,
853 853 'pipe:': pipefilter,
854 854 }
855 855
856 856 def filter(s, cmd):
857 857 "filter a string through a command that transforms its input to its output"
858 858 for name, fn in filtertable.iteritems():
859 859 if cmd.startswith(name):
860 860 return fn(s, cmd[len(name):].lstrip())
861 861 return pipefilter(s, cmd)
862 862
863 863 def binary(s):
864 864 """return true if a string is binary data"""
865 865 return bool(s and '\0' in s)
866 866
867 867 def increasingchunks(source, min=1024, max=65536):
868 868 '''return no less than min bytes per chunk while data remains,
869 869 doubling min after each chunk until it reaches max'''
870 870 def log2(x):
871 871 if not x:
872 872 return 0
873 873 i = 0
874 874 while x:
875 875 x >>= 1
876 876 i += 1
877 877 return i - 1
878 878
879 879 buf = []
880 880 blen = 0
881 881 for chunk in source:
882 882 buf.append(chunk)
883 883 blen += len(chunk)
884 884 if blen >= min:
885 885 if min < max:
886 886 min = min << 1
887 887 nmin = 1 << log2(blen)
888 888 if nmin > min:
889 889 min = nmin
890 890 if min > max:
891 891 min = max
892 892 yield ''.join(buf)
893 893 blen = 0
894 894 buf = []
895 895 if buf:
896 896 yield ''.join(buf)
897 897
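# Illustrative sketch: ten 500-byte chunks are coalesced, and the yield
# threshold doubles after each emitted chunk:
#
# >>> [len(c) for c in increasingchunks(['x' * 500] * 10, min=1024, max=4096)]
# [1500, 2500, 1000]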
898 898 Abort = error.Abort
899 899
900 900 def always(fn):
901 901 return True
902 902
903 903 def never(fn):
904 904 return False
905 905
906 906 def nogc(func):
907 907 """disable garbage collector
908 908
909 909 Python's garbage collector triggers a GC each time a certain number of
910 910 container objects (the number being defined by gc.get_threshold()) are
911 911 allocated even when marked not to be tracked by the collector. Tracking has
912 912 no effect on when GCs are triggered, only on what objects the GC looks
913 913 into. As a workaround, disable GC while building complex (huge)
914 914 containers.
915 915
916 916 This garbage collector issue has been fixed in Python 2.7.
917 917 """
918 918 if sys.version_info >= (2, 7):
919 919 return func
920 920 def wrapper(*args, **kwargs):
921 921 gcenabled = gc.isenabled()
922 922 gc.disable()
923 923 try:
924 924 return func(*args, **kwargs)
925 925 finally:
926 926 if gcenabled:
927 927 gc.enable()
928 928 return wrapper
929 929
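# Illustrative sketch: wrap a builder of huge containers so pre-2.7
# interpreters skip GC passes while it runs (a pass-through on >= 2.7):
#
#   @nogc
#   def buildmap(pairs):
#       return dict(pairs)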
930 930 def pathto(root, n1, n2):
931 931 '''return the relative path from one place to another.
932 932 root should use os.sep to separate directories
933 933 n1 should use os.sep to separate directories
934 934 n2 should use "/" to separate directories
935 935 returns an os.sep-separated path.
936 936
937 937 If n1 is a relative path, it's assumed it's
938 938 relative to root.
939 939 n2 should always be relative to root.
940 940 '''
941 941 if not n1:
942 942 return localpath(n2)
943 943 if os.path.isabs(n1):
944 944 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
945 945 return os.path.join(root, localpath(n2))
946 946 n2 = '/'.join((pconvert(root), n2))
947 947 a, b = splitpath(n1), n2.split('/')
948 948 a.reverse()
949 949 b.reverse()
950 950 while a and b and a[-1] == b[-1]:
951 951 a.pop()
952 952 b.pop()
953 953 b.reverse()
954 954 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
955 955
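# Illustrative sketch (assuming a POSIX '/' separator):
#
# >>> pathto('/repo', '/repo/sub/dir', 'sub/file.txt')
# '../file.txt'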
956 956 def mainfrozen():
957 957 """return True if we are a frozen executable.
958 958
959 959 The code supports py2exe (most common, Windows only) and tools/freeze
960 960 (portable, not much used).
961 961 """
962 962 return (safehasattr(sys, "frozen") or # new py2exe
963 963 safehasattr(sys, "importers") or # old py2exe
964 964 imp.is_frozen(u"__main__")) # tools/freeze
965 965
966 966 # the location of data files matching the source code
967 967 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
968 968 # executable version (py2exe) doesn't support __file__
969 969 datapath = os.path.dirname(pycompat.sysexecutable)
970 970 else:
971 971 datapath = os.path.dirname(pycompat.fsencode(__file__))
972 972
973 973 i18n.setdatapath(datapath)
974 974
975 975 _hgexecutable = None
976 976
977 977 def hgexecutable():
978 978 """return location of the 'hg' executable.
979 979
980 980 Defaults to $HG or 'hg' in the search path.
981 981 """
982 982 if _hgexecutable is None:
983 983 hg = encoding.environ.get('HG')
984 984 mainmod = sys.modules[pycompat.sysstr('__main__')]
985 985 if hg:
986 986 _sethgexecutable(hg)
987 987 elif mainfrozen():
988 988 if getattr(sys, 'frozen', None) == 'macosx_app':
989 989 # Env variable set by py2app
990 990 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
991 991 else:
992 992 _sethgexecutable(pycompat.sysexecutable)
993 993 elif (os.path.basename(
994 994 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
995 995 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
996 996 else:
997 997 exe = findexe('hg') or os.path.basename(sys.argv[0])
998 998 _sethgexecutable(exe)
999 999 return _hgexecutable
1000 1000
1001 1001 def _sethgexecutable(path):
1002 1002 """set location of the 'hg' executable"""
1003 1003 global _hgexecutable
1004 1004 _hgexecutable = path
1005 1005
1006 1006 def _isstdout(f):
1007 1007 fileno = getattr(f, 'fileno', None)
1008 1008 return fileno and fileno() == sys.__stdout__.fileno()
1009 1009
1010 1010 def shellenviron(environ=None):
1011 1011 """return environ with optional override, useful for shelling out"""
1012 1012 def py2shell(val):
1013 1013 'convert python object into string that is useful to shell'
1014 1014 if val is None or val is False:
1015 1015 return '0'
1016 1016 if val is True:
1017 1017 return '1'
1018 1018 return str(val)
1019 1019 env = dict(encoding.environ)
1020 1020 if environ:
1021 1021 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1022 1022 env['HG'] = hgexecutable()
1023 1023 return env
1024 1024
1025 1025 def system(cmd, environ=None, cwd=None, out=None):
1026 1026 '''enhanced shell command execution.
1027 1027 run with environment maybe modified, maybe in different dir.
1028 1028
1029 1029 if out is specified, it is assumed to be a file-like object that has a
1030 1030 write() method. stdout and stderr will be redirected to out.'''
1031 1031 try:
1032 1032 stdout.flush()
1033 1033 except Exception:
1034 1034 pass
1035 1035 cmd = quotecommand(cmd)
1036 1036 if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
1037 1037 and sys.version_info[1] < 7):
1038 1038 # subprocess kludge to work around issues in half-baked Python
1039 1039 # ports, notably bichued/python:
1040 1040 if cwd is not None:
1041 1041 os.chdir(cwd)
1042 1042 rc = os.system(cmd)
1043 1043 else:
1044 1044 env = shellenviron(environ)
1045 1045 if out is None or _isstdout(out):
1046 1046 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1047 1047 env=env, cwd=cwd)
1048 1048 else:
1049 1049 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1050 1050 env=env, cwd=cwd, stdout=subprocess.PIPE,
1051 1051 stderr=subprocess.STDOUT)
1052 1052 for line in iter(proc.stdout.readline, ''):
1053 1053 out.write(line)
1054 1054 proc.wait()
1055 1055 rc = proc.returncode
1056 1056 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1057 1057 rc = 0
1058 1058 return rc
1059 1059
1060 1060 def checksignature(func):
1061 1061 '''wrap a function with code to check for calling errors'''
1062 1062 def check(*args, **kwargs):
1063 1063 try:
1064 1064 return func(*args, **kwargs)
1065 1065 except TypeError:
1066 1066 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1067 1067 raise error.SignatureError
1068 1068 raise
1069 1069
1070 1070 return check
1071 1071
1072 1072 # a whitelist of known filesystems where hardlink works reliably
1073 1073 _hardlinkfswhitelist = {
1074 1074 'btrfs',
1075 1075 'ext2',
1076 1076 'ext3',
1077 1077 'ext4',
1078 1078 'hfs',
1079 1079 'jfs',
1080 1080 'reiserfs',
1081 1081 'tmpfs',
1082 1082 'ufs',
1083 1083 'xfs',
1084 1084 'zfs',
1085 1085 }
1086 1086
1087 1087 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1088 1088 '''copy a file, preserving mode and optionally other stat info like
1089 1089 atime/mtime
1090 1090
1091 1091 checkambig argument is used with filestat, and is useful only if
1092 1092 destination file is guarded by any lock (e.g. repo.lock or
1093 1093 repo.wlock).
1094 1094
1095 1095 copystat and checkambig should be exclusive.
1096 1096 '''
1097 1097 assert not (copystat and checkambig)
1098 1098 oldstat = None
1099 1099 if os.path.lexists(dest):
1100 1100 if checkambig:
1101 oldstat = checkambig and filestat(dest)
1101 oldstat = checkambig and filestat.frompath(dest)
1102 1102 unlink(dest)
1103 1103 if hardlink:
1104 1104 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1105 1105 # unless we are confident that dest is on a whitelisted filesystem.
1106 1106 try:
1107 1107 fstype = getfstype(os.path.dirname(dest))
1108 1108 except OSError:
1109 1109 fstype = None
1110 1110 if fstype not in _hardlinkfswhitelist:
1111 1111 hardlink = False
1112 1112 if hardlink:
1113 1113 try:
1114 1114 oslink(src, dest)
1115 1115 return
1116 1116 except (IOError, OSError):
1117 1117 pass # fall back to normal copy
1118 1118 if os.path.islink(src):
1119 1119 os.symlink(os.readlink(src), dest)
1120 1120 # copying times is skipped for symlinks, but in general it isn't
1121 1121 # needed for them anyway
1122 1122 else:
1123 1123 try:
1124 1124 shutil.copyfile(src, dest)
1125 1125 if copystat:
1126 1126 # copystat also copies mode
1127 1127 shutil.copystat(src, dest)
1128 1128 else:
1129 1129 shutil.copymode(src, dest)
1130 1130 if oldstat and oldstat.stat:
1131 newstat = filestat(dest)
1131 newstat = filestat.frompath(dest)
1132 1132 if newstat.isambig(oldstat):
1133 1133 # stat of copied file is ambiguous to original one
1134 1134 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1135 1135 os.utime(dest, (advanced, advanced))
1136 1136 except shutil.Error as inst:
1137 1137 raise Abort(str(inst))
1138 1138
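# Illustrative sketch (hypothetical paths): try hardlinking first; copyfile
# silently downgrades to a real copy off the whitelist above:
#
#   copyfile('.hg/store/00changelog.i', 'backup/00changelog.i',
#            hardlink=True, copystat=True)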
1139 1139 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1140 1140 """Copy a directory tree using hardlinks if possible."""
1141 1141 num = 0
1142 1142
1143 1143 gettopic = lambda: hardlink and _('linking') or _('copying')
1144 1144
1145 1145 if os.path.isdir(src):
1146 1146 if hardlink is None:
1147 1147 hardlink = (os.stat(src).st_dev ==
1148 1148 os.stat(os.path.dirname(dst)).st_dev)
1149 1149 topic = gettopic()
1150 1150 os.mkdir(dst)
1151 1151 for name, kind in listdir(src):
1152 1152 srcname = os.path.join(src, name)
1153 1153 dstname = os.path.join(dst, name)
1154 1154 def nprog(t, pos):
1155 1155 if pos is not None:
1156 1156 return progress(t, pos + num)
1157 1157 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1158 1158 num += n
1159 1159 else:
1160 1160 if hardlink is None:
1161 1161 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1162 1162 os.stat(os.path.dirname(dst)).st_dev)
1163 1163 topic = gettopic()
1164 1164
1165 1165 if hardlink:
1166 1166 try:
1167 1167 oslink(src, dst)
1168 1168 except (IOError, OSError):
1169 1169 hardlink = False
1170 1170 shutil.copy(src, dst)
1171 1171 else:
1172 1172 shutil.copy(src, dst)
1173 1173 num += 1
1174 1174 progress(topic, num)
1175 1175 progress(topic, None)
1176 1176
1177 1177 return hardlink, num
1178 1178
1179 1179 _winreservednames = '''con prn aux nul
1180 1180 com1 com2 com3 com4 com5 com6 com7 com8 com9
1181 1181 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1182 1182 _winreservedchars = ':*?"<>|'
1183 1183 def checkwinfilename(path):
1184 1184 r'''Check that the base-relative path is a valid filename on Windows.
1185 1185 Returns None if the path is ok, or a UI string describing the problem.
1186 1186
1187 1187 >>> checkwinfilename("just/a/normal/path")
1188 1188 >>> checkwinfilename("foo/bar/con.xml")
1189 1189 "filename contains 'con', which is reserved on Windows"
1190 1190 >>> checkwinfilename("foo/con.xml/bar")
1191 1191 "filename contains 'con', which is reserved on Windows"
1192 1192 >>> checkwinfilename("foo/bar/xml.con")
1193 1193 >>> checkwinfilename("foo/bar/AUX/bla.txt")
1194 1194 "filename contains 'AUX', which is reserved on Windows"
1195 1195 >>> checkwinfilename("foo/bar/bla:.txt")
1196 1196 "filename contains ':', which is reserved on Windows"
1197 1197 >>> checkwinfilename("foo/bar/b\07la.txt")
1198 1198 "filename contains '\\x07', which is invalid on Windows"
1199 1199 >>> checkwinfilename("foo/bar/bla ")
1200 1200 "filename ends with ' ', which is not allowed on Windows"
1201 1201 >>> checkwinfilename("../bar")
1202 1202 >>> checkwinfilename("foo\\")
1203 1203 "filename ends with '\\', which is invalid on Windows"
1204 1204 >>> checkwinfilename("foo\\/bar")
1205 1205 "directory name ends with '\\', which is invalid on Windows"
1206 1206 '''
1207 1207 if path.endswith('\\'):
1208 1208 return _("filename ends with '\\', which is invalid on Windows")
1209 1209 if '\\/' in path:
1210 1210 return _("directory name ends with '\\', which is invalid on Windows")
1211 1211 for n in path.replace('\\', '/').split('/'):
1212 1212 if not n:
1213 1213 continue
1214 1214 for c in _filenamebytestr(n):
1215 1215 if c in _winreservedchars:
1216 1216 return _("filename contains '%s', which is reserved "
1217 1217 "on Windows") % c
1218 1218 if ord(c) <= 31:
1219 1219 return _("filename contains %r, which is invalid "
1220 1220 "on Windows") % c
1221 1221 base = n.split('.')[0]
1222 1222 if base and base.lower() in _winreservednames:
1223 1223 return _("filename contains '%s', which is reserved "
1224 1224 "on Windows") % base
1225 1225 t = n[-1]
1226 1226 if t in '. ' and n not in '..':
1227 1227 return _("filename ends with '%s', which is not allowed "
1228 1228 "on Windows") % t
1229 1229
1230 1230 if pycompat.osname == 'nt':
1231 1231 checkosfilename = checkwinfilename
1232 1232 timer = time.clock
1233 1233 else:
1234 1234 checkosfilename = platform.checkosfilename
1235 1235 timer = time.time
1236 1236
1237 1237 if safehasattr(time, "perf_counter"):
1238 1238 timer = time.perf_counter
1239 1239
1240 1240 def makelock(info, pathname):
1241 1241 try:
1242 1242 return os.symlink(info, pathname)
1243 1243 except OSError as why:
1244 1244 if why.errno == errno.EEXIST:
1245 1245 raise
1246 1246 except AttributeError: # no symlink in os
1247 1247 pass
1248 1248
1249 1249 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1250 1250 os.write(ld, info)
1251 1251 os.close(ld)
1252 1252
1253 1253 def readlock(pathname):
1254 1254 try:
1255 1255 return os.readlink(pathname)
1256 1256 except OSError as why:
1257 1257 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1258 1258 raise
1259 1259 except AttributeError: # no symlink in os
1260 1260 pass
1261 1261 fp = posixfile(pathname)
1262 1262 r = fp.read()
1263 1263 fp.close()
1264 1264 return r
1265 1265
1266 1266 def fstat(fp):
1267 1267 '''stat file object that may not have fileno method.'''
1268 1268 try:
1269 1269 return os.fstat(fp.fileno())
1270 1270 except AttributeError:
1271 1271 return os.stat(fp.name)
1272 1272
1273 1273 # File system features
1274 1274
1275 1275 def fscasesensitive(path):
1276 1276 """
1277 1277 Return true if the given path is on a case-sensitive filesystem
1278 1278
1279 1279 Requires a path (like /foo/.hg) ending with a foldable final
1280 1280 directory component.
1281 1281 """
1282 1282 s1 = os.lstat(path)
1283 1283 d, b = os.path.split(path)
1284 1284 b2 = b.upper()
1285 1285 if b == b2:
1286 1286 b2 = b.lower()
1287 1287 if b == b2:
1288 1288 return True # no evidence against case sensitivity
1289 1289 p2 = os.path.join(d, b2)
1290 1290 try:
1291 1291 s2 = os.lstat(p2)
1292 1292 if s2 == s1:
1293 1293 return False
1294 1294 return True
1295 1295 except OSError:
1296 1296 return True
1297 1297
1298 1298 try:
1299 1299 import re2
1300 1300 _re2 = None
1301 1301 except ImportError:
1302 1302 _re2 = False
1303 1303
1304 1304 class _re(object):
1305 1305 def _checkre2(self):
1306 1306 global _re2
1307 1307 try:
1308 1308 # check if match works, see issue3964
1309 1309 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1310 1310 except ImportError:
1311 1311 _re2 = False
1312 1312
1313 1313 def compile(self, pat, flags=0):
1314 1314 '''Compile a regular expression, using re2 if possible
1315 1315
1316 1316 For best performance, use only re2-compatible regexp features. The
1317 1317 only flags from the re module that are re2-compatible are
1318 1318 IGNORECASE and MULTILINE.'''
1319 1319 if _re2 is None:
1320 1320 self._checkre2()
1321 1321 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1322 1322 if flags & remod.IGNORECASE:
1323 1323 pat = '(?i)' + pat
1324 1324 if flags & remod.MULTILINE:
1325 1325 pat = '(?m)' + pat
1326 1326 try:
1327 1327 return re2.compile(pat)
1328 1328 except re2.error:
1329 1329 pass
1330 1330 return remod.compile(pat, flags)
1331 1331
1332 1332 @propertycache
1333 1333 def escape(self):
1334 1334 '''Return the version of escape corresponding to self.compile.
1335 1335
1336 1336 This is imperfect because whether re2 or re is used for a particular
1337 1337 function depends on the flags, etc, but it's the best we can do.
1338 1338 '''
1339 1339 global _re2
1340 1340 if _re2 is None:
1341 1341 self._checkre2()
1342 1342 if _re2:
1343 1343 return re2.escape
1344 1344 else:
1345 1345 return remod.escape
1346 1346
1347 1347 re = _re()
1348 1348
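# Illustrative sketch: callers treat the shim like the stdlib module and
# transparently get re2 when it is importable and the flags allow it:
#
# >>> pat = re.compile(r'\[([^\[]+)\]')
# >>> pat.match('[ui]').group(1)
# 'ui'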
1349 1349 _fspathcache = {}
1350 1350 def fspath(name, root):
1351 1351 '''Get name in the case stored in the filesystem
1352 1352
1353 1353 The name should be relative to root, and be normcase-ed for efficiency.
1354 1354
1355 1355 Note that this function is unnecessary, and should not be
1356 1356 called, for case-sensitive filesystems (simply because it's expensive).
1357 1357
1358 1358 The root should be normcase-ed, too.
1359 1359 '''
1360 1360 def _makefspathcacheentry(dir):
1361 1361 return dict((normcase(n), n) for n in os.listdir(dir))
1362 1362
1363 1363 seps = pycompat.ossep
1364 1364 if pycompat.osaltsep:
1365 1365 seps = seps + pycompat.osaltsep
1366 1366 # Protect backslashes. This gets silly very quickly.
1367 1367 seps = seps.replace('\\', '\\\\')
1368 1368 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1369 1369 dir = os.path.normpath(root)
1370 1370 result = []
1371 1371 for part, sep in pattern.findall(name):
1372 1372 if sep:
1373 1373 result.append(sep)
1374 1374 continue
1375 1375
1376 1376 if dir not in _fspathcache:
1377 1377 _fspathcache[dir] = _makefspathcacheentry(dir)
1378 1378 contents = _fspathcache[dir]
1379 1379
1380 1380 found = contents.get(part)
1381 1381 if not found:
1382 1382 # retry "once per directory" per "dirstate.walk" which
1383 1383 # may take place for each patch of "hg qpush", for example
1384 1384 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1385 1385 found = contents.get(part)
1386 1386
1387 1387 result.append(found or part)
1388 1388 dir = os.path.join(dir, part)
1389 1389
1390 1390 return ''.join(result)
1391 1391
1392 1392 def getfstype(dirpath):
1393 1393 '''Get the filesystem type name from a directory (best-effort)
1394 1394
1395 1395 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
1396 1396 '''
1397 1397 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1398 1398
1399 1399 def checknlink(testfile):
1400 1400 '''check whether hardlink count reporting works properly'''
1401 1401
1402 1402 # testfile may be open, so we need a separate file for checking to
1403 1403 # work around issue2543 (or testfile may get lost on Samba shares)
1404 1404 f1 = testfile + ".hgtmp1"
1405 1405 if os.path.lexists(f1):
1406 1406 return False
1407 1407 try:
1408 1408 posixfile(f1, 'w').close()
1409 1409 except IOError:
1410 1410 try:
1411 1411 os.unlink(f1)
1412 1412 except OSError:
1413 1413 pass
1414 1414 return False
1415 1415
1416 1416 f2 = testfile + ".hgtmp2"
1417 1417 fd = None
1418 1418 try:
1419 1419 oslink(f1, f2)
1420 1420 # nlinks() may behave differently for files on Windows shares if
1421 1421 # the file is open.
1422 1422 fd = posixfile(f2)
1423 1423 return nlinks(f2) > 1
1424 1424 except OSError:
1425 1425 return False
1426 1426 finally:
1427 1427 if fd is not None:
1428 1428 fd.close()
1429 1429 for f in (f1, f2):
1430 1430 try:
1431 1431 os.unlink(f)
1432 1432 except OSError:
1433 1433 pass
1434 1434
1435 1435 def endswithsep(path):
1436 1436 '''Check path ends with os.sep or os.altsep.'''
1437 1437 return (path.endswith(pycompat.ossep)
1438 1438 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1439 1439
1440 1440 def splitpath(path):
1441 1441 '''Split path by os.sep.
1442 1442 Note that this function does not use os.altsep because this is
1443 1443 an alternative of simple "xxx.split(os.sep)".
1444 1444 It is recommended to use os.path.normpath() before using this
1445 1445 function if needed.'''
1446 1446 return path.split(pycompat.ossep)
1447 1447
1448 1448 def gui():
1449 1449 '''Are we running in a GUI?'''
1450 1450 if pycompat.sysplatform == 'darwin':
1451 1451 if 'SSH_CONNECTION' in encoding.environ:
1452 1452 # handle SSH access to a box where the user is logged in
1453 1453 return False
1454 1454 elif getattr(osutil, 'isgui', None):
1455 1455 # check if a CoreGraphics session is available
1456 1456 return osutil.isgui()
1457 1457 else:
1458 1458 # pure build; use a safe default
1459 1459 return True
1460 1460 else:
1461 1461 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1462 1462
1463 1463 def mktempcopy(name, emptyok=False, createmode=None):
1464 1464 """Create a temporary file with the same contents from name
1465 1465
1466 1466 The permission bits are copied from the original file.
1467 1467
1468 1468 If the temporary file is going to be truncated immediately, you
1469 1469 can use emptyok=True as an optimization.
1470 1470
1471 1471 Returns the name of the temporary file.
1472 1472 """
1473 1473 d, fn = os.path.split(name)
1474 1474 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1475 1475 os.close(fd)
1476 1476 # Temporary files are created with mode 0600, which is usually not
1477 1477 # what we want. If the original file already exists, just copy
1478 1478 # its mode. Otherwise, manually obey umask.
1479 1479 copymode(name, temp, createmode)
1480 1480 if emptyok:
1481 1481 return temp
1482 1482 try:
1483 1483 try:
1484 1484 ifp = posixfile(name, "rb")
1485 1485 except IOError as inst:
1486 1486 if inst.errno == errno.ENOENT:
1487 1487 return temp
1488 1488 if not getattr(inst, 'filename', None):
1489 1489 inst.filename = name
1490 1490 raise
1491 1491 ofp = posixfile(temp, "wb")
1492 1492 for chunk in filechunkiter(ifp):
1493 1493 ofp.write(chunk)
1494 1494 ifp.close()
1495 1495 ofp.close()
1496 1496 except: # re-raises
1497 1497 try: os.unlink(temp)
1498 1498 except OSError: pass
1499 1499 raise
1500 1500 return temp
1501 1501
1502 1502 class filestat(object):
1503 1503 """help to exactly detect change of a file
1504 1504
1505 1505 'stat' attribute is result of 'os.stat()' if specified 'path'
1506 1506 exists. Otherwise, it is None. This can avoid preparative
1507 1507 'exists()' examination on client side of this class.
1508 1508 """
1509 def __init__(self, path):
1509 def __init__(self, stat):
1510 self.stat = stat
1511
1512 @classmethod
1513 def frompath(cls, path):
1510 1514 try:
1511 self.stat = os.stat(path)
1515 stat = os.stat(path)
1512 1516 except OSError as err:
1513 1517 if err.errno != errno.ENOENT:
1514 1518 raise
1515 self.stat = None
1519 stat = None
1520 return cls(stat)
1516 1521
1517 1522 __hash__ = object.__hash__
1518 1523
1519 1524 def __eq__(self, old):
1520 1525 try:
1521 1526 # if ambiguity between stat of new and old file is
1522 1527 # avoided, comparison of size, ctime and mtime is enough
1523 1528 # to exactly detect change of a file regardless of platform
1524 1529 return (self.stat.st_size == old.stat.st_size and
1525 1530 self.stat.st_ctime == old.stat.st_ctime and
1526 1531 self.stat.st_mtime == old.stat.st_mtime)
1527 1532 except AttributeError:
1528 1533 pass
1529 1534 try:
1530 1535 return self.stat is None and old.stat is None
1531 1536 except AttributeError:
1532 1537 return False
1533 1538
1534 1539 def isambig(self, old):
1535 1540 """Examine whether new (= self) stat is ambiguous against old one
1536 1541
1537 1542 "S[N]" below means stat of a file at N-th change:
1538 1543
1539 1544 - S[n-1].ctime < S[n].ctime: can detect change of a file
1540 1545 - S[n-1].ctime == S[n].ctime
1541 1546 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1542 1547 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1543 1548 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1544 1549 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1545 1550
1546 1551 Case (*2) above means that a file was changed twice or more within
1547 1552 the same second (= S[n-1].ctime), and comparison of timestamps
1548 1553 is ambiguous.
1549 1554
1550 1555 The basic idea to avoid such ambiguity is "advance mtime by 1 sec,
1551 1556 if the timestamp is ambiguous".
1552 1557
1553 1558 But advancing mtime only in case (*2) doesn't work as
1554 1559 expected, because naturally advanced S[n].mtime in case (*1)
1555 1560 might be equal to manually advanced S[n-1 or earlier].mtime.
1556 1561
1557 1562 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1558 1563 treated as ambiguous regardless of mtime, to avoid overlooking
1559 1564 changes hidden by collisions between such mtimes.
1560 1565
1561 1566 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1562 1567 S[n].mtime", even if size of a file isn't changed.
1563 1568 """
1564 1569 try:
1565 1570 return (self.stat.st_ctime == old.stat.st_ctime)
1566 1571 except AttributeError:
1567 1572 return False
1568 1573
1569 1574 def avoidambig(self, path, old):
1570 1575 """Change file stat of specified path to avoid ambiguity
1571 1576
1572 1577 'old' should be previous filestat of 'path'.
1573 1578
1574 1579 This skips avoiding ambiguity, if a process doesn't have
1575 1580 appropriate privileges for 'path'. This returns False in this
1576 1581 case.
1577 1582
1578 1583 Otherwise, this returns True, as "ambiguity is avoided".
1579 1584 """
1580 1585 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1581 1586 try:
1582 1587 os.utime(path, (advanced, advanced))
1583 1588 except OSError as inst:
1584 1589 if inst.errno == errno.EPERM:
1585 1590 # utime() on the file created by another user causes EPERM,
1586 1591 # if a process doesn't have appropriate privileges
1587 1592 return False
1588 1593 raise
1589 1594 return True
1590 1595
1591 1596 def __ne__(self, other):
1592 1597 return not self == other
1593 1598
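# Illustrative sketch (hypothetical path and writer): detect and defuse an
# ambiguous rewrite of a lock-guarded file:
#
#   old = filestat.frompath('.hg/dirstate')
#   rewrite() # hypothetical function that replaces the file in place
#   new = filestat.frompath('.hg/dirstate')
#   if new.isambig(old):
#       new.avoidambig('.hg/dirstate', old) # bump mtime by one second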
1594 1599 class atomictempfile(object):
1595 1600 '''writable file object that atomically updates a file
1596 1601
1597 1602 All writes will go to a temporary copy of the original file. Call
1598 1603 close() when you are done writing, and atomictempfile will rename
1599 1604 the temporary copy to the original name, making the changes
1600 1605 visible. If the object is destroyed without being closed, all your
1601 1606 writes are discarded.
1602 1607
1603 1608 checkambig argument of constructor is used with filestat, and is
1604 1609 useful only if target file is guarded by any lock (e.g. repo.lock
1605 1610 or repo.wlock).
1606 1611 '''
1607 1612 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1608 1613 self.__name = name # permanent name
1609 1614 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1610 1615 createmode=createmode)
1611 1616 self._fp = posixfile(self._tempname, mode)
1612 1617 self._checkambig = checkambig
1613 1618
1614 1619 # delegated methods
1615 1620 self.read = self._fp.read
1616 1621 self.write = self._fp.write
1617 1622 self.seek = self._fp.seek
1618 1623 self.tell = self._fp.tell
1619 1624 self.fileno = self._fp.fileno
1620 1625
1621 1626 def close(self):
1622 1627 if not self._fp.closed:
1623 1628 self._fp.close()
1624 1629 filename = localpath(self.__name)
1625 oldstat = self._checkambig and filestat(filename)
1630 oldstat = self._checkambig and filestat.frompath(filename)
1626 1631 if oldstat and oldstat.stat:
1627 1632 rename(self._tempname, filename)
1628 newstat = filestat(filename)
1633 newstat = filestat.frompath(filename)
1629 1634 if newstat.isambig(oldstat):
1630 1635 # stat of changed file is ambiguous to original one
1631 1636 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1632 1637 os.utime(filename, (advanced, advanced))
1633 1638 else:
1634 1639 rename(self._tempname, filename)
1635 1640
1636 1641 def discard(self):
1637 1642 if not self._fp.closed:
1638 1643 try:
1639 1644 os.unlink(self._tempname)
1640 1645 except OSError:
1641 1646 pass
1642 1647 self._fp.close()
1643 1648
1644 1649 def __del__(self):
1645 1650 if safehasattr(self, '_fp'): # constructor actually did something
1646 1651 self.discard()
1647 1652
1648 1653 def __enter__(self):
1649 1654 return self
1650 1655
1651 1656 def __exit__(self, exctype, excvalue, traceback):
1652 1657 if exctype is not None:
1653 1658 self.discard()
1654 1659 else:
1655 1660 self.close()
1656 1661
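# Illustrative sketch (hypothetical path): the rename happens only on a
# clean close; raising inside the block discards the temporary file:
#
#   with atomictempfile('.hg/requires', checkambig=True) as fp:
#       fp.write('revlogv1\n')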
1657 1662 def unlinkpath(f, ignoremissing=False):
1658 1663 """unlink and remove the directory if it is empty"""
1659 1664 if ignoremissing:
1660 1665 tryunlink(f)
1661 1666 else:
1662 1667 unlink(f)
1663 1668 # try removing directories that might now be empty
1664 1669 try:
1665 1670 removedirs(os.path.dirname(f))
1666 1671 except OSError:
1667 1672 pass
1668 1673
1669 1674 def tryunlink(f):
1670 1675 """Attempt to remove a file, ignoring ENOENT errors."""
1671 1676 try:
1672 1677 unlink(f)
1673 1678 except OSError as e:
1674 1679 if e.errno != errno.ENOENT:
1675 1680 raise
1676 1681
1677 1682 def makedirs(name, mode=None, notindexed=False):
1678 1683 """recursive directory creation with parent mode inheritance
1679 1684
1680 1685 Newly created directories are marked as "not to be indexed by
1681 1686 the content indexing service", if ``notindexed`` is specified
1682 1687 for "write" mode access.
1683 1688 """
1684 1689 try:
1685 1690 makedir(name, notindexed)
1686 1691 except OSError as err:
1687 1692 if err.errno == errno.EEXIST:
1688 1693 return
1689 1694 if err.errno != errno.ENOENT or not name:
1690 1695 raise
1691 1696 parent = os.path.dirname(os.path.abspath(name))
1692 1697 if parent == name:
1693 1698 raise
1694 1699 makedirs(parent, mode, notindexed)
1695 1700 try:
1696 1701 makedir(name, notindexed)
1697 1702 except OSError as err:
1698 1703 # Catch EEXIST to handle races
1699 1704 if err.errno == errno.EEXIST:
1700 1705 return
1701 1706 raise
1702 1707 if mode is not None:
1703 1708 os.chmod(name, mode)
1704 1709
1705 1710 def readfile(path):
1706 1711 with open(path, 'rb') as fp:
1707 1712 return fp.read()
1708 1713
1709 1714 def writefile(path, text):
1710 1715 with open(path, 'wb') as fp:
1711 1716 fp.write(text)
1712 1717
1713 1718 def appendfile(path, text):
1714 1719 with open(path, 'ab') as fp:
1715 1720 fp.write(text)
1716 1721
1717 1722 class chunkbuffer(object):
1718 1723 """Allow arbitrary sized chunks of data to be efficiently read from an
1719 1724 iterator over chunks of arbitrary size."""
1720 1725
1721 1726 def __init__(self, in_iter):
1722 1727 """in_iter is the iterator that's iterating over the input chunks."""
1723 1728 def splitbig(chunks):
1724 1729 for chunk in chunks:
1725 1730 if len(chunk) > 2**20:
1726 1731 pos = 0
1727 1732 while pos < len(chunk):
1728 1733 end = pos + 2 ** 18
1729 1734 yield chunk[pos:end]
1730 1735 pos = end
1731 1736 else:
1732 1737 yield chunk
1733 1738 self.iter = splitbig(in_iter)
1734 1739 self._queue = collections.deque()
1735 1740 self._chunkoffset = 0
1736 1741
1737 1742 def read(self, l=None):
1738 1743 """Read L bytes of data from the iterator of chunks of data.
1739 1744 Returns less than L bytes if the iterator runs dry.
1740 1745
1741 1746 If size parameter is omitted, read everything"""
1742 1747 if l is None:
1743 1748 return ''.join(self.iter)
1744 1749
1745 1750 left = l
1746 1751 buf = []
1747 1752 queue = self._queue
1748 1753 while left > 0:
1749 1754 # refill the queue
1750 1755 if not queue:
1751 1756 target = 2**18
1752 1757 for chunk in self.iter:
1753 1758 queue.append(chunk)
1754 1759 target -= len(chunk)
1755 1760 if target <= 0:
1756 1761 break
1757 1762 if not queue:
1758 1763 break
1759 1764
1760 1765 # The easy way to do this would be to queue.popleft(), modify the
1761 1766 # chunk (if necessary), then queue.appendleft(). However, for cases
1762 1767 # where we read partial chunk content, this incurs 2 dequeue
1763 1768 # mutations and creates a new str for the remaining chunk in the
1764 1769 # queue. Our code below avoids this overhead.
1765 1770
1766 1771 chunk = queue[0]
1767 1772 chunkl = len(chunk)
1768 1773 offset = self._chunkoffset
1769 1774
1770 1775 # Use full chunk.
1771 1776 if offset == 0 and left >= chunkl:
1772 1777 left -= chunkl
1773 1778 queue.popleft()
1774 1779 buf.append(chunk)
1775 1780 # self._chunkoffset remains at 0.
1776 1781 continue
1777 1782
1778 1783 chunkremaining = chunkl - offset
1779 1784
1780 1785 # Use all of unconsumed part of chunk.
1781 1786 if left >= chunkremaining:
1782 1787 left -= chunkremaining
1783 1788 queue.popleft()
1784 1789 # offset == 0 is enabled by block above, so this won't merely
1785 1790 # copy via ``chunk[0:]``.
1786 1791 buf.append(chunk[offset:])
1787 1792 self._chunkoffset = 0
1788 1793
1789 1794 # Partial chunk needed.
1790 1795 else:
1791 1796 buf.append(chunk[offset:offset + left])
1792 1797 self._chunkoffset += left
1793 1798 left -= chunkremaining
1794 1799
1795 1800 return ''.join(buf)
1796 1801
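# Illustrative sketch: re-slicing ragged chunks into exact read sizes:
#
# >>> cb = chunkbuffer(iter(['ab', 'cde', 'f']))
# >>> cb.read(4)
# 'abcd'
# >>> cb.read(2)
# 'ef'
# >>> cb.read(2) # source exhausted
# ''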
1797 1802 def filechunkiter(f, size=131072, limit=None):
1798 1803 """Create a generator that produces the data in the file size
1799 1804 (default 131072) bytes at a time, up to optional limit (default is
1800 1805 to read all data). Chunks may be less than size bytes if the
1801 1806 chunk is the last chunk in the file, or the file is a socket or
1802 1807 some other type of file that sometimes reads less data than is
1803 1808 requested."""
1804 1809 assert size >= 0
1805 1810 assert limit is None or limit >= 0
1806 1811 while True:
1807 1812 if limit is None:
1808 1813 nbytes = size
1809 1814 else:
1810 1815 nbytes = min(limit, size)
1811 1816 s = nbytes and f.read(nbytes)
1812 1817 if not s:
1813 1818 break
1814 1819 if limit:
1815 1820 limit -= len(s)
1816 1821 yield s
1817 1822
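# Illustrative sketch: a 300000-byte file yields two full chunks and a tail:
#
# >>> import io
# >>> f = io.BytesIO('x' * 300000)
# >>> [len(c) for c in filechunkiter(f)]
# [131072, 131072, 37856]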
1818 1823 def makedate(timestamp=None):
1819 1824 '''Return a unix timestamp (or the current time) as a (unixtime,
1820 1825 offset) tuple based on the local timezone.'''
1821 1826 if timestamp is None:
1822 1827 timestamp = time.time()
1823 1828 if timestamp < 0:
1824 1829 hint = _("check your clock")
1825 1830 raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
1826 1831 delta = (datetime.datetime.utcfromtimestamp(timestamp) -
1827 1832 datetime.datetime.fromtimestamp(timestamp))
1828 1833 tz = delta.days * 86400 + delta.seconds
1829 1834 return timestamp, tz
1830 1835
1831 1836 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1832 1837 """represent a (unixtime, offset) tuple as a localized time.
1833 1838 unixtime is seconds since the epoch, and offset is the time zone's
1834 1839 number of seconds away from UTC.
1835 1840
1836 1841 >>> datestr((0, 0))
1837 1842 'Thu Jan 01 00:00:00 1970 +0000'
1838 1843 >>> datestr((42, 0))
1839 1844 'Thu Jan 01 00:00:42 1970 +0000'
1840 1845 >>> datestr((-42, 0))
1841 1846 'Wed Dec 31 23:59:18 1969 +0000'
1842 1847 >>> datestr((0x7fffffff, 0))
1843 1848 'Tue Jan 19 03:14:07 2038 +0000'
1844 1849 >>> datestr((-0x80000000, 0))
1845 1850 'Fri Dec 13 20:45:52 1901 +0000'
1846 1851 """
1847 1852 t, tz = date or makedate()
1848 1853 if "%1" in format or "%2" in format or "%z" in format:
1849 1854 sign = (tz > 0) and "-" or "+"
1850 1855 minutes = abs(tz) // 60
1851 1856 q, r = divmod(minutes, 60)
1852 1857 format = format.replace("%z", "%1%2")
1853 1858 format = format.replace("%1", "%c%02d" % (sign, q))
1854 1859 format = format.replace("%2", "%02d" % r)
1855 1860 d = t - tz
1856 1861 if d > 0x7fffffff:
1857 1862 d = 0x7fffffff
1858 1863 elif d < -0x80000000:
1859 1864 d = -0x80000000
1860 1865 # Never use time.gmtime() and datetime.datetime.fromtimestamp()
1861 1866 # because they use the gmtime() system call which is buggy on Windows
1862 1867 # for negative values.
1863 1868 t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
1864 1869 s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
1865 1870 return s
1866 1871
1867 1872 def shortdate(date=None):
1868 1873 """turn (timestamp, tzoff) tuple into iso 8631 date."""
1869 1874 return datestr(date, format='%Y-%m-%d')
1870 1875
1871 1876 def parsetimezone(s):
1872 1877 """find a trailing timezone, if any, in string, and return a
1873 1878 (offset, remainder) pair"""
1874 1879
1875 1880 if s.endswith("GMT") or s.endswith("UTC"):
1876 1881 return 0, s[:-3].rstrip()
1877 1882
1878 1883 # Unix-style timezones [+-]hhmm
1879 1884 if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
1880 1885 sign = (s[-5] == "+") and 1 or -1
1881 1886 hours = int(s[-4:-2])
1882 1887 minutes = int(s[-2:])
1883 1888 return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()
1884 1889
1885 1890 # ISO8601 trailing Z
1886 1891 if s.endswith("Z") and s[-2:-1].isdigit():
1887 1892 return 0, s[:-1]
1888 1893
1889 1894 # ISO8601-style [+-]hh:mm
1890 1895 if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
1891 1896 s[-5:-3].isdigit() and s[-2:].isdigit()):
1892 1897 sign = (s[-6] == "+") and 1 or -1
1893 1898 hours = int(s[-5:-3])
1894 1899 minutes = int(s[-2:])
1895 1900 return -sign * (hours * 60 + minutes) * 60, s[:-6]
1896 1901
1897 1902 return None, s
1898 1903
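# Illustrative sketch: a trailing UTC+05:30 zone parses to -19800 seconds
# (offsets are positive west of UTC) plus the remaining string:
#
# >>> parsetimezone('2017-01-01 12:00 +0530')
# (-19800, '2017-01-01 12:00')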
1899 1904 def strdate(string, format, defaults=None):
1900 1905 """parse a localized time string and return a (unixtime, offset) tuple.
1901 1906 if the string cannot be parsed, ValueError is raised."""
1902 1907 if defaults is None:
1903 1908 defaults = {}
1904 1909
1905 1910 # NOTE: unixtime = localunixtime + offset
1906 1911 offset, date = parsetimezone(string)
1907 1912
1908 1913 # add missing elements from defaults
1909 1914 usenow = False # default to using biased defaults
1910 1915 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
1911 1916 part = pycompat.bytestr(part)
1912 1917 found = [True for p in part if ("%"+p) in format]
1913 1918 if not found:
1914 1919 date += "@" + defaults[part][usenow]
1915 1920 format += "@%" + part[0]
1916 1921 else:
1917 1922 # We've found a specific time element, less specific time
1918 1923 # elements are relative to today
1919 1924 usenow = True
1920 1925
1921 1926 timetuple = time.strptime(encoding.strfromlocal(date),
1922 1927 encoding.strfromlocal(format))
1923 1928 localunixtime = int(calendar.timegm(timetuple))
1924 1929 if offset is None:
1925 1930 # local timezone
1926 1931 unixtime = int(time.mktime(timetuple))
1927 1932 offset = unixtime - localunixtime
1928 1933 else:
1929 1934 unixtime = localunixtime + offset
1930 1935 return unixtime, offset
1931 1936
1932 1937 def parsedate(date, formats=None, bias=None):
1933 1938 """parse a localized date/time and return a (unixtime, offset) tuple.
1934 1939
1935 1940 The date may be a "unixtime offset" string or in one of the specified
1936 1941 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1937 1942
1938 1943 >>> parsedate(' today ') == parsedate(\
1939 1944 datetime.date.today().strftime('%b %d'))
1940 1945 True
1941 1946 >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
1942 1947 datetime.timedelta(days=1)\
1943 1948 ).strftime('%b %d'))
1944 1949 True
1945 1950 >>> now, tz = makedate()
1946 1951 >>> strnow, strtz = parsedate('now')
1947 1952 >>> (strnow - now) < 1
1948 1953 True
1949 1954 >>> tz == strtz
1950 1955 True
1951 1956 """
1952 1957 if bias is None:
1953 1958 bias = {}
1954 1959 if not date:
1955 1960 return 0, 0
1956 1961 if isinstance(date, tuple) and len(date) == 2:
1957 1962 return date
1958 1963 if not formats:
1959 1964 formats = defaultdateformats
1960 1965 date = date.strip()
1961 1966
1962 1967 if date == 'now' or date == _('now'):
1963 1968 return makedate()
1964 1969 if date == 'today' or date == _('today'):
1965 1970 date = datetime.date.today().strftime('%b %d')
1966 1971 elif date == 'yesterday' or date == _('yesterday'):
1967 1972 date = (datetime.date.today() -
1968 1973 datetime.timedelta(days=1)).strftime('%b %d')
1969 1974
1970 1975 try:
1971 1976 when, offset = map(int, date.split(' '))
1972 1977 except ValueError:
1973 1978 # fill out defaults
1974 1979 now = makedate()
1975 1980 defaults = {}
1976 1981 for part in ("d", "mb", "yY", "HI", "M", "S"):
1977 1982 # this piece is for rounding the specific end of unknowns
1978 1983 b = bias.get(part)
1979 1984 if b is None:
1980 1985 if part[0:1] in "HMS":
1981 1986 b = "00"
1982 1987 else:
1983 1988 b = "0"
1984 1989
1985 1990 # this piece is for matching the generic end to today's date
1986 1991 n = datestr(now, "%" + part[0:1])
1987 1992
1988 1993 defaults[part] = (b, n)
1989 1994
1990 1995 for format in formats:
1991 1996 try:
1992 1997 when, offset = strdate(date, format, defaults)
1993 1998 except (ValueError, OverflowError):
1994 1999 pass
1995 2000 else:
1996 2001 break
1997 2002 else:
1998 2003 raise error.ParseError(_('invalid date: %r') % date)
1999 2004 # validate explicit (probably user-specified) date and
2000 2005 # time zone offset. values must fit in signed 32 bits for
2001 2006 # current 32-bit linux runtimes. timezones go from UTC-12
2002 2007 # to UTC+14
2003 2008 if when < -0x80000000 or when > 0x7fffffff:
2004 2009 raise error.ParseError(_('date exceeds 32 bits: %d') % when)
2005 2010 if offset < -50400 or offset > 43200:
2006 2011 raise error.ParseError(_('impossible time zone offset: %d') % offset)
2007 2012 return when, offset
2008 2013
2009 2014 def matchdate(date):
2010 2015 """Return a function that matches a given date match specifier
2011 2016
2012 2017 Formats include:
2013 2018
2014 2019 '{date}' match a given date to the accuracy provided
2015 2020
2016 2021 '<{date}' on or before a given date
2017 2022
2018 2023 '>{date}' on or after a given date
2019 2024
2020 2025 >>> p1 = parsedate("10:29:59")
2021 2026 >>> p2 = parsedate("10:30:00")
2022 2027 >>> p3 = parsedate("10:30:59")
2023 2028 >>> p4 = parsedate("10:31:00")
2024 2029 >>> p5 = parsedate("Sep 15 10:30:00 1999")
2025 2030 >>> f = matchdate("10:30")
2026 2031 >>> f(p1[0])
2027 2032 False
2028 2033 >>> f(p2[0])
2029 2034 True
2030 2035 >>> f(p3[0])
2031 2036 True
2032 2037 >>> f(p4[0])
2033 2038 False
2034 2039 >>> f(p5[0])
2035 2040 False
2036 2041 """
2037 2042
2038 2043 def lower(date):
2039 2044 d = {'mb': "1", 'd': "1"}
2040 2045 return parsedate(date, extendeddateformats, d)[0]
2041 2046
2042 2047 def upper(date):
2043 2048 d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
2044 2049 for days in ("31", "30", "29"):
2045 2050 try:
2046 2051 d["d"] = days
2047 2052 return parsedate(date, extendeddateformats, d)[0]
2048 2053 except Abort:
2049 2054 pass
2050 2055 d["d"] = "28"
2051 2056 return parsedate(date, extendeddateformats, d)[0]
2052 2057
2053 2058 date = date.strip()
2054 2059
2055 2060 if not date:
2056 2061 raise Abort(_("dates cannot consist entirely of whitespace"))
2057 2062 elif date[0] == "<":
2058 2063 if not date[1:]:
2059 2064 raise Abort(_("invalid day spec, use '<DATE'"))
2060 2065 when = upper(date[1:])
2061 2066 return lambda x: x <= when
2062 2067 elif date[0] == ">":
2063 2068 if not date[1:]:
2064 2069 raise Abort(_("invalid day spec, use '>DATE'"))
2065 2070 when = lower(date[1:])
2066 2071 return lambda x: x >= when
2067 2072 elif date[0] == "-":
2068 2073 try:
2069 2074 days = int(date[1:])
2070 2075 except ValueError:
2071 2076 raise Abort(_("invalid day spec: %s") % date[1:])
2072 2077 if days < 0:
2073 2078 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
2074 2079 % date[1:])
2075 2080 when = makedate()[0] - days * 3600 * 24
2076 2081 return lambda x: x >= when
2077 2082 elif " to " in date:
2078 2083 a, b = date.split(" to ")
2079 2084 start, stop = lower(a), upper(b)
2080 2085 return lambda x: x >= start and x <= stop
2081 2086 else:
2082 2087 start, stop = lower(date), upper(date)
2083 2088 return lambda x: x >= start and x <= stop
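
# A sketch of the less obvious match specs (dates are illustrative, and the
# month/year form assumes a matching entry in extendeddateformats):
#
#   matchdate('-3')              # within the last 3 days
#   matchdate('may 1 to jul 1')  # inclusive range, via lower()/upper()
#   matchdate('feb 2000')        # any time in Feb 2000: lower() fills in
#                                # day 1, upper() the last valid day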
2084 2089
2085 2090 def stringmatcher(pattern, casesensitive=True):
2086 2091 """
2087 2092 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2088 2093 returns the matcher name, pattern, and matcher function.
2089 2094 missing or unknown prefixes are treated as literal matches.
2090 2095
2091 2096 helper for tests:
2092 2097 >>> def test(pattern, *tests):
2093 2098 ... kind, pattern, matcher = stringmatcher(pattern)
2094 2099 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2095 2100 >>> def itest(pattern, *tests):
2096 2101 ... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
2097 2102 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2098 2103
2099 2104 exact matching (no prefix):
2100 2105 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2101 2106 ('literal', 'abcdefg', [False, False, True])
2102 2107
2103 2108 regex matching ('re:' prefix)
2104 2109 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2105 2110 ('re', 'a.+b', [False, False, True])
2106 2111
2107 2112 force exact matches ('literal:' prefix)
2108 2113 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2109 2114 ('literal', 're:foobar', [False, True])
2110 2115
2111 2116 unknown prefixes are ignored and treated as literals
2112 2117 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2113 2118 ('literal', 'foo:bar', [False, False, True])
2114 2119
2115 2120 case insensitive regex matches
2116 2121 >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
2117 2122 ('re', 'A.+b', [False, False, True])
2118 2123
2119 2124 case insensitive literal matches
2120 2125 >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
2121 2126 ('literal', 'ABCDEFG', [False, False, True])
2122 2127 """
2123 2128 if pattern.startswith('re:'):
2124 2129 pattern = pattern[3:]
2125 2130 try:
2126 2131 flags = 0
2127 2132 if not casesensitive:
2128 2133 flags = remod.I
2129 2134 regex = remod.compile(pattern, flags)
2130 2135 except remod.error as e:
2131 2136 raise error.ParseError(_('invalid regular expression: %s')
2132 2137 % e)
2133 2138 return 're', pattern, regex.search
2134 2139 elif pattern.startswith('literal:'):
2135 2140 pattern = pattern[8:]
2136 2141
2137 2142 match = pattern.__eq__
2138 2143
2139 2144 if not casesensitive:
2140 2145 ipat = encoding.lower(pattern)
2141 2146 match = lambda s: ipat == encoding.lower(s)
2142 2147 return 'literal', pattern, match
2143 2148
2144 2149 def shortuser(user):
2145 2150 """Return a short representation of a user name or email address."""
2146 2151 f = user.find('@')
2147 2152 if f >= 0:
2148 2153 user = user[:f]
2149 2154 f = user.find('<')
2150 2155 if f >= 0:
2151 2156 user = user[f + 1:]
2152 2157 f = user.find(' ')
2153 2158 if f >= 0:
2154 2159 user = user[:f]
2155 2160 f = user.find('.')
2156 2161 if f >= 0:
2157 2162 user = user[:f]
2158 2163 return user
2159 2164
2160 2165 def emailuser(user):
2161 2166 """Return the user portion of an email address."""
2162 2167 f = user.find('@')
2163 2168 if f >= 0:
2164 2169 user = user[:f]
2165 2170 f = user.find('<')
2166 2171 if f >= 0:
2167 2172 user = user[f + 1:]
2168 2173 return user
2169 2174
2170 2175 def email(author):
2171 2176 '''get email of author.'''
2172 2177 r = author.find('>')
2173 2178 if r == -1:
2174 2179 r = None
2175 2180 return author[author.find('<') + 1:r]
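
# Usage sketch: the same author string through the three helpers
# (the address is illustrative):
#
#   a = 'Foo Bar <foo.bar@example.com>'
#   shortuser(a)  # -> 'foo'
#   emailuser(a)  # -> 'foo.bar'
#   email(a)      # -> 'foo.bar@example.com'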
2176 2181
2177 2182 def ellipsis(text, maxlength=400):
2178 2183 """Trim string to at most maxlength (default: 400) columns in display."""
2179 2184 return encoding.trim(text, maxlength, ellipsis='...')
2180 2185
2181 2186 def unitcountfn(*unittable):
2182 2187 '''return a function that renders a readable count of some quantity'''
2183 2188
2184 2189 def go(count):
2185 2190 for multiplier, divisor, format in unittable:
2186 2191 if abs(count) >= divisor * multiplier:
2187 2192 return format % (count / float(divisor))
2188 2193 return unittable[-1][2] % count
2189 2194
2190 2195 return go
2191 2196
2192 2197 def processlinerange(fromline, toline):
2193 2198 """Check that linerange <fromline>:<toline> makes sense and return a
2194 2199 0-based range.
2195 2200
2196 2201 >>> processlinerange(10, 20)
2197 2202 (9, 20)
2198 2203 >>> processlinerange(2, 1)
2199 2204 Traceback (most recent call last):
2200 2205 ...
2201 2206 ParseError: line range must be positive
2202 2207 >>> processlinerange(0, 5)
2203 2208 Traceback (most recent call last):
2204 2209 ...
2205 2210 ParseError: fromline must be strictly positive
2206 2211 """
2207 2212 if toline - fromline < 0:
2208 2213 raise error.ParseError(_("line range must be positive"))
2209 2214 if fromline < 1:
2210 2215 raise error.ParseError(_("fromline must be strictly positive"))
2211 2216 return fromline - 1, toline
2212 2217
2213 2218 bytecount = unitcountfn(
2214 2219 (100, 1 << 30, _('%.0f GB')),
2215 2220 (10, 1 << 30, _('%.1f GB')),
2216 2221 (1, 1 << 30, _('%.2f GB')),
2217 2222 (100, 1 << 20, _('%.0f MB')),
2218 2223 (10, 1 << 20, _('%.1f MB')),
2219 2224 (1, 1 << 20, _('%.2f MB')),
2220 2225 (100, 1 << 10, _('%.0f KB')),
2221 2226 (10, 1 << 10, _('%.1f KB')),
2222 2227 (1, 1 << 10, _('%.2f KB')),
2223 2228 (1, 1, _('%.0f bytes')),
2224 2229 )
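
# A minimal sketch of building a custom counter with unitcountfn() (the
# units and format strings here are illustrative, not part of the module):
#
#   countfn = unitcountfn(
#       (1, 1000000, '%.1f M'),
#       (1, 1000, '%.1f k'),
#       (1, 1, '%d'),
#   )
#   countfn(2500000)  # -> '2.5 M'
#   bytecount(2252)   # -> '2.20 KB'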
2225 2230
2226 2231 # Matches a single EOL, which can either be a CRLF (where any run of
2227 2232 # CRs before the LF is removed) or a bare LF. We do not care about
2228 2233 # old Macintosh files, so a stray CR is an error.
2229 2234 _eolre = remod.compile(br'\r*\n')
2230 2235
2231 2236 def tolf(s):
2232 2237 return _eolre.sub('\n', s)
2233 2238
2234 2239 def tocrlf(s):
2235 2240 return _eolre.sub('\r\n', s)
2236 2241
2237 2242 if pycompat.oslinesep == '\r\n':
2238 2243 tonativeeol = tocrlf
2239 2244 fromnativeeol = tolf
2240 2245 else:
2241 2246 tonativeeol = pycompat.identity
2242 2247 fromnativeeol = pycompat.identity
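
# Usage sketch: normalize data read from a file to LF internally and
# convert back to the platform convention on write ('fp' is illustrative):
#
#   data = fromnativeeol(fp.read())  # LF line endings from here on
#   fp.write(tonativeeol(data))      # CRLF on Windows, LF elsewhere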
2243 2248
2244 2249 def escapestr(s):
2245 2250 # call underlying function of s.encode('string_escape') directly for
2246 2251 # Python 3 compatibility
2247 2252 return codecs.escape_encode(s)[0]
2248 2253
2249 2254 def unescapestr(s):
2250 2255 return codecs.escape_decode(s)[0]
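
# Usage sketch:
#
#   escapestr('a\nb')     # -> 'a\\nb'
#   unescapestr('a\\nb')  # -> 'a\nb'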
2251 2256
2252 2257 def uirepr(s):
2253 2258 # Avoid double backslash in Windows path repr()
2254 2259 return repr(s).replace('\\\\', '\\')
2255 2260
2256 2261 # delay import of textwrap
2257 2262 def MBTextWrapper(**kwargs):
2258 2263 class tw(textwrap.TextWrapper):
2259 2264 """
2260 2265 Extend TextWrapper for width-awareness.
2261 2266
2262 2267 Neither the number of 'bytes' in any encoding nor the number of
2263 2268 'characters' is appropriate for calculating the terminal columns of
2264 2269 a given string.
2265 2270
2266 2271 The original TextWrapper implementation uses the built-in 'len()'
2267 2272 directly, so it must be overridden to use the display width of each
2268 2273 character.
2269 2274
2270 2275 Characters of 'ambiguous' width are treated as wide in East Asian
2271 2276 locales but as narrow elsewhere; the user must decide their width.
2272 2277 """
2273 2278 def _cutdown(self, ucstr, space_left):
2274 2279 l = 0
2275 2280 colwidth = encoding.ucolwidth
2276 2281 for i in xrange(len(ucstr)):
2277 2282 l += colwidth(ucstr[i])
2278 2283 if space_left < l:
2279 2284 return (ucstr[:i], ucstr[i:])
2280 2285 return ucstr, ''
2281 2286
2282 2287 # overriding of base class
2283 2288 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2284 2289 space_left = max(width - cur_len, 1)
2285 2290
2286 2291 if self.break_long_words:
2287 2292 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2288 2293 cur_line.append(cut)
2289 2294 reversed_chunks[-1] = res
2290 2295 elif not cur_line:
2291 2296 cur_line.append(reversed_chunks.pop())
2292 2297
2293 2298 # this overriding code is imported from TextWrapper of Python 2.6
2294 2299 # to calculate columns of string by 'encoding.ucolwidth()'
2295 2300 def _wrap_chunks(self, chunks):
2296 2301 colwidth = encoding.ucolwidth
2297 2302
2298 2303 lines = []
2299 2304 if self.width <= 0:
2300 2305 raise ValueError("invalid width %r (must be > 0)" % self.width)
2301 2306
2302 2307 # Arrange in reverse order so items can be efficiently popped
2303 2308 # from a stack of chunks.
2304 2309 chunks.reverse()
2305 2310
2306 2311 while chunks:
2307 2312
2308 2313 # Start the list of chunks that will make up the current line.
2309 2314 # cur_len is just the length of all the chunks in cur_line.
2310 2315 cur_line = []
2311 2316 cur_len = 0
2312 2317
2313 2318 # Figure out which static string will prefix this line.
2314 2319 if lines:
2315 2320 indent = self.subsequent_indent
2316 2321 else:
2317 2322 indent = self.initial_indent
2318 2323
2319 2324 # Maximum width for this line.
2320 2325 width = self.width - len(indent)
2321 2326
2322 2327 # First chunk on line is whitespace -- drop it, unless this
2323 2328 # is the very beginning of the text (i.e. no lines started yet).
2324 2329 if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
2325 2330 del chunks[-1]
2326 2331
2327 2332 while chunks:
2328 2333 l = colwidth(chunks[-1])
2329 2334
2330 2335 # Can at least squeeze this chunk onto the current line.
2331 2336 if cur_len + l <= width:
2332 2337 cur_line.append(chunks.pop())
2333 2338 cur_len += l
2334 2339
2335 2340 # Nope, this line is full.
2336 2341 else:
2337 2342 break
2338 2343
2339 2344 # The current line is full, and the next chunk is too big to
2340 2345 # fit on *any* line (not just this one).
2341 2346 if chunks and colwidth(chunks[-1]) > width:
2342 2347 self._handle_long_word(chunks, cur_line, cur_len, width)
2343 2348
2344 2349 # If the last chunk on this line is all whitespace, drop it.
2345 2350 if (self.drop_whitespace and
2346 2351 cur_line and cur_line[-1].strip() == r''):
2347 2352 del cur_line[-1]
2348 2353
2349 2354 # Convert current line back to a string and store it in list
2350 2355 # of all lines (return value).
2351 2356 if cur_line:
2352 2357 lines.append(indent + r''.join(cur_line))
2353 2358
2354 2359 return lines
2355 2360
2356 2361 global MBTextWrapper
2357 2362 MBTextWrapper = tw
2358 2363 return tw(**kwargs)
2359 2364
2360 2365 def wrap(line, width, initindent='', hangindent=''):
2361 2366 maxindent = max(len(hangindent), len(initindent))
2362 2367 if width <= maxindent:
2363 2368 # adjust for weird terminal size
2364 2369 width = max(78, maxindent + 1)
2365 2370 line = line.decode(pycompat.sysstr(encoding.encoding),
2366 2371 pycompat.sysstr(encoding.encodingmode))
2367 2372 initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
2368 2373 pycompat.sysstr(encoding.encodingmode))
2369 2374 hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
2370 2375 pycompat.sysstr(encoding.encodingmode))
2371 2376 wrapper = MBTextWrapper(width=width,
2372 2377 initial_indent=initindent,
2373 2378 subsequent_indent=hangindent)
2374 2379 return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2375 2380
2376 2381 if (pyplatform.python_implementation() == 'CPython' and
2377 2382 sys.version_info < (3, 0)):
2378 2383 # There is an issue in CPython that some IO methods do not handle EINTR
2379 2384 # correctly. The following table shows what CPython version (and functions)
2380 2385 # are affected (buggy: has the EINTR bug, okay: otherwise):
2381 2386 #
2382 2387 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2383 2388 # --------------------------------------------------
2384 2389 # fp.__iter__ | buggy | buggy | okay
2385 2390 # fp.read* | buggy | okay [1] | okay
2386 2391 #
2387 2392 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2388 2393 #
2389 2394 # Here we work around the EINTR issue for fileobj.__iter__. Other methods
2390 2395 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2391 2396 #
2392 2397 # Although we can work around the EINTR issue for fp.__iter__, it is slower:
2393 2398 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2394 2399 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2395 2400 # fp.__iter__ but not other fp.read* methods.
2396 2401 #
2397 2402 # On modern systems like Linux, the "read" syscall cannot be interrupted
2398 2403 # when reading "fast" files like on-disk files. So the EINTR issue only
2399 2404 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2400 2405 # files approximately as "fast" files and use the fast (unsafe) code path,
2401 2406 # to minimize the performance impact.
2402 2407 if sys.version_info >= (2, 7, 4):
2403 2408 # fp.readline deals with EINTR correctly, use it as a workaround.
2404 2409 def _safeiterfile(fp):
2405 2410 return iter(fp.readline, '')
2406 2411 else:
2407 2412 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2408 2413 # note: this may block longer than necessary because of bufsize.
2409 2414 def _safeiterfile(fp, bufsize=4096):
2410 2415 fd = fp.fileno()
2411 2416 line = ''
2412 2417 while True:
2413 2418 try:
2414 2419 buf = os.read(fd, bufsize)
2415 2420 except OSError as ex:
2416 2421 # os.read only raises EINTR before any data is read
2417 2422 if ex.errno == errno.EINTR:
2418 2423 continue
2419 2424 else:
2420 2425 raise
2421 2426 line += buf
2422 2427 if '\n' in buf:
2423 2428 splitted = line.splitlines(True)
2424 2429 line = ''
2425 2430 for l in splitted:
2426 2431 if l[-1] == '\n':
2427 2432 yield l
2428 2433 else:
2429 2434 line = l
2430 2435 if not buf:
2431 2436 break
2432 2437 if line:
2433 2438 yield line
2434 2439
2435 2440 def iterfile(fp):
2436 2441 fastpath = True
2437 2442 if type(fp) is file:
2438 2443 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2439 2444 if fastpath:
2440 2445 return fp
2441 2446 else:
2442 2447 return _safeiterfile(fp)
2443 2448 else:
2444 2449 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2445 2450 def iterfile(fp):
2446 2451 return fp
2447 2452
2448 2453 def iterlines(iterator):
2449 2454 for chunk in iterator:
2450 2455 for line in chunk.splitlines():
2451 2456 yield line
2452 2457
2453 2458 def expandpath(path):
2454 2459 return os.path.expanduser(os.path.expandvars(path))
2455 2460
2456 2461 def hgcmd():
2457 2462 """Return the command used to execute current hg
2458 2463
2459 2464 This is different from hgexecutable() because on Windows we want
2460 2465 to avoid things like batch files that open new shell windows, so we
2461 2466 get either the python call or the current executable.
2462 2467 """
2463 2468 if mainfrozen():
2464 2469 if getattr(sys, 'frozen', None) == 'macosx_app':
2465 2470 # Env variable set by py2app
2466 2471 return [encoding.environ['EXECUTABLEPATH']]
2467 2472 else:
2468 2473 return [pycompat.sysexecutable]
2469 2474 return gethgcmd()
2470 2475
2471 2476 def rundetached(args, condfn):
2472 2477 """Execute the argument list in a detached process.
2473 2478
2474 2479 condfn is a callable which is called repeatedly and should return
2475 2480 True once the child process is known to have started successfully.
2476 2481 At this point, the child process PID is returned. If the child
2477 2482 process fails to start or finishes before condfn() evaluates to
2478 2483 True, return -1.
2479 2484 """
2480 2485 # Windows case is easier because the child process is either
2481 2486 # successfully starting and validating the condition or exiting
2482 2487 # on failure. We just poll on its PID. On Unix, if the child
2483 2488 # process fails to start, it will be left in a zombie state until
2484 2489 # the parent waits on it, which we cannot do since we expect a
2485 2490 # long-running process on success. Instead we listen for SIGCHLD telling
2486 2491 # us our child process terminated.
2487 2492 terminated = set()
2488 2493 def handler(signum, frame):
2489 2494 terminated.add(os.wait())
2490 2495 prevhandler = None
2491 2496 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2492 2497 if SIGCHLD is not None:
2493 2498 prevhandler = signal.signal(SIGCHLD, handler)
2494 2499 try:
2495 2500 pid = spawndetached(args)
2496 2501 while not condfn():
2497 2502 if ((pid in terminated or not testpid(pid))
2498 2503 and not condfn()):
2499 2504 return -1
2500 2505 time.sleep(0.1)
2501 2506 return pid
2502 2507 finally:
2503 2508 if prevhandler is not None:
2504 2509 signal.signal(signal.SIGCHLD, prevhandler)
2505 2510
2506 2511 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2507 2512 """Return the result of interpolating items in the mapping into string s.
2508 2513
2509 2514 prefix is a single character string, or a two character string with
2510 2515 a backslash as the first character if the prefix needs to be escaped in
2511 2516 a regular expression.
2512 2517
2513 2518 fn is an optional function that will be applied to the replacement text
2514 2519 just before replacement.
2515 2520
2516 2521 escape_prefix is an optional flag that allows using doubled prefix for
2517 2522 its escaping.
2518 2523 """
2519 2524 fn = fn or (lambda s: s)
2520 2525 patterns = '|'.join(mapping.keys())
2521 2526 if escape_prefix:
2522 2527 patterns += '|' + prefix
2523 2528 if len(prefix) > 1:
2524 2529 prefix_char = prefix[1:]
2525 2530 else:
2526 2531 prefix_char = prefix
2527 2532 mapping[prefix_char] = prefix_char
2528 2533 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2529 2534 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
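
# Usage sketch (mapping and strings are illustrative):
#
#   interpolate('%', {'a': 'apple'}, 'an %a a day')
#   # -> 'an apple a day'
#
#   # With escape_prefix=True, a doubled prefix escapes itself:
#   interpolate(br'\$', {'n': '3'}, '$n costs $$5', escape_prefix=True)
#   # -> '3 costs $5'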
2530 2535
2531 2536 def getport(port):
2532 2537 """Return the port for a given network service.
2533 2538
2534 2539 If port is an integer, it's returned as is. If it's a string, it's
2535 2540 looked up using socket.getservbyname(). If there's no matching
2536 2541 service, error.Abort is raised.
2537 2542 """
2538 2543 try:
2539 2544 return int(port)
2540 2545 except ValueError:
2541 2546 pass
2542 2547
2543 2548 try:
2544 2549 return socket.getservbyname(port)
2545 2550 except socket.error:
2546 2551 raise Abort(_("no port number associated with service '%s'") % port)
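
# Usage sketch:
#
#   getport('8080')  # -> 8080
#   getport('http')  # -> 80, looked up via socket.getservbyname()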
2547 2552
2548 2553 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2549 2554 '0': False, 'no': False, 'false': False, 'off': False,
2550 2555 'never': False}
2551 2556
2552 2557 def parsebool(s):
2553 2558 """Parse s into a boolean.
2554 2559
2555 2560 If s is not a valid boolean, returns None.
2556 2561 """
2557 2562 return _booleans.get(s.lower(), None)
2558 2563
2559 2564 _hextochr = dict((a + b, chr(int(a + b, 16)))
2560 2565 for a in string.hexdigits for b in string.hexdigits)
2561 2566
2562 2567 class url(object):
2563 2568 r"""Reliable URL parser.
2564 2569
2565 2570 This parses URLs and provides attributes for the following
2566 2571 components:
2567 2572
2568 2573 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2569 2574
2570 2575 Missing components are set to None. The only exception is
2571 2576 fragment, which is set to '' if present but empty.
2572 2577
2573 2578 If parsefragment is False, fragment is included in query. If
2574 2579 parsequery is False, query is included in path. If both are
2575 2580 False, both fragment and query are included in path.
2576 2581
2577 2582 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2578 2583
2579 2584 Note that for backward compatibility reasons, bundle URLs do not
2580 2585 take host names. That means 'bundle://../' has a path of '../'.
2581 2586
2582 2587 Examples:
2583 2588
2584 2589 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2585 2590 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2586 2591 >>> url('ssh://[::1]:2200//home/joe/repo')
2587 2592 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2588 2593 >>> url('file:///home/joe/repo')
2589 2594 <url scheme: 'file', path: '/home/joe/repo'>
2590 2595 >>> url('file:///c:/temp/foo/')
2591 2596 <url scheme: 'file', path: 'c:/temp/foo/'>
2592 2597 >>> url('bundle:foo')
2593 2598 <url scheme: 'bundle', path: 'foo'>
2594 2599 >>> url('bundle://../foo')
2595 2600 <url scheme: 'bundle', path: '../foo'>
2596 2601 >>> url(r'c:\foo\bar')
2597 2602 <url path: 'c:\\foo\\bar'>
2598 2603 >>> url(r'\\blah\blah\blah')
2599 2604 <url path: '\\\\blah\\blah\\blah'>
2600 2605 >>> url(r'\\blah\blah\blah#baz')
2601 2606 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2602 2607 >>> url(r'file:///C:\users\me')
2603 2608 <url scheme: 'file', path: 'C:\\users\\me'>
2604 2609
2605 2610 Authentication credentials:
2606 2611
2607 2612 >>> url('ssh://joe:xyz@x/repo')
2608 2613 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2609 2614 >>> url('ssh://joe@x/repo')
2610 2615 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2611 2616
2612 2617 Query strings and fragments:
2613 2618
2614 2619 >>> url('http://host/a?b#c')
2615 2620 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2616 2621 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2617 2622 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2618 2623
2619 2624 Empty path:
2620 2625
2621 2626 >>> url('')
2622 2627 <url path: ''>
2623 2628 >>> url('#a')
2624 2629 <url path: '', fragment: 'a'>
2625 2630 >>> url('http://host/')
2626 2631 <url scheme: 'http', host: 'host', path: ''>
2627 2632 >>> url('http://host/#a')
2628 2633 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2629 2634
2630 2635 Only scheme:
2631 2636
2632 2637 >>> url('http:')
2633 2638 <url scheme: 'http'>
2634 2639 """
2635 2640
2636 2641 _safechars = "!~*'()+"
2637 2642 _safepchars = "/!~*'()+:\\"
2638 2643 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2639 2644
2640 2645 def __init__(self, path, parsequery=True, parsefragment=True):
2641 2646 # We slowly chomp away at path until we have only the path left
2642 2647 self.scheme = self.user = self.passwd = self.host = None
2643 2648 self.port = self.path = self.query = self.fragment = None
2644 2649 self._localpath = True
2645 2650 self._hostport = ''
2646 2651 self._origpath = path
2647 2652
2648 2653 if parsefragment and '#' in path:
2649 2654 path, self.fragment = path.split('#', 1)
2650 2655
2651 2656 # special case for Windows drive letters and UNC paths
2652 2657 if hasdriveletter(path) or path.startswith('\\\\'):
2653 2658 self.path = path
2654 2659 return
2655 2660
2656 2661 # For compatibility reasons, we can't handle bundle paths as
2657 2662 # normal URLs
2658 2663 if path.startswith('bundle:'):
2659 2664 self.scheme = 'bundle'
2660 2665 path = path[7:]
2661 2666 if path.startswith('//'):
2662 2667 path = path[2:]
2663 2668 self.path = path
2664 2669 return
2665 2670
2666 2671 if self._matchscheme(path):
2667 2672 parts = path.split(':', 1)
2668 2673 if parts[0]:
2669 2674 self.scheme, path = parts
2670 2675 self._localpath = False
2671 2676
2672 2677 if not path:
2673 2678 path = None
2674 2679 if self._localpath:
2675 2680 self.path = ''
2676 2681 return
2677 2682 else:
2678 2683 if self._localpath:
2679 2684 self.path = path
2680 2685 return
2681 2686
2682 2687 if parsequery and '?' in path:
2683 2688 path, self.query = path.split('?', 1)
2684 2689 if not path:
2685 2690 path = None
2686 2691 if not self.query:
2687 2692 self.query = None
2688 2693
2689 2694 # // is required to specify a host/authority
2690 2695 if path and path.startswith('//'):
2691 2696 parts = path[2:].split('/', 1)
2692 2697 if len(parts) > 1:
2693 2698 self.host, path = parts
2694 2699 else:
2695 2700 self.host = parts[0]
2696 2701 path = None
2697 2702 if not self.host:
2698 2703 self.host = None
2699 2704 # path of file:///d is /d
2700 2705 # path of file:///d:/ is d:/, not /d:/
2701 2706 if path and not hasdriveletter(path):
2702 2707 path = '/' + path
2703 2708
2704 2709 if self.host and '@' in self.host:
2705 2710 self.user, self.host = self.host.rsplit('@', 1)
2706 2711 if ':' in self.user:
2707 2712 self.user, self.passwd = self.user.split(':', 1)
2708 2713 if not self.host:
2709 2714 self.host = None
2710 2715
2711 2716 # Don't split on colons in IPv6 addresses without ports
2712 2717 if (self.host and ':' in self.host and
2713 2718 not (self.host.startswith('[') and self.host.endswith(']'))):
2714 2719 self._hostport = self.host
2715 2720 self.host, self.port = self.host.rsplit(':', 1)
2716 2721 if not self.host:
2717 2722 self.host = None
2718 2723
2719 2724 if (self.host and self.scheme == 'file' and
2720 2725 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2721 2726 raise Abort(_('file:// URLs can only refer to localhost'))
2722 2727
2723 2728 self.path = path
2724 2729
2725 2730 # leave the query string escaped
2726 2731 for a in ('user', 'passwd', 'host', 'port',
2727 2732 'path', 'fragment'):
2728 2733 v = getattr(self, a)
2729 2734 if v is not None:
2730 2735 setattr(self, a, urlreq.unquote(v))
2731 2736
2732 2737 def __repr__(self):
2733 2738 attrs = []
2734 2739 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2735 2740 'query', 'fragment'):
2736 2741 v = getattr(self, a)
2737 2742 if v is not None:
2738 2743 attrs.append('%s: %r' % (a, v))
2739 2744 return '<url %s>' % ', '.join(attrs)
2740 2745
2741 2746 def __str__(self):
2742 2747 r"""Join the URL's components back into a URL string.
2743 2748
2744 2749 Examples:
2745 2750
2746 2751 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2747 2752 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2748 2753 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2749 2754 'http://user:pw@host:80/?foo=bar&baz=42'
2750 2755 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2751 2756 'http://user:pw@host:80/?foo=bar%3dbaz'
2752 2757 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2753 2758 'ssh://user:pw@[::1]:2200//home/joe#'
2754 2759 >>> str(url('http://localhost:80//'))
2755 2760 'http://localhost:80//'
2756 2761 >>> str(url('http://localhost:80/'))
2757 2762 'http://localhost:80/'
2758 2763 >>> str(url('http://localhost:80'))
2759 2764 'http://localhost:80/'
2760 2765 >>> str(url('bundle:foo'))
2761 2766 'bundle:foo'
2762 2767 >>> str(url('bundle://../foo'))
2763 2768 'bundle:../foo'
2764 2769 >>> str(url('path'))
2765 2770 'path'
2766 2771 >>> str(url('file:///tmp/foo/bar'))
2767 2772 'file:///tmp/foo/bar'
2768 2773 >>> str(url('file:///c:/tmp/foo/bar'))
2769 2774 'file:///c:/tmp/foo/bar'
2770 2775 >>> print url(r'bundle:foo\bar')
2771 2776 bundle:foo\bar
2772 2777 >>> print url(r'file:///D:\data\hg')
2773 2778 file:///D:\data\hg
2774 2779 """
2775 2780 return encoding.strfromlocal(self.__bytes__())
2776 2781
2777 2782 def __bytes__(self):
2778 2783 if self._localpath:
2779 2784 s = self.path
2780 2785 if self.scheme == 'bundle':
2781 2786 s = 'bundle:' + s
2782 2787 if self.fragment:
2783 2788 s += '#' + self.fragment
2784 2789 return s
2785 2790
2786 2791 s = self.scheme + ':'
2787 2792 if self.user or self.passwd or self.host:
2788 2793 s += '//'
2789 2794 elif self.scheme and (not self.path or self.path.startswith('/')
2790 2795 or hasdriveletter(self.path)):
2791 2796 s += '//'
2792 2797 if hasdriveletter(self.path):
2793 2798 s += '/'
2794 2799 if self.user:
2795 2800 s += urlreq.quote(self.user, safe=self._safechars)
2796 2801 if self.passwd:
2797 2802 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2798 2803 if self.user or self.passwd:
2799 2804 s += '@'
2800 2805 if self.host:
2801 2806 if not (self.host.startswith('[') and self.host.endswith(']')):
2802 2807 s += urlreq.quote(self.host)
2803 2808 else:
2804 2809 s += self.host
2805 2810 if self.port:
2806 2811 s += ':' + urlreq.quote(self.port)
2807 2812 if self.host:
2808 2813 s += '/'
2809 2814 if self.path:
2810 2815 # TODO: similar to the query string, we should not unescape the
2811 2816 # path when we store it, the path might contain '%2f' = '/',
2812 2817 # which we should *not* escape.
2813 2818 s += urlreq.quote(self.path, safe=self._safepchars)
2814 2819 if self.query:
2815 2820 # we store the query in escaped form.
2816 2821 s += '?' + self.query
2817 2822 if self.fragment is not None:
2818 2823 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2819 2824 return s
2820 2825
2821 2826 def authinfo(self):
2822 2827 user, passwd = self.user, self.passwd
2823 2828 try:
2824 2829 self.user, self.passwd = None, None
2825 2830 s = bytes(self)
2826 2831 finally:
2827 2832 self.user, self.passwd = user, passwd
2828 2833 if not self.user:
2829 2834 return (s, None)
2830 2835 # authinfo[1] is passed to urllib2 password manager, and its
2831 2836 # URIs must not contain credentials. The host is passed in the
2832 2837 # URIs list because Python < 2.4.3 uses only that to search for
2833 2838 # a password.
2834 2839 return (s, (None, (s, self.host),
2835 2840 self.user, self.passwd or ''))
2836 2841
2837 2842 def isabs(self):
2838 2843 if self.scheme and self.scheme != 'file':
2839 2844 return True # remote URL
2840 2845 if hasdriveletter(self.path):
2841 2846 return True # absolute for our purposes - can't be joined()
2842 2847 if self.path.startswith(r'\\'):
2843 2848 return True # Windows UNC path
2844 2849 if self.path.startswith('/'):
2845 2850 return True # POSIX-style
2846 2851 return False
2847 2852
2848 2853 def localpath(self):
2849 2854 if self.scheme == 'file' or self.scheme == 'bundle':
2850 2855 path = self.path or '/'
2851 2856 # For Windows, we need to promote hosts containing drive
2852 2857 # letters to paths with drive letters.
2853 2858 if hasdriveletter(self._hostport):
2854 2859 path = self._hostport + '/' + self.path
2855 2860 elif (self.host is not None and self.path
2856 2861 and not hasdriveletter(path)):
2857 2862 path = '/' + path
2858 2863 return path
2859 2864 return self._origpath
2860 2865
2861 2866 def islocal(self):
2862 2867 '''whether localpath will return something that posixfile can open'''
2863 2868 return (not self.scheme or self.scheme == 'file'
2864 2869 or self.scheme == 'bundle')
2865 2870
2866 2871 def hasscheme(path):
2867 2872 return bool(url(path).scheme)
2868 2873
2869 2874 def hasdriveletter(path):
2870 2875 return path and path[1:2] == ':' and path[0:1].isalpha()
2871 2876
2872 2877 def urllocalpath(path):
2873 2878 return url(path, parsequery=False, parsefragment=False).localpath()
2874 2879
2875 2880 def hidepassword(u):
2876 2881 '''hide user credential in a url string'''
2877 2882 u = url(u)
2878 2883 if u.passwd:
2879 2884 u.passwd = '***'
2880 2885 return bytes(u)
2881 2886
2882 2887 def removeauth(u):
2883 2888 '''remove all authentication information from a url string'''
2884 2889 u = url(u)
2885 2890 u.user = u.passwd = None
2886 2891 return str(u)
2887 2892
2888 2893 timecount = unitcountfn(
2889 2894 (1, 1e3, _('%.0f s')),
2890 2895 (100, 1, _('%.1f s')),
2891 2896 (10, 1, _('%.2f s')),
2892 2897 (1, 1, _('%.3f s')),
2893 2898 (100, 0.001, _('%.1f ms')),
2894 2899 (10, 0.001, _('%.2f ms')),
2895 2900 (1, 0.001, _('%.3f ms')),
2896 2901 (100, 0.000001, _('%.1f us')),
2897 2902 (10, 0.000001, _('%.2f us')),
2898 2903 (1, 0.000001, _('%.3f us')),
2899 2904 (100, 0.000000001, _('%.1f ns')),
2900 2905 (10, 0.000000001, _('%.2f ns')),
2901 2906 (1, 0.000000001, _('%.3f ns')),
2902 2907 )
2903 2908
2904 2909 _timenesting = [0]
2905 2910
2906 2911 def timed(func):
2907 2912 '''Report the execution time of a function call to stderr.
2908 2913
2909 2914 During development, use as a decorator when you need to measure
2910 2915 the cost of a function, e.g. as follows:
2911 2916
2912 2917 @util.timed
2913 2918 def foo(a, b, c):
2914 2919 pass
2915 2920 '''
2916 2921
2917 2922 def wrapper(*args, **kwargs):
2918 2923 start = timer()
2919 2924 indent = 2
2920 2925 _timenesting[0] += indent
2921 2926 try:
2922 2927 return func(*args, **kwargs)
2923 2928 finally:
2924 2929 elapsed = timer() - start
2925 2930 _timenesting[0] -= indent
2926 2931 stderr.write('%s%s: %s\n' %
2927 2932 (' ' * _timenesting[0], func.__name__,
2928 2933 timecount(elapsed)))
2929 2934 return wrapper
2930 2935
2931 2936 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2932 2937 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2933 2938
2934 2939 def sizetoint(s):
2935 2940 '''Convert a space specifier to a byte count.
2936 2941
2937 2942 >>> sizetoint('30')
2938 2943 30
2939 2944 >>> sizetoint('2.2kb')
2940 2945 2252
2941 2946 >>> sizetoint('6M')
2942 2947 6291456
2943 2948 '''
2944 2949 t = s.strip().lower()
2945 2950 try:
2946 2951 for k, u in _sizeunits:
2947 2952 if t.endswith(k):
2948 2953 return int(float(t[:-len(k)]) * u)
2949 2954 return int(t)
2950 2955 except ValueError:
2951 2956 raise error.ParseError(_("couldn't parse size: %s") % s)
2952 2957
2953 2958 class hooks(object):
2954 2959 '''A collection of hook functions that can be used to extend a
2955 2960 function's behavior. Hooks are called in lexicographic order,
2956 2961 based on the names of their sources.'''
2957 2962
2958 2963 def __init__(self):
2959 2964 self._hooks = []
2960 2965
2961 2966 def add(self, source, hook):
2962 2967 self._hooks.append((source, hook))
2963 2968
2964 2969 def __call__(self, *args):
2965 2970 self._hooks.sort(key=lambda x: x[0])
2966 2971 results = []
2967 2972 for source, hook in self._hooks:
2968 2973 results.append(hook(*args))
2969 2974 return results
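
# Usage sketch ('myrepo' and the source names are illustrative):
#
#   h = hooks()
#   h.add('zzz-ext', lambda repo: 'second')
#   h.add('aaa-ext', lambda repo: 'first')
#   h(myrepo)  # -> ['first', 'second'], ordered by source name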
2970 2975
2971 2976 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2972 2977 '''Yields lines for a nicely formatted stacktrace.
2973 2978 Skips the last 'skip' entries, then returns the last 'depth' entries.
2974 2979 Each file+linenumber is formatted according to fileline.
2975 2980 Each line is formatted according to line.
2976 2981 If line is None, it yields:
2977 2982 length of longest filepath+line number,
2978 2983 filepath+linenumber,
2979 2984 function
2980 2985
2981 2986 Not to be used in production code, but very convenient while developing.
2982 2987 '''
2983 2988 entries = [(fileline % (fn, ln), func)
2984 2989 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2985 2990 ][-depth:]
2986 2991 if entries:
2987 2992 fnmax = max(len(entry[0]) for entry in entries)
2988 2993 for fnln, func in entries:
2989 2994 if line is None:
2990 2995 yield (fnmax, fnln, func)
2991 2996 else:
2992 2997 yield line % (fnmax, fnln, func)
2993 2998
2994 2999 def debugstacktrace(msg='stacktrace', skip=0,
2995 3000 f=stderr, otherf=stdout, depth=0):
2996 3001 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2997 3002 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
2998 3003 By default it will flush stdout first.
2999 3004 It can be used everywhere and intentionally does not require a ui object.
3000 3005 Not to be used in production code, but very convenient while developing.
3001 3006 '''
3002 3007 if otherf:
3003 3008 otherf.flush()
3004 3009 f.write('%s at:\n' % msg.rstrip())
3005 3010 for line in getstackframes(skip + 1, depth=depth):
3006 3011 f.write(line)
3007 3012 f.flush()
3008 3013
3009 3014 class dirs(object):
3010 3015 '''a multiset of directory names from a dirstate or manifest'''
3011 3016
3012 3017 def __init__(self, map, skip=None):
3013 3018 self._dirs = {}
3014 3019 addpath = self.addpath
3015 3020 if safehasattr(map, 'iteritems') and skip is not None:
3016 3021 for f, s in map.iteritems():
3017 3022 if s[0] != skip:
3018 3023 addpath(f)
3019 3024 else:
3020 3025 for f in map:
3021 3026 addpath(f)
3022 3027
3023 3028 def addpath(self, path):
3024 3029 dirs = self._dirs
3025 3030 for base in finddirs(path):
3026 3031 if base in dirs:
3027 3032 dirs[base] += 1
3028 3033 return
3029 3034 dirs[base] = 1
3030 3035
3031 3036 def delpath(self, path):
3032 3037 dirs = self._dirs
3033 3038 for base in finddirs(path):
3034 3039 if dirs[base] > 1:
3035 3040 dirs[base] -= 1
3036 3041 return
3037 3042 del dirs[base]
3038 3043
3039 3044 def __iter__(self):
3040 3045 return iter(self._dirs)
3041 3046
3042 3047 def __contains__(self, d):
3043 3048 return d in self._dirs
3044 3049
3045 3050 if safehasattr(parsers, 'dirs'):
3046 3051 dirs = parsers.dirs
3047 3052
3048 3053 def finddirs(path):
3049 3054 pos = path.rfind('/')
3050 3055 while pos != -1:
3051 3056 yield path[:pos]
3052 3057 pos = path.rfind('/', 0, pos)
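
# Usage sketch: dirs() is a multiset of the directory prefixes of the
# given paths (paths are illustrative):
#
#   d = dirs(['a/b/c', 'a/b/d', 'e/f'])
#   'a/b' in d          # -> True
#   d.delpath('a/b/c')
#   'a/b' in d          # -> True, 'a/b/d' still references it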
3053 3058
3054 3059 class ctxmanager(object):
3055 3060 '''A context manager for use in 'with' blocks to allow multiple
3056 3061 contexts to be entered at once. This is both safer and more
3057 3062 flexible than contextlib.nested.
3058 3063
3059 3064 Once Mercurial supports Python 2.7+, this will become mostly
3060 3065 unnecessary.
3061 3066 '''
3062 3067
3063 3068 def __init__(self, *args):
3064 3069 '''Accepts a list of no-argument functions that return context
3065 3070 managers. These will be invoked at __call__ time.'''
3066 3071 self._pending = args
3067 3072 self._atexit = []
3068 3073
3069 3074 def __enter__(self):
3070 3075 return self
3071 3076
3072 3077 def enter(self):
3073 3078 '''Create and enter context managers in the order in which they were
3074 3079 passed to the constructor.'''
3075 3080 values = []
3076 3081 for func in self._pending:
3077 3082 obj = func()
3078 3083 values.append(obj.__enter__())
3079 3084 self._atexit.append(obj.__exit__)
3080 3085 del self._pending
3081 3086 return values
3082 3087
3083 3088 def atexit(self, func, *args, **kwargs):
3084 3089 '''Add a function to call when this context manager exits. The
3085 3090 ordering of multiple atexit calls is unspecified, save that
3086 3091 they will happen before any __exit__ functions.'''
3087 3092 def wrapper(exc_type, exc_val, exc_tb):
3088 3093 func(*args, **kwargs)
3089 3094 self._atexit.append(wrapper)
3090 3095 return func
3091 3096
3092 3097 def __exit__(self, exc_type, exc_val, exc_tb):
3093 3098 '''Context managers are exited in the reverse order from which
3094 3099 they were created.'''
3095 3100 received = exc_type is not None
3096 3101 suppressed = False
3097 3102 pending = None
3098 3103 self._atexit.reverse()
3099 3104 for exitfunc in self._atexit:
3100 3105 try:
3101 3106 if exitfunc(exc_type, exc_val, exc_tb):
3102 3107 suppressed = True
3103 3108 exc_type = None
3104 3109 exc_val = None
3105 3110 exc_tb = None
3106 3111 except BaseException:
3107 3112 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3109 3114 del self._atexit
3110 3115 if pending:
3111 3116 raise exc_val
3112 3117 return received and suppressed
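
# Usage sketch ('makelock1'/'makelock2' stand in for any zero-argument
# callables returning context managers; illustrative only):
#
#   with ctxmanager(makelock1, makelock2) as c:
#       lock1, lock2 = c.enter()
#       c.atexit(cleanup)  # runs before the locks' __exit__ methods
#       ...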
3113 3118
3114 3119 # compression code
3115 3120
3116 3121 SERVERROLE = 'server'
3117 3122 CLIENTROLE = 'client'
3118 3123
3119 3124 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3120 3125 (u'name', u'serverpriority',
3121 3126 u'clientpriority'))
3122 3127
3123 3128 class compressormanager(object):
3124 3129 """Holds registrations of various compression engines.
3125 3130
3126 3131 This class essentially abstracts the differences between compression
3127 3132 engines to allow new compression formats to be added easily, possibly from
3128 3133 extensions.
3129 3134
3130 3135 Compressors are registered against the global instance by calling its
3131 3136 ``register()`` method.
3132 3137 """
3133 3138 def __init__(self):
3134 3139 self._engines = {}
3135 3140 # Bundle spec human name to engine name.
3136 3141 self._bundlenames = {}
3137 3142 # Internal bundle identifier to engine name.
3138 3143 self._bundletypes = {}
3139 3144 # Revlog header to engine name.
3140 3145 self._revlogheaders = {}
3141 3146 # Wire proto identifier to engine name.
3142 3147 self._wiretypes = {}
3143 3148
3144 3149 def __getitem__(self, key):
3145 3150 return self._engines[key]
3146 3151
3147 3152 def __contains__(self, key):
3148 3153 return key in self._engines
3149 3154
3150 3155 def __iter__(self):
3151 3156 return iter(self._engines.keys())
3152 3157
3153 3158 def register(self, engine):
3154 3159 """Register a compression engine with the manager.
3155 3160
3156 3161 The argument must be a ``compressionengine`` instance.
3157 3162 """
3158 3163 if not isinstance(engine, compressionengine):
3159 3164 raise ValueError(_('argument must be a compressionengine'))
3160 3165
3161 3166 name = engine.name()
3162 3167
3163 3168 if name in self._engines:
3164 3169 raise error.Abort(_('compression engine %s already registered') %
3165 3170 name)
3166 3171
3167 3172 bundleinfo = engine.bundletype()
3168 3173 if bundleinfo:
3169 3174 bundlename, bundletype = bundleinfo
3170 3175
3171 3176 if bundlename in self._bundlenames:
3172 3177 raise error.Abort(_('bundle name %s already registered') %
3173 3178 bundlename)
3174 3179 if bundletype in self._bundletypes:
3175 3180 raise error.Abort(_('bundle type %s already registered by %s') %
3176 3181 (bundletype, self._bundletypes[bundletype]))
3177 3182
3178 3183 # Only register an external-facing name if one was declared.
3179 3184 if bundlename:
3180 3185 self._bundlenames[bundlename] = name
3181 3186
3182 3187 self._bundletypes[bundletype] = name
3183 3188
3184 3189 wiresupport = engine.wireprotosupport()
3185 3190 if wiresupport:
3186 3191 wiretype = wiresupport.name
3187 3192 if wiretype in self._wiretypes:
3188 3193 raise error.Abort(_('wire protocol compression %s already '
3189 3194 'registered by %s') %
3190 3195 (wiretype, self._wiretypes[wiretype]))
3191 3196
3192 3197 self._wiretypes[wiretype] = name
3193 3198
3194 3199 revlogheader = engine.revlogheader()
3195 3200 if revlogheader and revlogheader in self._revlogheaders:
3196 3201 raise error.Abort(_('revlog header %s already registered by %s') %
3197 3202 (revlogheader, self._revlogheaders[revlogheader]))
3198 3203
3199 3204 if revlogheader:
3200 3205 self._revlogheaders[revlogheader] = name
3201 3206
3202 3207 self._engines[name] = engine
3203 3208
3204 3209 @property
3205 3210 def supportedbundlenames(self):
3206 3211 return set(self._bundlenames.keys())
3207 3212
3208 3213 @property
3209 3214 def supportedbundletypes(self):
3210 3215 return set(self._bundletypes.keys())
3211 3216
3212 3217 def forbundlename(self, bundlename):
3213 3218 """Obtain a compression engine registered to a bundle name.
3214 3219
3215 3220 Will raise KeyError if the bundle type isn't registered.
3216 3221
3217 3222 Will abort if the engine is known but not available.
3218 3223 """
3219 3224 engine = self._engines[self._bundlenames[bundlename]]
3220 3225 if not engine.available():
3221 3226 raise error.Abort(_('compression engine %s could not be loaded') %
3222 3227 engine.name())
3223 3228 return engine
3224 3229
3225 3230 def forbundletype(self, bundletype):
3226 3231 """Obtain a compression engine registered to a bundle type.
3227 3232
3228 3233 Will raise KeyError if the bundle type isn't registered.
3229 3234
3230 3235 Will abort if the engine is known but not available.
3231 3236 """
3232 3237 engine = self._engines[self._bundletypes[bundletype]]
3233 3238 if not engine.available():
3234 3239 raise error.Abort(_('compression engine %s could not be loaded') %
3235 3240 engine.name())
3236 3241 return engine
3237 3242
3238 3243 def supportedwireengines(self, role, onlyavailable=True):
3239 3244 """Obtain compression engines that support the wire protocol.
3240 3245
3241 3246 Returns a list of engines in prioritized order, most desired first.
3242 3247
3243 3248 If ``onlyavailable`` is set, filter out engines that can't be
3244 3249 loaded.
3245 3250 """
3246 3251 assert role in (SERVERROLE, CLIENTROLE)
3247 3252
3248 3253 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3249 3254
3250 3255 engines = [self._engines[e] for e in self._wiretypes.values()]
3251 3256 if onlyavailable:
3252 3257 engines = [e for e in engines if e.available()]
3253 3258
3254 3259 def getkey(e):
3255 3260 # Sort first by priority, highest first. In case of tie, sort
3256 3261 # alphabetically. This is arbitrary, but ensures output is
3257 3262 # stable.
3258 3263 w = e.wireprotosupport()
3259 3264 return -1 * getattr(w, attr), w.name
3260 3265
3261 3266 return list(sorted(engines, key=getkey))
3262 3267
3263 3268 def forwiretype(self, wiretype):
3264 3269 engine = self._engines[self._wiretypes[wiretype]]
3265 3270 if not engine.available():
3266 3271 raise error.Abort(_('compression engine %s could not be loaded') %
3267 3272 engine.name())
3268 3273 return engine
3269 3274
3270 3275 def forrevlogheader(self, header):
3271 3276 """Obtain a compression engine registered to a revlog header.
3272 3277
3273 3278 Will raise KeyError if the revlog header value isn't registered.
3274 3279 """
3275 3280 return self._engines[self._revlogheaders[header]]
3276 3281
3277 3282 compengines = compressormanager()
3278 3283
3279 3284 class compressionengine(object):
3280 3285 """Base class for compression engines.
3281 3286
3282 3287 Compression engines must implement the interface defined by this class.
3283 3288 """
3284 3289 def name(self):
3285 3290 """Returns the name of the compression engine.
3286 3291
3287 3292 This is the key the engine is registered under.
3288 3293
3289 3294 This method must be implemented.
3290 3295 """
3291 3296 raise NotImplementedError()
3292 3297
3293 3298 def available(self):
3294 3299 """Whether the compression engine is available.
3295 3300
3296 3301 The intent of this method is to allow optional compression engines
3297 3302 that may not be available in all installations (such as engines relying
3298 3303 on C extensions that may not be present).
3299 3304 """
3300 3305 return True
3301 3306
3302 3307 def bundletype(self):
3303 3308 """Describes bundle identifiers for this engine.
3304 3309
3305 3310 If this compression engine isn't supported for bundles, returns None.
3306 3311
3307 3312 If this engine can be used for bundles, returns a 2-tuple of strings of
3308 3313 the user-facing "bundle spec" compression name and an internal
3309 3314 identifier used to denote the compression format within bundles. To
3310 3315 exclude the name from external usage, set the first element to ``None``.
3311 3316
3312 3317 If bundle compression is supported, the class must also implement
3313 3318 ``compressstream`` and ``decompressorreader``.
3314 3319
3315 3320 The docstring of this method is used in the help system to tell users
3316 3321 about this engine.
3317 3322 """
3318 3323 return None
3319 3324
3320 3325 def wireprotosupport(self):
3321 3326 """Declare support for this compression format on the wire protocol.
3322 3327
3323 3328 If this compression engine isn't supported for compressing wire
3324 3329 protocol payloads, returns None.
3325 3330
3326 3331 Otherwise, returns ``compenginewireprotosupport`` with the following
3327 3332 fields:
3328 3333
3329 3334 * String format identifier
3330 3335 * Integer priority for the server
3331 3336 * Integer priority for the client
3332 3337
3333 3338 The integer priorities are used to order the advertisement of format
3334 3339 support by server and client. The highest integer is advertised
3335 3340 first. Integers with non-positive values aren't advertised.
3336 3341
3337 3342 The priority values are somewhat arbitrary and only used for default
3338 3343 ordering. The relative order can be changed via config options.
3339 3344
3340 3345 If wire protocol compression is supported, the class must also implement
3341 3346 ``compressstream`` and ``decompressorreader``.
3342 3347 """
3343 3348 return None
3344 3349
3345 3350 def revlogheader(self):
3346 3351 """Header added to revlog chunks that identifies this engine.
3347 3352
3348 3353 If this engine can be used to compress revlogs, this method should
3349 3354 return the bytes used to identify chunks compressed with this engine.
3350 3355 Else, the method should return ``None`` to indicate it does not
3351 3356 participate in revlog compression.
3352 3357 """
3353 3358 return None
3354 3359
3355 3360 def compressstream(self, it, opts=None):
3356 3361 """Compress an iterator of chunks.
3357 3362
3358 3363 The method receives an iterator (ideally a generator) of chunks of
3359 3364 bytes to be compressed. It returns an iterator (ideally a generator)
3360 3365 of bytes of chunks representing the compressed output.
3361 3366
3362 3367 Optionally accepts an argument defining how to perform compression.
3363 3368 Each engine treats this argument differently.
3364 3369 """
3365 3370 raise NotImplementedError()
3366 3371
3367 3372 def decompressorreader(self, fh):
3368 3373 """Perform decompression on a file object.
3369 3374
3370 3375 Argument is an object with a ``read(size)`` method that returns
3371 3376 compressed data. Return value is an object with a ``read(size)`` that
3372 3377 returns uncompressed data.
3373 3378 """
3374 3379 raise NotImplementedError()
3375 3380
3376 3381 def revlogcompressor(self, opts=None):
3377 3382 """Obtain an object that can be used to compress revlog entries.
3378 3383
3379 3384 The object has a ``compress(data)`` method that compresses binary
3380 3385 data. This method returns compressed binary data or ``None`` if
3381 3386 the data could not be compressed (too small, not compressible, etc).
3382 3387 The returned data should have a header uniquely identifying this
3383 3388 compression format so decompression can be routed to this engine.
3384 3389 This header should be identified by the ``revlogheader()`` return
3385 3390 value.
3386 3391
3387 3392 The object has a ``decompress(data)`` method that decompresses
3388 3393 data. The method will only be called if ``data`` begins with
3389 3394 ``revlogheader()``. The method should return the raw, uncompressed
3390 3395 data or raise a ``RevlogError``.
3391 3396
3392 3397 The object is reusable but is not thread safe.
3393 3398 """
3394 3399 raise NotImplementedError()
3395 3400
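# A minimal sketch of registering a third-party engine, e.g. from an
# extension ('myz' and the identity transforms are illustrative only):
#
#   class _myzengine(compressionengine):
#       def name(self):
#           return 'myz'
#
#       def bundletype(self):
#           """Illustrative engine that performs no real compression."""
#           return 'myz', 'MZ'
#
#       def compressstream(self, it, opts=None):
#           return it
#
#       def decompressorreader(self, fh):
#           return fh
#
#   compengines.register(_myzengine())
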
3396 3401 class _zlibengine(compressionengine):
3397 3402 def name(self):
3398 3403 return 'zlib'
3399 3404
3400 3405 def bundletype(self):
3401 3406 """zlib compression using the DEFLATE algorithm.
3402 3407
3403 3408 All Mercurial clients should support this format. The compression
3404 3409 algorithm strikes a reasonable balance between compression ratio
3405 3410 and size.
3406 3411 """
3407 3412 return 'gzip', 'GZ'
3408 3413
3409 3414 def wireprotosupport(self):
3410 3415 return compewireprotosupport('zlib', 20, 20)
3411 3416
3412 3417 def revlogheader(self):
3413 3418 return 'x'
3414 3419
3415 3420 def compressstream(self, it, opts=None):
3416 3421 opts = opts or {}
3417 3422
3418 3423 z = zlib.compressobj(opts.get('level', -1))
3419 3424 for chunk in it:
3420 3425 data = z.compress(chunk)
3421 3426 # Not all calls to compress emit data. It is cheaper to inspect
3422 3427 # here than to feed empty chunks through generator.
3423 3428 if data:
3424 3429 yield data
3425 3430
3426 3431 yield z.flush()
3427 3432
3428 3433 def decompressorreader(self, fh):
3429 3434 def gen():
3430 3435 d = zlib.decompressobj()
3431 3436 for chunk in filechunkiter(fh):
3432 3437 while chunk:
3433 3438 # Limit output size to limit memory.
3434 3439 yield d.decompress(chunk, 2 ** 18)
3435 3440 chunk = d.unconsumed_tail
3436 3441
3437 3442 return chunkbuffer(gen())
3438 3443
3439 3444 class zlibrevlogcompressor(object):
3440 3445 def compress(self, data):
3441 3446 insize = len(data)
3442 3447 # Caller handles empty input case.
3443 3448 assert insize > 0
3444 3449
3445 3450 if insize < 44:
3446 3451 return None
3447 3452
3448 3453 elif insize <= 1000000:
3449 3454 compressed = zlib.compress(data)
3450 3455 if len(compressed) < insize:
3451 3456 return compressed
3452 3457 return None
3453 3458
3454 3459 # zlib makes an internal copy of the input buffer, doubling
3455 3460 # memory usage for large inputs. So do streaming compression
3456 3461 # on large inputs.
3457 3462 else:
3458 3463 z = zlib.compressobj()
3459 3464 parts = []
3460 3465 pos = 0
3461 3466 while pos < insize:
3462 3467 pos2 = pos + 2**20
3463 3468 parts.append(z.compress(data[pos:pos2]))
3464 3469 pos = pos2
3465 3470 parts.append(z.flush())
3466 3471
3467 3472 if sum(map(len, parts)) < insize:
3468 3473 return ''.join(parts)
3469 3474 return None
3470 3475
3471 3476 def decompress(self, data):
3472 3477 try:
3473 3478 return zlib.decompress(data)
3474 3479 except zlib.error as e:
3475 3480 raise error.RevlogError(_('revlog decompress error: %s') %
3476 3481 str(e))
3477 3482
3478 3483 def revlogcompressor(self, opts=None):
3479 3484 return self.zlibrevlogcompressor()
3480 3485
3481 3486 compengines.register(_zlibengine())
3482 3487
3483 3488 class _bz2engine(compressionengine):
3484 3489 def name(self):
3485 3490 return 'bz2'
3486 3491
3487 3492 def bundletype(self):
3488 3493 """An algorithm that produces smaller bundles than ``gzip``.
3489 3494
3490 3495 All Mercurial clients should support this format.
3491 3496
3492 3497 This engine will likely produce smaller bundles than ``gzip`` but
3493 3498 will be significantly slower, both during compression and
3494 3499 decompression.
3495 3500
3496 3501 If available, the ``zstd`` engine can yield similar or better
3497 3502 compression at much higher speeds.
3498 3503 """
3499 3504 return 'bzip2', 'BZ'
3500 3505
3501 3506 # We declare a protocol name but don't advertise by default because
3502 3507 # it is slow.
3503 3508 def wireprotosupport(self):
3504 3509 return compewireprotosupport('bzip2', 0, 0)
3505 3510
3506 3511 def compressstream(self, it, opts=None):
3507 3512 opts = opts or {}
3508 3513 z = bz2.BZ2Compressor(opts.get('level', 9))
3509 3514 for chunk in it:
3510 3515 data = z.compress(chunk)
3511 3516 if data:
3512 3517 yield data
3513 3518
3514 3519 yield z.flush()
3515 3520
3516 3521 def decompressorreader(self, fh):
3517 3522 def gen():
3518 3523 d = bz2.BZ2Decompressor()
3519 3524 for chunk in filechunkiter(fh):
3520 3525 yield d.decompress(chunk)
3521 3526
3522 3527 return chunkbuffer(gen())
3523 3528
3524 3529 compengines.register(_bz2engine())
3525 3530
3526 3531 class _truncatedbz2engine(compressionengine):
3527 3532 def name(self):
3528 3533 return 'bz2truncated'
3529 3534
3530 3535 def bundletype(self):
3531 3536 return None, '_truncatedBZ'
3532 3537
3533 3538 # We don't implement compressstream because it is hackily handled elsewhere.
3534 3539
3535 3540 def decompressorreader(self, fh):
3536 3541 def gen():
3537 3542 # The input stream doesn't have the 'BZ' header. So add it back.
3538 3543 d = bz2.BZ2Decompressor()
3539 3544 d.decompress('BZ')
3540 3545 for chunk in filechunkiter(fh):
3541 3546 yield d.decompress(chunk)
3542 3547
3543 3548 return chunkbuffer(gen())
3544 3549
3545 3550 compengines.register(_truncatedbz2engine())
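The header trick above can be demonstrated directly: the truncated stream is an ordinary bzip2 stream with the leading ``BZ`` magic stripped (the surrounding format consumes those bytes), and priming the decompressor with the two magic bytes makes the remainder decodable. A small standalone check:

    import bz2

    full = bz2.compress(b'some changegroup payload')
    headerless = full[2:]              # stream with the 'BZ' magic stripped
    d = bz2.BZ2Decompressor()
    d.decompress(b'BZ')                # feed the magic back, as above
    assert d.decompress(headerless) == b'some changegroup payload'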
3546 3551
3547 3552 class _noopengine(compressionengine):
3548 3553 def name(self):
3549 3554 return 'none'
3550 3555
3551 3556 def bundletype(self):
3552 3557 """No compression is performed.
3553 3558
3554 3559 Use this compression engine to explicitly disable compression.
3555 3560 """
3556 3561 return 'none', 'UN'
3557 3562
3558 3563 # Clients always support uncompressed payloads. Servers don't advertise
3559 3564 # it by default because, unless you are on a fast network, uncompressed
3560 3565 # payloads can easily saturate your network pipe.
3561 3566 def wireprotosupport(self):
3562 3567 return compewireprotosupport('none', 0, 10)
3563 3568
3564 3569 # We don't implement revlogheader because it is handled specially
3565 3570 # in the revlog class.
3566 3571
3567 3572 def compressstream(self, it, opts=None):
3568 3573 return it
3569 3574
3570 3575 def decompressorreader(self, fh):
3571 3576 return fh
3572 3577
3573 3578 class nooprevlogcompressor(object):
3574 3579 def compress(self, data):
3575 3580 return None
3576 3581
3577 3582 def revlogcompressor(self, opts=None):
3578 3583 return self.nooprevlogcompressor()
3579 3584
3580 3585 compengines.register(_noopengine())
3581 3586
3582 3587 class _zstdengine(compressionengine):
3583 3588 def name(self):
3584 3589 return 'zstd'
3585 3590
3586 3591 @propertycache
3587 3592 def _module(self):
3588 3593 # Not all installs have the zstd module available. So defer importing
3589 3594 # until first access.
3590 3595 try:
3591 3596 from . import zstd
3592 3597 # Force delayed import.
3593 3598 zstd.__version__
3594 3599 return zstd
3595 3600 except ImportError:
3596 3601 return None
3597 3602
3598 3603 def available(self):
3599 3604 return bool(self._module)
3600 3605
3601 3606 def bundletype(self):
3602 3607 """A modern compression algorithm that is fast and highly flexible.
3603 3608
3604 3609 Only supported by Mercurial 4.1 and newer clients.
3605 3610
3606 3611 With the default settings, zstd compression is both faster and yields
3607 3612 better compression than ``gzip``. It also frequently yields better
3608 3613 compression than ``bzip2`` while operating at much higher speeds.
3609 3614
3610 3615 If this engine is available and backwards compatibility is not a
3611 3616 concern, it is likely the best available engine.
3612 3617 """
3613 3618 return 'zstd', 'ZS'
3614 3619
3615 3620 def wireprotosupport(self):
3616 3621 return compewireprotosupport('zstd', 50, 50)
3617 3622
3618 3623 def revlogheader(self):
3619 3624 return '\x28'
3620 3625
3621 3626 def compressstream(self, it, opts=None):
3622 3627 opts = opts or {}
3623 3628 # zstd level 3 is almost always significantly faster than zlib
3624 3629 # while providing no worse compression. It strikes a good balance
3625 3630 # between speed and compression.
3626 3631 level = opts.get('level', 3)
3627 3632
3628 3633 zstd = self._module
3629 3634 z = zstd.ZstdCompressor(level=level).compressobj()
3630 3635 for chunk in it:
3631 3636 data = z.compress(chunk)
3632 3637 if data:
3633 3638 yield data
3634 3639
3635 3640 yield z.flush()
3636 3641
3637 3642 def decompressorreader(self, fh):
3638 3643 zstd = self._module
3639 3644 dctx = zstd.ZstdDecompressor()
3640 3645 return chunkbuffer(dctx.read_from(fh))
3641 3646
3642 3647 class zstdrevlogcompressor(object):
3643 3648 def __init__(self, zstd, level=3):
3644 3649 # Writing the content size adds a few bytes to the output. However,
3645 3650 # it allows decompression to be more efficient since we can
3646 3651 # pre-allocate a buffer to hold the result.
3647 3652 self._cctx = zstd.ZstdCompressor(level=level,
3648 3653 write_content_size=True)
3649 3654 self._dctx = zstd.ZstdDecompressor()
3650 3655 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3651 3656 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3652 3657
3653 3658 def compress(self, data):
3654 3659 insize = len(data)
3655 3660 # Caller handles empty input case.
3656 3661 assert insize > 0
3657 3662
3658 3663 if insize < 50:
3659 3664 return None
3660 3665
3661 3666 elif insize <= 1000000:
3662 3667 compressed = self._cctx.compress(data)
3663 3668 if len(compressed) < insize:
3664 3669 return compressed
3665 3670 return None
3666 3671 else:
3667 3672 z = self._cctx.compressobj()
3668 3673 chunks = []
3669 3674 pos = 0
3670 3675 while pos < insize:
3671 3676 pos2 = pos + self._compinsize
3672 3677 chunk = z.compress(data[pos:pos2])
3673 3678 if chunk:
3674 3679 chunks.append(chunk)
3675 3680 pos = pos2
3676 3681 chunks.append(z.flush())
3677 3682
3678 3683 if sum(map(len, chunks)) < insize:
3679 3684 return ''.join(chunks)
3680 3685 return None
3681 3686
3682 3687 def decompress(self, data):
3683 3688 insize = len(data)
3684 3689
3685 3690 try:
3686 3691 # This was measured to be faster than other streaming
3687 3692 # decompressors.
3688 3693 dobj = self._dctx.decompressobj()
3689 3694 chunks = []
3690 3695 pos = 0
3691 3696 while pos < insize:
3692 3697 pos2 = pos + self._decompinsize
3693 3698 chunk = dobj.decompress(data[pos:pos2])
3694 3699 if chunk:
3695 3700 chunks.append(chunk)
3696 3701 pos = pos2
3697 3702 # Frame should be exhausted, so no finish() API.
3698 3703
3699 3704 return ''.join(chunks)
3700 3705 except Exception as e:
3701 3706 raise error.RevlogError(_('revlog decompress error: %s') %
3702 3707 str(e))
3703 3708
3704 3709 def revlogcompressor(self, opts=None):
3705 3710 opts = opts or {}
3706 3711 return self.zstdrevlogcompressor(self._module,
3707 3712 level=opts.get('level', 3))
3708 3713
3709 3714 compengines.register(_zstdengine())
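Both revlog compressors above follow the same size-based strategy: give up on tiny inputs, compress mid-sized revisions in one shot, stream anything larger in fixed windows, and return ``None`` whenever compression would not actually save space. A standalone restatement of the zlib variant's decision logic:

    import zlib

    def revlogcompress(data):
        insize = len(data)
        if insize < 44:                # too small for zlib to ever win
            return None
        if insize <= 1000000:          # one-shot for typical revisions
            compressed = zlib.compress(data)
            return compressed if len(compressed) < insize else None
        # Stream 1 MB windows so zlib doesn't copy the whole buffer.
        z = zlib.compressobj()
        parts = []
        for pos in range(0, insize, 2 ** 20):
            parts.append(z.compress(data[pos:pos + 2 ** 20]))
        parts.append(z.flush())
        joined = b''.join(parts)
        return joined if len(joined) < insize else None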
3710 3715
3711 3716 def bundlecompressiontopics():
3712 3717 """Obtains a list of available bundle compressions for use in help."""
3713 3718 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
3714 3719 items = {}
3715 3720
3716 3721 # We need to format the docstring. So use a dummy object/type to hold it
3717 3722 # rather than mutating the original.
3718 3723 class docobject(object):
3719 3724 pass
3720 3725
3721 3726 for name in compengines:
3722 3727 engine = compengines[name]
3723 3728
3724 3729 if not engine.available():
3725 3730 continue
3726 3731
3727 3732 bt = engine.bundletype()
3728 3733 if not bt or not bt[0]:
3729 3734 continue
3730 3735
3731 3736 doc = pycompat.sysstr('``%s``\n %s') % (
3732 3737 bt[0], engine.bundletype.__doc__)
3733 3738
3734 3739 value = docobject()
3735 3740 value.__doc__ = doc
3736 3741
3737 3742 items[bt[0]] = value
3738 3743
3739 3744 return items
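The filtering above mirrors how any caller can enumerate usable bundle engines. A short sketch, assuming it runs against this Mercurial tree (``util.compengines`` is this module's registry, iterated exactly as in the loop above):

    from mercurial import util

    for name in util.compengines:
        engine = util.compengines[name]
        bt = engine.bundletype()
        if engine.available() and bt and bt[0]:
            print('%s: bundle spec %s, header %s' % (name, bt[0], bt[1]))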
3740 3745
3741 3746 # convenient shortcut
3742 3747 dst = debugstacktrace
@@ -1,644 +1,644
1 1 # vfs.py - Mercurial 'vfs' classes
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import contextlib
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import stat
14 14 import tempfile
15 15 import threading
16 16
17 17 from .i18n import _
18 18 from . import (
19 19 error,
20 20 pathutil,
21 21 pycompat,
22 22 util,
23 23 )
24 24
25 25 class abstractvfs(object):
26 26 """Abstract base class; cannot be instantiated"""
27 27
28 28 def __init__(self, *args, **kwargs):
29 29 '''Prevent instantiation; don't call this from subclasses.'''
30 30 raise NotImplementedError('attempted instantiating ' + str(type(self)))
31 31
32 32 def tryread(self, path):
33 33 '''gracefully return an empty string for missing files'''
34 34 try:
35 35 return self.read(path)
36 36 except IOError as inst:
37 37 if inst.errno != errno.ENOENT:
38 38 raise
39 39 return ""
40 40
41 41 def tryreadlines(self, path, mode='rb'):
42 42 '''gracefully return an empty array for missing files'''
43 43 try:
44 44 return self.readlines(path, mode=mode)
45 45 except IOError as inst:
46 46 if inst.errno != errno.ENOENT:
47 47 raise
48 48 return []
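Both helpers swallow only ``ENOENT`` and re-raise everything else, so genuine I/O failures still surface. The same pattern in standalone form:

    import errno

    def tryread(path):
        # Missing files yield an empty string; other errors propagate.
        try:
            with open(path, 'rb') as fp:
                return fp.read()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b''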
49 49
50 50 @util.propertycache
51 51 def open(self):
52 52 '''Open ``path`` file, which is relative to vfs root.
53 53
54 54 Newly created directories are marked as "not to be indexed by
55 55 the content indexing service", if ``notindexed`` is specified
56 56 for "write" mode access.
57 57 '''
58 58 return self.__call__
59 59
60 60 def read(self, path):
61 61 with self(path, 'rb') as fp:
62 62 return fp.read()
63 63
64 64 def readlines(self, path, mode='rb'):
65 65 with self(path, mode=mode) as fp:
66 66 return fp.readlines()
67 67
68 68 def write(self, path, data, backgroundclose=False):
69 69 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
70 70 return fp.write(data)
71 71
72 72 def writelines(self, path, data, mode='wb', notindexed=False):
73 73 with self(path, mode=mode, notindexed=notindexed) as fp:
74 74 return fp.writelines(data)
75 75
76 76 def append(self, path, data):
77 77 with self(path, 'ab') as fp:
78 78 return fp.write(data)
79 79
80 80 def basename(self, path):
81 81 """return base element of a path (as os.path.basename would do)
82 82
83 83 This exists to allow handling of strange encoding if needed."""
84 84 return os.path.basename(path)
85 85
86 86 def chmod(self, path, mode):
87 87 return os.chmod(self.join(path), mode)
88 88
89 89 def dirname(self, path):
90 90 """return dirname element of a path (as os.path.dirname would do)
91 91
92 92 This exists to allow handling of strange encoding if needed."""
93 93 return os.path.dirname(path)
94 94
95 95 def exists(self, path=None):
96 96 return os.path.exists(self.join(path))
97 97
98 98 def fstat(self, fp):
99 99 return util.fstat(fp)
100 100
101 101 def isdir(self, path=None):
102 102 return os.path.isdir(self.join(path))
103 103
104 104 def isfile(self, path=None):
105 105 return os.path.isfile(self.join(path))
106 106
107 107 def islink(self, path=None):
108 108 return os.path.islink(self.join(path))
109 109
110 110 def isfileorlink(self, path=None):
111 111 '''return whether path is a regular file or a symlink
112 112
113 113 Unlike isfile, this doesn't follow symlinks.'''
114 114 try:
115 115 st = self.lstat(path)
116 116 except OSError:
117 117 return False
118 118 mode = st.st_mode
119 119 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
120 120
121 121 def reljoin(self, *paths):
122 122 """join various elements of a path together (as os.path.join would do)
123 123
124 124 The vfs base is not injected so that paths stay relative. This exists
125 125 to allow handling of strange encoding if needed."""
126 126 return os.path.join(*paths)
127 127
128 128 def split(self, path):
129 129 """split top-most element of a path (as os.path.split would do)
130 130
131 131 This exists to allow handling of strange encoding if needed."""
132 132 return os.path.split(path)
133 133
134 134 def lexists(self, path=None):
135 135 return os.path.lexists(self.join(path))
136 136
137 137 def lstat(self, path=None):
138 138 return os.lstat(self.join(path))
139 139
140 140 def listdir(self, path=None):
141 141 return os.listdir(self.join(path))
142 142
143 143 def makedir(self, path=None, notindexed=True):
144 144 return util.makedir(self.join(path), notindexed)
145 145
146 146 def makedirs(self, path=None, mode=None):
147 147 return util.makedirs(self.join(path), mode)
148 148
149 149 def makelock(self, info, path):
150 150 return util.makelock(info, self.join(path))
151 151
152 152 def mkdir(self, path=None):
153 153 return os.mkdir(self.join(path))
154 154
155 155 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
156 156 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
157 157 dir=self.join(dir), text=text)
158 158 dname, fname = util.split(name)
159 159 if dir:
160 160 return fd, os.path.join(dir, fname)
161 161 else:
162 162 return fd, fname
163 163
164 164 def readdir(self, path=None, stat=None, skip=None):
165 165 return util.listdir(self.join(path), stat, skip)
166 166
167 167 def readlock(self, path):
168 168 return util.readlock(self.join(path))
169 169
170 170 def rename(self, src, dst, checkambig=False):
171 171 """Rename from src to dst
172 172
173 173 checkambig argument is used with util.filestat, and is useful
174 174 only if destination file is guarded by any lock
175 175 (e.g. repo.lock or repo.wlock).
176 176 """
177 177 srcpath = self.join(src)
178 178 dstpath = self.join(dst)
179 oldstat = checkambig and util.filestat(dstpath)
179 oldstat = checkambig and util.filestat.frompath(dstpath)
180 180 if oldstat and oldstat.stat:
181 181 def dorename(spath, dpath):
182 182 ret = util.rename(spath, dpath)
183 newstat = util.filestat(dpath)
183 newstat = util.filestat.frompath(dpath)
184 184 if newstat.isambig(oldstat):
185 185 # stat of renamed file is ambiguous to original one
186 186 return ret, newstat.avoidambig(dpath, oldstat)
187 187 return ret, True
188 188 ret, avoided = dorename(srcpath, dstpath)
189 189 if not avoided:
190 190 # simply copy to change owner of srcpath (see issue5418)
191 191 util.copyfile(dstpath, srcpath)
192 192 ret, avoided = dorename(srcpath, dstpath)
193 193 return ret
194 194 return util.rename(srcpath, dstpath)
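The ``checkambig`` handling above guards against stat ambiguity: if the rewritten file ends up with the same ctime as its predecessor, size/mtime-based cache validation can miss the change. A simplified standalone restatement of the avoidance strategy ``util.filestat`` implements (advance mtime by one second, masked to 31 bits):

    import os

    def avoidambig(path, oldstat):
        # If ctime didn't advance, bump mtime so caches see the change.
        newstat = os.stat(path)
        if newstat.st_ctime == oldstat.st_ctime:
            advanced = (int(oldstat.st_mtime) + 1) & 0x7fffffff
            os.utime(path, (advanced, advanced))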
195 195
196 196 def readlink(self, path):
197 197 return os.readlink(self.join(path))
198 198
199 199 def removedirs(self, path=None):
200 200 """Remove a leaf directory and all empty intermediate ones
201 201 """
202 202 return util.removedirs(self.join(path))
203 203
204 204 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
205 205 """Remove a directory tree recursively
206 206
207 207 If ``forcibly``, this tries to remove READ-ONLY files, too.
208 208 """
209 209 if forcibly:
210 210 def onerror(function, path, excinfo):
211 211 if function is not os.remove:
212 212 raise
213 213 # read-only files cannot be unlinked under Windows
214 214 s = os.stat(path)
215 215 if (s.st_mode & stat.S_IWRITE) != 0:
216 216 raise
217 217 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
218 218 os.remove(path)
219 219 else:
220 220 onerror = None
221 221 return shutil.rmtree(self.join(path),
222 222 ignore_errors=ignore_errors, onerror=onerror)
223 223
224 224 def setflags(self, path, l, x):
225 225 return util.setflags(self.join(path), l, x)
226 226
227 227 def stat(self, path=None):
228 228 return os.stat(self.join(path))
229 229
230 230 def unlink(self, path=None):
231 231 return util.unlink(self.join(path))
232 232
233 233 def tryunlink(self, path=None):
234 234 """Attempt to remove a file, ignoring missing file errors."""
235 235 util.tryunlink(self.join(path))
236 236
237 237 def unlinkpath(self, path=None, ignoremissing=False):
238 238 return util.unlinkpath(self.join(path), ignoremissing=ignoremissing)
239 239
240 240 def utime(self, path=None, t=None):
241 241 return os.utime(self.join(path), t)
242 242
243 243 def walk(self, path=None, onerror=None):
244 244 """Yield (dirpath, dirs, files) tuple for each directories under path
245 245
246 246 ``dirpath`` is relative one from the root of this vfs. This
247 247 uses ``os.sep`` as path separator, even you specify POSIX
248 248 style ``path``.
249 249
250 250 "The root of this vfs" is represented as empty ``dirpath``.
251 251 """
252 252 root = os.path.normpath(self.join(None))
253 253 # when dirpath == root, dirpath[prefixlen:] becomes empty
254 254 # because len(dirpath) < prefixlen.
255 255 prefixlen = len(pathutil.normasprefix(root))
256 256 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
257 257 yield (dirpath[prefixlen:], dirs, files)
258 258
259 259 @contextlib.contextmanager
260 260 def backgroundclosing(self, ui, expectedcount=-1):
261 261 """Allow files to be closed asynchronously.
262 262
263 263 When this context manager is active, ``backgroundclose`` can be passed
264 264 to ``__call__``/``open`` to result in the file possibly being closed
265 265 asynchronously, on a background thread.
266 266 """
267 267 # This is an arbitrary restriction and could be changed if we ever
268 268 # have a use case.
269 269 vfs = getattr(self, 'vfs', self)
270 270 if getattr(vfs, '_backgroundfilecloser', None):
271 271 raise error.Abort(
272 272 _('can only have 1 active background file closer'))
273 273
274 274 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
275 275 try:
276 276 vfs._backgroundfilecloser = bfc
277 277 yield bfc
278 278 finally:
279 279 vfs._backgroundfilecloser = None
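The context manager above exists to amortize slow ``close()`` calls (mainly on Windows/NTFS) across worker threads. A miniature of the underlying pattern, stripped of the config and error plumbing:

    import threading
    try:
        import queue                  # Python 3
    except ImportError:
        import Queue as queue         # Python 2

    def _worker(q):
        while True:
            fh = q.get()
            if fh is None:            # sentinel: shut this worker down
                return
            fh.close()

    q = queue.Queue(maxsize=384)
    threads = [threading.Thread(target=_worker, args=(q,))
               for _ in range(4)]
    for t in threads:
        t.start()
    # ... hand opened handles to q.put(fh) instead of calling fh.close() ...
    for _ in threads:
        q.put(None)                   # one sentinel per worker
    for t in threads:
        t.join()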
280 280
281 281 class vfs(abstractvfs):
282 282 '''Operate files relative to a base directory
283 283
284 284 This class is used to hide the details of COW semantics and
285 285 remote file access from higher level code.
286 286 '''
287 287 def __init__(self, base, audit=True, expandpath=False, realpath=False):
288 288 if expandpath:
289 289 base = util.expandpath(base)
290 290 if realpath:
291 291 base = os.path.realpath(base)
292 292 self.base = base
293 293 self.mustaudit = audit
294 294 self.createmode = None
295 295 self._trustnlink = None
296 296
297 297 @property
298 298 def mustaudit(self):
299 299 return self._audit
300 300
301 301 @mustaudit.setter
302 302 def mustaudit(self, onoff):
303 303 self._audit = onoff
304 304 if onoff:
305 305 self.audit = pathutil.pathauditor(self.base)
306 306 else:
307 307 self.audit = util.always
308 308
309 309 @util.propertycache
310 310 def _cansymlink(self):
311 311 return util.checklink(self.base)
312 312
313 313 @util.propertycache
314 314 def _chmod(self):
315 315 return util.checkexec(self.base)
316 316
317 317 def _fixfilemode(self, name):
318 318 if self.createmode is None or not self._chmod:
319 319 return
320 320 os.chmod(name, self.createmode & 0o666)
321 321
322 322 def __call__(self, path, mode="r", text=False, atomictemp=False,
323 323 notindexed=False, backgroundclose=False, checkambig=False):
324 324 '''Open ``path`` file, which is relative to vfs root.
325 325
326 326 Newly created directories are marked as "not to be indexed by
327 327 the content indexing service", if ``notindexed`` is specified
328 328 for "write" mode access.
329 329
330 330 If ``backgroundclose`` is passed, the file may be closed asynchronously.
331 331 It can only be used if the ``self.backgroundclosing()`` context manager
332 332 is active. This should only be specified if the following criteria hold:
333 333
334 334 1. There is a potential for writing thousands of files. Unless you
335 335 are writing thousands of files, the performance benefits of
336 336 asynchronously closing files are not realized.
337 337 2. Files are opened exactly once for the ``backgroundclosing``
338 338 active duration and are therefore free of race conditions between
339 339 closing a file on a background thread and reopening it. (If the
340 340 file were opened multiple times, there could be unflushed data
341 341 because the original file handle hasn't been flushed/closed yet.)
342 342
343 343 ``checkambig`` argument is passed to atomictempfile (valid
344 344 only for writing), and is useful only if target file is
345 345 guarded by any lock (e.g. repo.lock or repo.wlock).
346 346 '''
347 347 if self._audit:
348 348 r = util.checkosfilename(path)
349 349 if r:
350 350 raise error.Abort("%s: %r" % (r, path))
351 351 self.audit(path)
352 352 f = self.join(path)
353 353
354 354 if not text and "b" not in mode:
355 355 mode += "b" # for that other OS
356 356
357 357 nlink = -1
358 358 if mode not in ('r', 'rb'):
359 359 dirname, basename = util.split(f)
360 360 # If basename is empty, then the path is malformed because it points
361 361 # to a directory. Let the posixfile() call below raise IOError.
362 362 if basename:
363 363 if atomictemp:
364 364 util.makedirs(dirname, self.createmode, notindexed)
365 365 return util.atomictempfile(f, mode, self.createmode,
366 366 checkambig=checkambig)
367 367 try:
368 368 if 'w' in mode:
369 369 util.unlink(f)
370 370 nlink = 0
371 371 else:
372 372 # nlinks() may behave differently for files on Windows
373 373 # shares if the file is open.
374 374 with util.posixfile(f):
375 375 nlink = util.nlinks(f)
376 376 if nlink < 1:
377 377 nlink = 2 # force mktempcopy (issue1922)
378 378 except (OSError, IOError) as e:
379 379 if e.errno != errno.ENOENT:
380 380 raise
381 381 nlink = 0
382 382 util.makedirs(dirname, self.createmode, notindexed)
383 383 if nlink > 0:
384 384 if self._trustnlink is None:
385 385 self._trustnlink = nlink > 1 or util.checknlink(f)
386 386 if nlink > 1 or not self._trustnlink:
387 387 util.rename(util.mktempcopy(f), f)
388 388 fp = util.posixfile(f, mode)
389 389 if nlink == 0:
390 390 self._fixfilemode(f)
391 391
392 392 if checkambig:
393 393 if mode in ('r', 'rb'):
394 394 raise error.Abort(_('implementation error: mode %s is not'
395 395 ' valid for checkambig=True') % mode)
396 396 fp = checkambigatclosing(fp)
397 397
398 398 if backgroundclose:
399 399 if not self._backgroundfilecloser:
400 400 raise error.Abort(_('backgroundclose can only be used when a '
401 401 'backgroundclosing context manager is active')
402 402 )
403 403
404 404 fp = delayclosedfile(fp, self._backgroundfilecloser)
405 405
406 406 return fp
407 407
408 408 def symlink(self, src, dst):
409 409 self.audit(dst)
410 410 linkname = self.join(dst)
411 411 util.tryunlink(linkname)
412 412
413 413 util.makedirs(os.path.dirname(linkname), self.createmode)
414 414
415 415 if self._cansymlink:
416 416 try:
417 417 os.symlink(src, linkname)
418 418 except OSError as err:
419 419 raise OSError(err.errno, _('could not symlink to %r: %s') %
420 420 (src, err.strerror), linkname)
421 421 else:
422 422 self.write(dst, src)
423 423
424 424 def join(self, path, *insidef):
425 425 if path:
426 426 return os.path.join(self.base, path, *insidef)
427 427 else:
428 428 return self.base
429 429
430 430 opener = vfs
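In practice the class is driven through a handful of calls. A short sketch, assuming it runs against this Mercurial tree; the directory and file names are illustrative:

    from mercurial import vfs as vfsmod

    v = vfsmod.vfs('/tmp/vfs-demo', audit=False)  # illustrative path
    v.write('notes.txt', b'hello\n')   # parent directories made on demand
    assert v.read('notes.txt') == b'hello\n'
    assert v.tryread('missing.txt') == b''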
431 431
432 432 class auditvfs(object):
433 433 def __init__(self, vfs):
434 434 self.vfs = vfs
435 435
436 436 @property
437 437 def mustaudit(self):
438 438 return self.vfs.mustaudit
439 439
440 440 @mustaudit.setter
441 441 def mustaudit(self, onoff):
442 442 self.vfs.mustaudit = onoff
443 443
444 444 @property
445 445 def options(self):
446 446 return self.vfs.options
447 447
448 448 @options.setter
449 449 def options(self, value):
450 450 self.vfs.options = value
451 451
452 452 class filtervfs(abstractvfs, auditvfs):
453 453 '''Wrapper vfs for filtering filenames with a function.'''
454 454
455 455 def __init__(self, vfs, filter):
456 456 auditvfs.__init__(self, vfs)
457 457 self._filter = filter
458 458
459 459 def __call__(self, path, *args, **kwargs):
460 460 return self.vfs(self._filter(path), *args, **kwargs)
461 461
462 462 def join(self, path, *insidef):
463 463 if path:
464 464 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
465 465 else:
466 466 return self.vfs.join(path)
467 467
468 468 filteropener = filtervfs
469 469
470 470 class readonlyvfs(abstractvfs, auditvfs):
471 471 '''Wrapper vfs preventing any writing.'''
472 472
473 473 def __init__(self, vfs):
474 474 auditvfs.__init__(self, vfs)
475 475
476 476 def __call__(self, path, mode='r', *args, **kw):
477 477 if mode not in ('r', 'rb'):
478 478 raise error.Abort(_('this vfs is read only'))
479 479 return self.vfs(path, mode, *args, **kw)
480 480
481 481 def join(self, path, *insidef):
482 482 return self.vfs.join(path, *insidef)
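The wrapper only needs to intercept ``__call__``: any attempt to open for writing aborts before reaching the backing vfs. A sketch under the same assumptions as the previous example:

    from mercurial import error, vfs as vfsmod

    v = vfsmod.vfs('/tmp/vfs-demo', audit=False)
    v.write('notes.txt', b'hello\n')
    ro = vfsmod.readonlyvfs(v)
    assert ro.read('notes.txt') == b'hello\n'
    try:
        ro('notes.txt', 'wb')
    except error.Abort:
        pass                           # writes through the wrapper abort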
483 483
484 484 class closewrapbase(object):
485 485 """Base class of wrapper, which hooks closing
486 486
487 487 Do not instantiate outside of the vfs layer.
488 488 """
489 489 def __init__(self, fh):
490 490 object.__setattr__(self, r'_origfh', fh)
491 491
492 492 def __getattr__(self, attr):
493 493 return getattr(self._origfh, attr)
494 494
495 495 def __setattr__(self, attr, value):
496 496 return setattr(self._origfh, attr, value)
497 497
498 498 def __delattr__(self, attr):
499 499 return delattr(self._origfh, attr)
500 500
501 501 def __enter__(self):
502 502 return self._origfh.__enter__()
503 503
504 504 def __exit__(self, exc_type, exc_value, exc_tb):
504 504 raise NotImplementedError('__exit__ must be overridden by ' + str(type(self)))
506 506
507 507 def close(self):
508 508 raise NotImplementedError('close() must be overridden by ' + str(type(self)))
509 509
510 510 class delayclosedfile(closewrapbase):
511 511 """Proxy for a file object whose close is delayed.
512 512
513 513 Do not instantiate outside of the vfs layer.
514 514 """
515 515 def __init__(self, fh, closer):
516 516 super(delayclosedfile, self).__init__(fh)
517 517 object.__setattr__(self, r'_closer', closer)
518 518
519 519 def __exit__(self, exc_type, exc_value, exc_tb):
520 520 self._closer.close(self._origfh)
521 521
522 522 def close(self):
523 523 self._closer.close(self._origfh)
524 524
525 525 class backgroundfilecloser(object):
526 526 """Coordinates background closing of file handles on multiple threads."""
527 527 def __init__(self, ui, expectedcount=-1):
528 528 self._running = False
529 529 self._entered = False
530 530 self._threads = []
531 531 self._threadexception = None
532 532
533 533 # Only Windows/NTFS has slow file closing. So only enable by default
534 534 # on that platform. But allow it to be enabled elsewhere for testing.
535 535 defaultenabled = pycompat.osname == 'nt'
536 536 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
537 537
538 538 if not enabled:
539 539 return
540 540
541 541 # There is overhead to starting and stopping the background threads.
542 542 # Don't do background processing unless the file count is large enough
543 543 # to justify it.
544 544 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
545 545 2048)
546 546 # FUTURE dynamically start background threads after minfilecount closes.
547 547 # (We don't currently have any callers that don't know their file count)
548 548 if expectedcount > 0 and expectedcount < minfilecount:
549 549 return
550 550
551 551 # Windows defaults to a limit of 512 open files. A buffer of 128
552 552 # should give us enough headway.
553 553 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
554 554 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
555 555
556 556 ui.debug('starting %d threads for background file closing\n' %
557 557 threadcount)
558 558
559 559 self._queue = util.queue(maxsize=maxqueue)
560 560 self._running = True
561 561
562 562 for i in range(threadcount):
563 563 t = threading.Thread(target=self._worker, name='backgroundcloser')
564 564 self._threads.append(t)
565 565 t.start()
566 566
567 567 def __enter__(self):
568 568 self._entered = True
569 569 return self
570 570
571 571 def __exit__(self, exc_type, exc_value, exc_tb):
572 572 self._running = False
573 573
574 574 # Wait for threads to finish closing so open files don't linger for
575 575 # longer than the lifetime of the context manager.
576 576 for t in self._threads:
577 577 t.join()
578 578
579 579 def _worker(self):
580 580 """Main routine for worker thread."""
581 581 while True:
582 582 try:
583 583 fh = self._queue.get(block=True, timeout=0.100)
584 584 # Need to catch exceptions here, or the thread will terminate and
585 585 # we could orphan file descriptors.
586 586 try:
587 587 fh.close()
588 588 except Exception as e:
589 589 # Stash so we can re-raise from the main thread later.
590 590 self._threadexception = e
591 591 except util.empty:
592 592 if not self._running:
593 593 break
594 594
595 595 def close(self, fh):
596 596 """Schedule a file for closing."""
597 597 if not self._entered:
598 598 raise error.Abort(_('can only call close() when context manager '
599 599 'active'))
600 600
601 601 # If a background thread encountered an exception, raise now so we fail
602 602 # fast. Otherwise we may potentially go on for minutes until the error
603 603 # is acted on.
604 604 if self._threadexception:
605 605 e = self._threadexception
606 606 self._threadexception = None
607 607 raise e
608 608
609 609 # If we're not actively running, close synchronously.
610 610 if not self._running:
611 611 fh.close()
612 612 return
613 613
614 614 self._queue.put(fh, block=True, timeout=None)
615 615
616 616 class checkambigatclosing(closewrapbase):
617 617 """Proxy for a file object, to avoid ambiguity of file stat
618 618
619 619 See also util.filestat for detail about "ambiguity of file stat".
620 620
621 621 This proxy is useful only if the target file is guarded by any
622 622 lock (e.g. repo.lock or repo.wlock)
623 623
624 624 Do not instantiate outside of the vfs layer.
625 625 """
626 626 def __init__(self, fh):
627 627 super(checkambigatclosing, self).__init__(fh)
628 object.__setattr__(self, r'_oldstat', util.filestat(fh.name))
628 object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
629 629
630 630 def _checkambig(self):
631 631 oldstat = self._oldstat
632 632 if oldstat.stat:
633 newstat = util.filestat(self._origfh.name)
633 newstat = util.filestat.frompath(self._origfh.name)
634 634 if newstat.isambig(oldstat):
635 635 # stat of changed file is ambiguous to original one
636 636 newstat.avoidambig(self._origfh.name, oldstat)
637 637
638 638 def __exit__(self, exc_type, exc_value, exc_tb):
639 639 self._origfh.__exit__(exc_type, exc_value, exc_tb)
640 640 self._checkambig()
641 641
642 642 def close(self):
643 643 self._origfh.close()
644 644 self._checkambig()
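Callers normally get this wrapper by passing ``checkambig=True`` when opening a lock-guarded file for writing; the ambiguity check then runs automatically when the handle is closed. A sketch under the same assumptions as the earlier vfs examples:

    from mercurial import vfs as vfsmod

    v = vfsmod.vfs('/tmp/vfs-demo', audit=False)
    with v('state', 'wb', checkambig=True) as fp:  # wrapped handle
        fp.write(b'some cached state')
    # On close, mtime is advanced if the rewrite would be ambiguous.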