auditvfs: forward options property from nested vfs...
Augie Fackler -
r29714:69109052 default
@@ -1,1423 +1,1431 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
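# Illustrative sketch (not part of scmutil.py): building and consuming a
# status tuple. The file names below are made up for demonstration.
#
#   st = status(['a.txt'], ['b.txt'], [], [], [], [], ['c.txt'])
#   st.modified           # ['a.txt'], the same object as st[0]
#   m, a, r = st[:3]      # plain tuple unpacking also works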
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
122 122 '''Report no changes for push/pull; excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision;
130 130 # we have to explicitly exclude it until discovery is cleaned up.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
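# Illustrative usage (not part of scmutil.py; `ui` and `repo` are assumed
# to be live ui and repository objects):
#
#   audit = casecollisionauditor(ui, abort=False, dirstate=repo.dirstate)
#   audit('README')   # warns if e.g. 'readme' is already tracked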
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
233 233
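# Illustrative sketch of the validation pattern this enables (the cache
# object and its attributes below are hypothetical):
#
#   key = filteredhash(repo, cache.tiprev)
#   cachestillvalid = (key == cache.filteredkey)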
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 def open(self, path, mode="r", text=False, atomictemp=False,
260 260 notindexed=False, backgroundclose=False):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 self.open = self.__call__
268 268 return self.__call__(path, mode, text, atomictemp, notindexed,
269 269 backgroundclose=backgroundclose)
270 270
271 271 def read(self, path):
272 272 with self(path, 'rb') as fp:
273 273 return fp.read()
274 274
275 275 def readlines(self, path, mode='rb'):
276 276 with self(path, mode=mode) as fp:
277 277 return fp.readlines()
278 278
279 279 def write(self, path, data, backgroundclose=False):
280 280 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
281 281 return fp.write(data)
282 282
283 283 def writelines(self, path, data, mode='wb', notindexed=False):
284 284 with self(path, mode=mode, notindexed=notindexed) as fp:
285 285 return fp.writelines(data)
286 286
287 287 def append(self, path, data):
288 288 with self(path, 'ab') as fp:
289 289 return fp.write(data)
290 290
291 291 def basename(self, path):
292 292 """return base element of a path (as os.path.basename would do)
293 293
294 294 This exists to allow handling of strange encoding if needed."""
295 295 return os.path.basename(path)
296 296
297 297 def chmod(self, path, mode):
298 298 return os.chmod(self.join(path), mode)
299 299
300 300 def dirname(self, path):
301 301 """return dirname element of a path (as os.path.dirname would do)
302 302
303 303 This exists to allow handling of strange encoding if needed."""
304 304 return os.path.dirname(path)
305 305
306 306 def exists(self, path=None):
307 307 return os.path.exists(self.join(path))
308 308
309 309 def fstat(self, fp):
310 310 return util.fstat(fp)
311 311
312 312 def isdir(self, path=None):
313 313 return os.path.isdir(self.join(path))
314 314
315 315 def isfile(self, path=None):
316 316 return os.path.isfile(self.join(path))
317 317
318 318 def islink(self, path=None):
319 319 return os.path.islink(self.join(path))
320 320
321 321 def isfileorlink(self, path=None):
322 322 '''return whether path is a regular file or a symlink
323 323
324 324 Unlike isfile, this doesn't follow symlinks.'''
325 325 try:
326 326 st = self.lstat(path)
327 327 except OSError:
328 328 return False
329 329 mode = st.st_mode
330 330 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
331 331
332 332 def reljoin(self, *paths):
333 333 """join various elements of a path together (as os.path.join would do)
334 334
335 335 The vfs base is not injected so that paths stay relative. This exists
336 336 to allow handling of strange encoding if needed."""
337 337 return os.path.join(*paths)
338 338
339 339 def split(self, path):
340 340 """split top-most element of a path (as os.path.split would do)
341 341
342 342 This exists to allow handling of strange encoding if needed."""
343 343 return os.path.split(path)
344 344
345 345 def lexists(self, path=None):
346 346 return os.path.lexists(self.join(path))
347 347
348 348 def lstat(self, path=None):
349 349 return os.lstat(self.join(path))
350 350
351 351 def listdir(self, path=None):
352 352 return os.listdir(self.join(path))
353 353
354 354 def makedir(self, path=None, notindexed=True):
355 355 return util.makedir(self.join(path), notindexed)
356 356
357 357 def makedirs(self, path=None, mode=None):
358 358 return util.makedirs(self.join(path), mode)
359 359
360 360 def makelock(self, info, path):
361 361 return util.makelock(info, self.join(path))
362 362
363 363 def mkdir(self, path=None):
364 364 return os.mkdir(self.join(path))
365 365
366 366 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
367 367 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
368 368 dir=self.join(dir), text=text)
369 369 dname, fname = util.split(name)
370 370 if dir:
371 371 return fd, os.path.join(dir, fname)
372 372 else:
373 373 return fd, fname
374 374
375 375 def readdir(self, path=None, stat=None, skip=None):
376 376 return osutil.listdir(self.join(path), stat, skip)
377 377
378 378 def readlock(self, path):
379 379 return util.readlock(self.join(path))
380 380
381 381 def rename(self, src, dst, checkambig=False):
382 382 """Rename from src to dst
383 383
384 384 The checkambig argument is used with util.filestat, and is useful
385 385 only if the destination file is guarded by a lock
386 386 (e.g. repo.lock or repo.wlock).
387 387 """
388 388 dstpath = self.join(dst)
389 389 oldstat = checkambig and util.filestat(dstpath)
390 390 if oldstat and oldstat.stat:
391 391 ret = util.rename(self.join(src), dstpath)
392 392 newstat = util.filestat(dstpath)
393 393 if newstat.isambig(oldstat):
394 394 # stat of renamed file is ambiguous to original one
395 395 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
396 396 os.utime(dstpath, (advanced, advanced))
397 397 return ret
398 398 return util.rename(self.join(src), dstpath)
399 399
400 400 def readlink(self, path):
401 401 return os.readlink(self.join(path))
402 402
403 403 def removedirs(self, path=None):
404 404 """Remove a leaf directory and all empty intermediate ones
405 405 """
406 406 return util.removedirs(self.join(path))
407 407
408 408 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
409 409 """Remove a directory tree recursively
410 410
411 411 If ``forcibly``, this tries to remove READ-ONLY files, too.
412 412 """
413 413 if forcibly:
414 414 def onerror(function, path, excinfo):
415 415 if function is not os.remove:
416 416 raise
417 417 # read-only files cannot be unlinked under Windows
418 418 s = os.stat(path)
419 419 if (s.st_mode & stat.S_IWRITE) != 0:
420 420 raise
421 421 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
422 422 os.remove(path)
423 423 else:
424 424 onerror = None
425 425 return shutil.rmtree(self.join(path),
426 426 ignore_errors=ignore_errors, onerror=onerror)
427 427
428 428 def setflags(self, path, l, x):
429 429 return util.setflags(self.join(path), l, x)
430 430
431 431 def stat(self, path=None):
432 432 return os.stat(self.join(path))
433 433
434 434 def unlink(self, path=None):
435 435 return util.unlink(self.join(path))
436 436
437 437 def unlinkpath(self, path=None, ignoremissing=False):
438 438 return util.unlinkpath(self.join(path), ignoremissing)
439 439
440 440 def utime(self, path=None, t=None):
441 441 return os.utime(self.join(path), t)
442 442
443 443 def walk(self, path=None, onerror=None):
444 444 """Yield (dirpath, dirs, files) tuple for each directories under path
445 445
446 446 ``dirpath`` is relative one from the root of this vfs. This
447 447 uses ``os.sep`` as path separator, even you specify POSIX
448 448 style ``path``.
449 449
450 450 "The root of this vfs" is represented as empty ``dirpath``.
451 451 """
452 452 root = os.path.normpath(self.join(None))
453 453 # when dirpath == root, dirpath[prefixlen:] becomes empty
454 454 # because len(dirpath) < prefixlen.
455 455 prefixlen = len(pathutil.normasprefix(root))
456 456 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
457 457 yield (dirpath[prefixlen:], dirs, files)
458 458
459 459 @contextlib.contextmanager
460 460 def backgroundclosing(self, ui, expectedcount=-1):
461 461 """Allow files to be closed asynchronously.
462 462
463 463 When this context manager is active, ``backgroundclose`` can be passed
464 464 to ``__call__``/``open`` to result in the file possibly being closed
465 465 asynchronously, on a background thread.
466 466 """
467 467 # This is an arbitrary restriction and could be changed if we ever
468 468 # have a use case.
469 469 vfs = getattr(self, 'vfs', self)
470 470 if getattr(vfs, '_backgroundfilecloser', None):
471 471 raise error.Abort(
472 472 _('can only have 1 active background file closer'))
473 473
474 474 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
475 475 try:
476 476 vfs._backgroundfilecloser = bfc
477 477 yield bfc
478 478 finally:
479 479 vfs._backgroundfilecloser = None
480 480
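# Illustrative usage (assumes `myvfs` is a vfs instance, `ui` a ui object
# and `files` an iterable of (name, data) pairs):
#
#   with myvfs.backgroundclosing(ui, expectedcount=len(files)):
#       for name, data in files:
#           myvfs.write(name, data, backgroundclose=True)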
481 481 class vfs(abstractvfs):
482 482 '''Operate files relative to a base directory
483 483
484 484 This class is used to hide the details of COW semantics and
485 485 remote file access from higher level code.
486 486 '''
487 487 def __init__(self, base, audit=True, expandpath=False, realpath=False):
488 488 if expandpath:
489 489 base = util.expandpath(base)
490 490 if realpath:
491 491 base = os.path.realpath(base)
492 492 self.base = base
493 493 self.mustaudit = audit
494 494 self.createmode = None
495 495 self._trustnlink = None
496 496
497 497 @property
498 498 def mustaudit(self):
499 499 return self._audit
500 500
501 501 @mustaudit.setter
502 502 def mustaudit(self, onoff):
503 503 self._audit = onoff
504 504 if onoff:
505 505 self.audit = pathutil.pathauditor(self.base)
506 506 else:
507 507 self.audit = util.always
508 508
509 509 @util.propertycache
510 510 def _cansymlink(self):
511 511 return util.checklink(self.base)
512 512
513 513 @util.propertycache
514 514 def _chmod(self):
515 515 return util.checkexec(self.base)
516 516
517 517 def _fixfilemode(self, name):
518 518 if self.createmode is None or not self._chmod:
519 519 return
520 520 os.chmod(name, self.createmode & 0o666)
521 521
522 522 def __call__(self, path, mode="r", text=False, atomictemp=False,
523 523 notindexed=False, backgroundclose=False, checkambig=False):
524 524 '''Open ``path`` file, which is relative to vfs root.
525 525
526 526 Newly created directories are marked as "not to be indexed by
527 527 the content indexing service", if ``notindexed`` is specified
528 528 for "write" mode access.
529 529
530 530 If ``backgroundclose`` is passed, the file may be closed asynchronously.
531 531 It can only be used if the ``self.backgroundclosing()`` context manager
532 532 is active. This should only be specified if the following criteria hold:
533 533
534 534 1. There is a potential for writing thousands of files. Unless you
535 535 are writing thousands of files, the performance benefits of
536 536 asynchronously closing files are not realized.
537 537 2. Files are opened exactly once for the ``backgroundclosing``
538 538 active duration and are therefore free of race conditions between
539 539 closing a file on a background thread and reopening it. (If the
540 540 file were opened multiple times, there could be unflushed data
541 541 because the original file handle hasn't been flushed/closed yet.)
542 542
543 543 The ``checkambig`` argument is passed to atomictempfile (valid
544 544 only for writing), and is useful only if the target file is
545 545 guarded by a lock (e.g. repo.lock or repo.wlock).
546 546 '''
547 547 if self._audit:
548 548 r = util.checkosfilename(path)
549 549 if r:
550 550 raise error.Abort("%s: %r" % (r, path))
551 551 self.audit(path)
552 552 f = self.join(path)
553 553
554 554 if not text and "b" not in mode:
555 555 mode += "b" # for that other OS
556 556
557 557 nlink = -1
558 558 if mode not in ('r', 'rb'):
559 559 dirname, basename = util.split(f)
560 560 # If basename is empty, then the path is malformed because it points
561 561 # to a directory. Let the posixfile() call below raise IOError.
562 562 if basename:
563 563 if atomictemp:
564 564 util.makedirs(dirname, self.createmode, notindexed)
565 565 return util.atomictempfile(f, mode, self.createmode,
566 566 checkambig=checkambig)
567 567 try:
568 568 if 'w' in mode:
569 569 util.unlink(f)
570 570 nlink = 0
571 571 else:
572 572 # nlinks() may behave differently for files on Windows
573 573 # shares if the file is open.
574 574 with util.posixfile(f):
575 575 nlink = util.nlinks(f)
576 576 if nlink < 1:
577 577 nlink = 2 # force mktempcopy (issue1922)
578 578 except (OSError, IOError) as e:
579 579 if e.errno != errno.ENOENT:
580 580 raise
581 581 nlink = 0
582 582 util.makedirs(dirname, self.createmode, notindexed)
583 583 if nlink > 0:
584 584 if self._trustnlink is None:
585 585 self._trustnlink = nlink > 1 or util.checknlink(f)
586 586 if nlink > 1 or not self._trustnlink:
587 587 util.rename(util.mktempcopy(f), f)
588 588 fp = util.posixfile(f, mode)
589 589 if nlink == 0:
590 590 self._fixfilemode(f)
591 591
592 592 if backgroundclose:
593 593 if not self._backgroundfilecloser:
594 594 raise error.Abort(_('backgroundclose can only be used when a '
595 595 'backgroundclosing context manager is active')
596 596 )
597 597
598 598 fp = delayclosedfile(fp, self._backgroundfilecloser)
599 599
600 600 return fp
601 601
602 602 def symlink(self, src, dst):
603 603 self.audit(dst)
604 604 linkname = self.join(dst)
605 605 try:
606 606 os.unlink(linkname)
607 607 except OSError:
608 608 pass
609 609
610 610 util.makedirs(os.path.dirname(linkname), self.createmode)
611 611
612 612 if self._cansymlink:
613 613 try:
614 614 os.symlink(src, linkname)
615 615 except OSError as err:
616 616 raise OSError(err.errno, _('could not symlink to %r: %s') %
617 617 (src, err.strerror), linkname)
618 618 else:
619 619 self.write(dst, src)
620 620
621 621 def join(self, path, *insidef):
622 622 if path:
623 623 return os.path.join(self.base, path, *insidef)
624 624 else:
625 625 return self.base
626 626
627 627 opener = vfs
628 628
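# Illustrative usage of the vfs class (the base path is hypothetical):
#
#   v = vfs('/repo/.hg')
#   v.write('foo/bar', 'data')   # creates missing directories under the base
#   v.tryread('missing')         # returns '' instead of raising ENOENT
#   v.join('foo/bar')            # '/repo/.hg/foo/bar'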
629 629 class auditvfs(object):
630 630 def __init__(self, vfs):
631 631 self.vfs = vfs
632 632
633 633 @property
634 634 def mustaudit(self):
635 635 return self.vfs.mustaudit
636 636
637 637 @mustaudit.setter
638 638 def mustaudit(self, onoff):
639 639 self.vfs.mustaudit = onoff
640 640
641 @property
642 def options(self):
643 return self.vfs.options
644
645 @options.setter
646 def options(self, value):
647 self.vfs.options = value
648
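# Illustrative sketch of what the newly forwarded `options` property (the
# change in this revision) enables; the path and option name are examples:
#
#   inner = vfs('/repo/.hg/store')
#   wrapper = auditvfs(inner)
#   wrapper.options = {'generaldelta': True}   # stored on the nested vfs
#   assert wrapper.options is inner.options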
641 649 class filtervfs(abstractvfs, auditvfs):
642 650 '''Wrapper vfs for filtering filenames with a function.'''
643 651
644 652 def __init__(self, vfs, filter):
645 653 auditvfs.__init__(self, vfs)
646 654 self._filter = filter
647 655
648 656 def __call__(self, path, *args, **kwargs):
649 657 return self.vfs(self._filter(path), *args, **kwargs)
650 658
651 659 def join(self, path, *insidef):
652 660 if path:
653 661 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
654 662 else:
655 663 return self.vfs.join(path)
656 664
657 665 filteropener = filtervfs
658 666
659 667 class readonlyvfs(abstractvfs, auditvfs):
660 668 '''Wrapper vfs preventing any writing.'''
661 669
662 670 def __init__(self, vfs):
663 671 auditvfs.__init__(self, vfs)
664 672
665 673 def __call__(self, path, mode='r', *args, **kw):
666 674 if mode not in ('r', 'rb'):
667 675 raise error.Abort(_('this vfs is read only'))
668 676 return self.vfs(path, mode, *args, **kw)
669 677
670 678 def join(self, path, *insidef):
671 679 return self.vfs.join(path, *insidef)
672 680
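# Illustrative usage (the base path is hypothetical):
#
#   ro = readonlyvfs(vfs('/some/dir'))
#   ro.read('file')         # reads pass through to the wrapped vfs
#   ro.write('file', 'x')   # raises error.Abort: 'this vfs is read only'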
673 681 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
674 682 '''yield every hg repository under path, always recursively.
675 683 The recurse flag will only control recursion into repo working dirs'''
676 684 def errhandler(err):
677 685 if err.filename == path:
678 686 raise err
679 687 samestat = getattr(os.path, 'samestat', None)
680 688 if followsym and samestat is not None:
681 689 def adddir(dirlst, dirname):
682 690 match = False
683 691 dirstat = os.stat(dirname)
684 692 for lstdirstat in dirlst:
685 693 if samestat(dirstat, lstdirstat):
686 694 match = True
687 695 break
688 696 if not match:
689 697 dirlst.append(dirstat)
690 698 return not match
691 699 else:
692 700 followsym = False
693 701
694 702 if (seen_dirs is None) and followsym:
695 703 seen_dirs = []
696 704 adddir(seen_dirs, path)
697 705 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
698 706 dirs.sort()
699 707 if '.hg' in dirs:
700 708 yield root # found a repository
701 709 qroot = os.path.join(root, '.hg', 'patches')
702 710 if os.path.isdir(os.path.join(qroot, '.hg')):
703 711 yield qroot # we have a patch queue repo here
704 712 if recurse:
705 713 # avoid recursing inside the .hg directory
706 714 dirs.remove('.hg')
707 715 else:
708 716 dirs[:] = [] # don't descend further
709 717 elif followsym:
710 718 newdirs = []
711 719 for d in dirs:
712 720 fname = os.path.join(root, d)
713 721 if adddir(seen_dirs, fname):
714 722 if os.path.islink(fname):
715 723 for hgname in walkrepos(fname, True, seen_dirs):
716 724 yield hgname
717 725 else:
718 726 newdirs.append(d)
719 727 dirs[:] = newdirs
720 728
721 729 def osrcpath():
722 730 '''return default os-specific hgrc search path'''
723 731 path = []
724 732 defaultpath = os.path.join(util.datapath, 'default.d')
725 733 if os.path.isdir(defaultpath):
726 734 for f, kind in osutil.listdir(defaultpath):
727 735 if f.endswith('.rc'):
728 736 path.append(os.path.join(defaultpath, f))
729 737 path.extend(systemrcpath())
730 738 path.extend(userrcpath())
731 739 path = [os.path.normpath(f) for f in path]
732 740 return path
733 741
734 742 _rcpath = None
735 743
736 744 def rcpath():
737 745 '''return hgrc search path. if env var HGRCPATH is set, use it.
738 746 for each item in path, if directory, use files ending in .rc,
739 747 else use item.
740 748 make HGRCPATH empty to only look in .hg/hgrc of current repo.
741 749 if no HGRCPATH, use default os-specific path.'''
742 750 global _rcpath
743 751 if _rcpath is None:
744 752 if 'HGRCPATH' in os.environ:
745 753 _rcpath = []
746 754 for p in os.environ['HGRCPATH'].split(os.pathsep):
747 755 if not p:
748 756 continue
749 757 p = util.expandpath(p)
750 758 if os.path.isdir(p):
751 759 for f, kind in osutil.listdir(p):
752 760 if f.endswith('.rc'):
753 761 _rcpath.append(os.path.join(p, f))
754 762 else:
755 763 _rcpath.append(p)
756 764 else:
757 765 _rcpath = osrcpath()
758 766 return _rcpath
759 767
760 768 def intrev(rev):
761 769 """Return integer for a given revision that can be used in comparison or
762 770 arithmetic operation"""
763 771 if rev is None:
764 772 return wdirrev
765 773 return rev
766 774
767 775 def revsingle(repo, revspec, default='.'):
768 776 if not revspec and revspec != 0:
769 777 return repo[default]
770 778
771 779 l = revrange(repo, [revspec])
772 780 if not l:
773 781 raise error.Abort(_('empty revision set'))
774 782 return repo[l.last()]
775 783
776 784 def _pairspec(revspec):
777 785 tree = revset.parse(revspec)
778 786 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
779 787 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
780 788
781 789 def revpair(repo, revs):
782 790 if not revs:
783 791 return repo.dirstate.p1(), None
784 792
785 793 l = revrange(repo, revs)
786 794
787 795 if not l:
788 796 first = second = None
789 797 elif l.isascending():
790 798 first = l.min()
791 799 second = l.max()
792 800 elif l.isdescending():
793 801 first = l.max()
794 802 second = l.min()
795 803 else:
796 804 first = l.first()
797 805 second = l.last()
798 806
799 807 if first is None:
800 808 raise error.Abort(_('empty revision range'))
801 809 if (first == second and len(revs) >= 2
802 810 and not all(revrange(repo, [r]) for r in revs)):
803 811 raise error.Abort(_('empty revision on one side of range'))
804 812
805 813 # if top-level is range expression, the result must always be a pair
806 814 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
807 815 return repo.lookup(first), None
808 816
809 817 return repo.lookup(first), repo.lookup(second)
810 818
811 819 def revrange(repo, specs):
812 820 """Execute 1 to many revsets and return the union.
813 821
814 822 This is the preferred mechanism for executing revsets using user-specified
815 823 config options, such as revset aliases.
816 824
817 825 The revsets specified by ``specs`` will be executed via a chained ``OR``
818 826 expression. If ``specs`` is empty, an empty result is returned.
819 827
820 828 ``specs`` can contain integers, in which case they are assumed to be
821 829 revision numbers.
822 830
823 831 It is assumed the revsets are already formatted. If you have arguments
824 832 that need to be expanded in the revset, call ``revset.formatspec()``
825 833 and pass the result as an element of ``specs``.
826 834
827 835 Specifying a single revset is allowed.
828 836
829 837 Returns a ``revset.abstractsmartset`` which is a list-like interface over
830 838 integer revisions.
831 839 """
832 840 allspecs = []
833 841 for spec in specs:
834 842 if isinstance(spec, int):
835 843 spec = revset.formatspec('rev(%d)', spec)
836 844 allspecs.append(spec)
837 845 m = revset.matchany(repo.ui, allspecs, repo)
838 846 return m(repo)
839 847
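# Illustrative call (assumes `repo` is a repository; the revset string
# and integer are examples):
#
#   revs = revrange(repo, ['heads(default)', 42])
#   if revs:
#       tip = revs.max()   # smartsets expose min()/max()/first()/last()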
840 848 def meaningfulparents(repo, ctx):
841 849 """Return list of meaningful (or all if debug) parentrevs for rev.
842 850
843 851 For merges (two non-nullrev revisions) both parents are meaningful.
844 852 Otherwise the first parent revision is considered meaningful if it
845 853 is not the preceding revision.
846 854 """
847 855 parents = ctx.parents()
848 856 if len(parents) > 1:
849 857 return parents
850 858 if repo.ui.debugflag:
851 859 return [parents[0], repo['null']]
852 860 if parents[0].rev() >= intrev(ctx.rev()) - 1:
853 861 return []
854 862 return parents
855 863
856 864 def expandpats(pats):
857 865 '''Expand bare globs when running on windows.
858 866 On posix we assume it has already been done by sh.'''
859 867 if not util.expandglobs:
860 868 return list(pats)
861 869 ret = []
862 870 for kindpat in pats:
863 871 kind, pat = matchmod._patsplit(kindpat, None)
864 872 if kind is None:
865 873 try:
866 874 globbed = glob.glob(pat)
867 875 except re.error:
868 876 globbed = [pat]
869 877 if globbed:
870 878 ret.extend(globbed)
871 879 continue
872 880 ret.append(kindpat)
873 881 return ret
874 882
875 883 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
876 884 badfn=None):
877 885 '''Return a matcher and the patterns that were used.
878 886 The matcher will warn about bad matches, unless an alternate badfn callback
879 887 is provided.'''
880 888 if pats == ("",):
881 889 pats = []
882 890 if opts is None:
883 891 opts = {}
884 892 if not globbed and default == 'relpath':
885 893 pats = expandpats(pats or [])
886 894
887 895 def bad(f, msg):
888 896 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
889 897
890 898 if badfn is None:
891 899 badfn = bad
892 900
893 901 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
894 902 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
895 903
896 904 if m.always():
897 905 pats = []
898 906 return m, pats
899 907
900 908 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
901 909 badfn=None):
902 910 '''Return a matcher that will warn about bad matches.'''
903 911 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
904 912
905 913 def matchall(repo):
906 914 '''Return a matcher that will efficiently match everything.'''
907 915 return matchmod.always(repo.root, repo.getcwd())
908 916
909 917 def matchfiles(repo, files, badfn=None):
910 918 '''Return a matcher that will efficiently match exactly these files.'''
911 919 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
912 920
913 921 def origpath(ui, repo, filepath):
914 922 '''customize where .orig files are created
915 923
916 924 Fetch user defined path from config file: [ui] origbackuppath = <path>
918 926 Fall back to the default of filepath + '.orig' if not specified
918 926 '''
919 927 origbackuppath = ui.config('ui', 'origbackuppath', None)
920 928 if origbackuppath is None:
921 929 return filepath + ".orig"
922 930
923 931 filepathfromroot = os.path.relpath(filepath, start=repo.root)
924 932 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
925 933
926 934 origbackupdir = repo.vfs.dirname(fullorigpath)
927 935 if not repo.vfs.exists(origbackupdir):
928 936 ui.note(_('creating directory: %s\n') % origbackupdir)
929 937 util.makedirs(origbackupdir)
930 938
931 939 return fullorigpath + ".orig"
932 940
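# Example configuration (the path value is hypothetical):
#
#   [ui]
#   origbackuppath = .hg/origbackups
#
# With that set, the backup of 'dir/f' is written under
# '.hg/origbackups/dir/f.orig' instead of 'dir/f.orig'.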
933 941 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
934 942 if opts is None:
935 943 opts = {}
936 944 m = matcher
937 945 if dry_run is None:
938 946 dry_run = opts.get('dry_run')
939 947 if similarity is None:
940 948 similarity = float(opts.get('similarity') or 0)
941 949
942 950 ret = 0
943 951 join = lambda f: os.path.join(prefix, f)
944 952
945 953 def matchessubrepo(matcher, subpath):
946 954 if matcher.exact(subpath):
947 955 return True
948 956 for f in matcher.files():
949 957 if f.startswith(subpath):
950 958 return True
951 959 return False
952 960
953 961 wctx = repo[None]
954 962 for subpath in sorted(wctx.substate):
955 963 if opts.get('subrepos') or matchessubrepo(m, subpath):
956 964 sub = wctx.sub(subpath)
957 965 try:
958 966 submatch = matchmod.subdirmatcher(subpath, m)
959 967 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
960 968 ret = 1
961 969 except error.LookupError:
962 970 repo.ui.status(_("skipping missing subrepository: %s\n")
963 971 % join(subpath))
964 972
965 973 rejected = []
966 974 def badfn(f, msg):
967 975 if f in m.files():
968 976 m.bad(f, msg)
969 977 rejected.append(f)
970 978
971 979 badmatch = matchmod.badmatch(m, badfn)
972 980 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
973 981 badmatch)
974 982
975 983 unknownset = set(unknown + forgotten)
976 984 toprint = unknownset.copy()
977 985 toprint.update(deleted)
978 986 for abs in sorted(toprint):
979 987 if repo.ui.verbose or not m.exact(abs):
980 988 if abs in unknownset:
981 989 status = _('adding %s\n') % m.uipath(abs)
982 990 else:
983 991 status = _('removing %s\n') % m.uipath(abs)
984 992 repo.ui.status(status)
985 993
986 994 renames = _findrenames(repo, m, added + unknown, removed + deleted,
987 995 similarity)
988 996
989 997 if not dry_run:
990 998 _markchanges(repo, unknown + forgotten, deleted, renames)
991 999
992 1000 for f in rejected:
993 1001 if f in m.files():
994 1002 return 1
995 1003 return ret
996 1004
997 1005 def marktouched(repo, files, similarity=0.0):
998 1006 '''Assert that files have somehow been operated upon. files are relative to
999 1007 the repo root.'''
1000 1008 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1001 1009 rejected = []
1002 1010
1003 1011 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1004 1012
1005 1013 if repo.ui.verbose:
1006 1014 unknownset = set(unknown + forgotten)
1007 1015 toprint = unknownset.copy()
1008 1016 toprint.update(deleted)
1009 1017 for abs in sorted(toprint):
1010 1018 if abs in unknownset:
1011 1019 status = _('adding %s\n') % abs
1012 1020 else:
1013 1021 status = _('removing %s\n') % abs
1014 1022 repo.ui.status(status)
1015 1023
1016 1024 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1017 1025 similarity)
1018 1026
1019 1027 _markchanges(repo, unknown + forgotten, deleted, renames)
1020 1028
1021 1029 for f in rejected:
1022 1030 if f in m.files():
1023 1031 return 1
1024 1032 return 0
1025 1033
1026 1034 def _interestingfiles(repo, matcher):
1027 1035 '''Walk dirstate with matcher, looking for files that addremove would care
1028 1036 about.
1029 1037
1030 1038 This is different from dirstate.status because it doesn't care about
1031 1039 whether files are modified or clean.'''
1032 1040 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1033 1041 audit_path = pathutil.pathauditor(repo.root)
1034 1042
1035 1043 ctx = repo[None]
1036 1044 dirstate = repo.dirstate
1037 1045 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1038 1046 full=False)
1039 1047 for abs, st in walkresults.iteritems():
1040 1048 dstate = dirstate[abs]
1041 1049 if dstate == '?' and audit_path.check(abs):
1042 1050 unknown.append(abs)
1043 1051 elif dstate != 'r' and not st:
1044 1052 deleted.append(abs)
1045 1053 elif dstate == 'r' and st:
1046 1054 forgotten.append(abs)
1047 1055 # for finding renames
1048 1056 elif dstate == 'r' and not st:
1049 1057 removed.append(abs)
1050 1058 elif dstate == 'a':
1051 1059 added.append(abs)
1052 1060
1053 1061 return added, unknown, deleted, removed, forgotten
1054 1062
1055 1063 def _findrenames(repo, matcher, added, removed, similarity):
1056 1064 '''Find renames from removed files to added ones.'''
1057 1065 renames = {}
1058 1066 if similarity > 0:
1059 1067 for old, new, score in similar.findrenames(repo, added, removed,
1060 1068 similarity):
1061 1069 if (repo.ui.verbose or not matcher.exact(old)
1062 1070 or not matcher.exact(new)):
1063 1071 repo.ui.status(_('recording removal of %s as rename to %s '
1064 1072 '(%d%% similar)\n') %
1065 1073 (matcher.rel(old), matcher.rel(new),
1066 1074 score * 100))
1067 1075 renames[new] = old
1068 1076 return renames
1069 1077
1070 1078 def _markchanges(repo, unknown, deleted, renames):
1071 1079 '''Marks the files in unknown as added, the files in deleted as removed,
1072 1080 and the files in renames as copied.'''
1073 1081 wctx = repo[None]
1074 1082 with repo.wlock():
1075 1083 wctx.forget(deleted)
1076 1084 wctx.add(unknown)
1077 1085 for new, old in renames.iteritems():
1078 1086 wctx.copy(old, new)
1079 1087
1080 1088 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1081 1089 """Update the dirstate to reflect the intent of copying src to dst. For
1082 1090 different reasons it might not end with dst being marked as copied from src.
1083 1091 """
1084 1092 origsrc = repo.dirstate.copied(src) or src
1085 1093 if dst == origsrc: # copying back a copy?
1086 1094 if repo.dirstate[dst] not in 'mn' and not dryrun:
1087 1095 repo.dirstate.normallookup(dst)
1088 1096 else:
1089 1097 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1090 1098 if not ui.quiet:
1091 1099 ui.warn(_("%s has not been committed yet, so no copy "
1092 1100 "data will be stored for %s.\n")
1093 1101 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1094 1102 if repo.dirstate[dst] in '?r' and not dryrun:
1095 1103 wctx.add([dst])
1096 1104 elif not dryrun:
1097 1105 wctx.copy(origsrc, dst)
1098 1106
1099 1107 def readrequires(opener, supported):
1100 1108 '''Reads and parses .hg/requires and checks if all entries found
1101 1109 are in the list of supported features.'''
1102 1110 requirements = set(opener.read("requires").splitlines())
1103 1111 missings = []
1104 1112 for r in requirements:
1105 1113 if r not in supported:
1106 1114 if not r or not r[0].isalnum():
1107 1115 raise error.RequirementError(_(".hg/requires file is corrupt"))
1108 1116 missings.append(r)
1109 1117 missings.sort()
1110 1118 if missings:
1111 1119 raise error.RequirementError(
1112 1120 _("repository requires features unknown to this Mercurial: %s")
1113 1121 % " ".join(missings),
1114 1122 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1115 1123 " for more information"))
1116 1124 return requirements
1117 1125
1118 1126 def writerequires(opener, requirements):
1119 1127 with opener('requires', 'w') as fp:
1120 1128 for r in sorted(requirements):
1121 1129 fp.write("%s\n" % r)
1122 1130
1123 1131 class filecachesubentry(object):
1124 1132 def __init__(self, path, stat):
1125 1133 self.path = path
1126 1134 self.cachestat = None
1127 1135 self._cacheable = None
1128 1136
1129 1137 if stat:
1130 1138 self.cachestat = filecachesubentry.stat(self.path)
1131 1139
1132 1140 if self.cachestat:
1133 1141 self._cacheable = self.cachestat.cacheable()
1134 1142 else:
1135 1143 # None means we don't know yet
1136 1144 self._cacheable = None
1137 1145
1138 1146 def refresh(self):
1139 1147 if self.cacheable():
1140 1148 self.cachestat = filecachesubentry.stat(self.path)
1141 1149
1142 1150 def cacheable(self):
1143 1151 if self._cacheable is not None:
1144 1152 return self._cacheable
1145 1153
1146 1154 # we don't know yet, assume it is for now
1147 1155 return True
1148 1156
1149 1157 def changed(self):
1150 1158 # no point in going further if we can't cache it
1151 1159 if not self.cacheable():
1152 1160 return True
1153 1161
1154 1162 newstat = filecachesubentry.stat(self.path)
1155 1163
1156 1164 # we may not know if it's cacheable yet, check again now
1157 1165 if newstat and self._cacheable is None:
1158 1166 self._cacheable = newstat.cacheable()
1159 1167
1160 1168 # check again
1161 1169 if not self._cacheable:
1162 1170 return True
1163 1171
1164 1172 if self.cachestat != newstat:
1165 1173 self.cachestat = newstat
1166 1174 return True
1167 1175 else:
1168 1176 return False
1169 1177
1170 1178 @staticmethod
1171 1179 def stat(path):
1172 1180 try:
1173 1181 return util.cachestat(path)
1174 1182 except OSError as e:
1175 1183 if e.errno != errno.ENOENT:
1176 1184 raise
1177 1185
1178 1186 class filecacheentry(object):
1179 1187 def __init__(self, paths, stat=True):
1180 1188 self._entries = []
1181 1189 for path in paths:
1182 1190 self._entries.append(filecachesubentry(path, stat))
1183 1191
1184 1192 def changed(self):
1185 1193 '''true if any entry has changed'''
1186 1194 for entry in self._entries:
1187 1195 if entry.changed():
1188 1196 return True
1189 1197 return False
1190 1198
1191 1199 def refresh(self):
1192 1200 for entry in self._entries:
1193 1201 entry.refresh()
1194 1202
1195 1203 class filecache(object):
1196 1204 '''A property like decorator that tracks files under .hg/ for updates.
1197 1205
1198 1206 Records stat info when called in _filecache.
1199 1207
1200 1208 On subsequent calls, compares old stat info with new info, and recreates the
1201 1209 object when any of the files changes, updating the new stat info in
1202 1210 _filecache.
1203 1211
1204 1212 Mercurial either atomically renames or appends to files under .hg,
1205 1213 so to ensure the cache is reliable we need the filesystem to be able
1206 1214 to tell us if a file has been replaced. If it can't, we fall back to
1207 1215 recreating the object on every call (essentially the same behavior as
1208 1216 propertycache).
1209 1217
1210 1218 '''
1211 1219 def __init__(self, *paths):
1212 1220 self.paths = paths
1213 1221
1214 1222 def join(self, obj, fname):
1215 1223 """Used to compute the runtime path of a cached file.
1216 1224
1217 1225 Users should subclass filecache and provide their own version of this
1218 1226 function to call the appropriate join function on 'obj' (an instance
1219 1227 of the class whose member function was decorated).
1220 1228 """
1221 1229 return obj.join(fname)
1222 1230
1223 1231 def __call__(self, func):
1224 1232 self.func = func
1225 1233 self.name = func.__name__
1226 1234 return self
1227 1235
1228 1236 def __get__(self, obj, type=None):
1229 1237 # if accessed on the class, return the descriptor itself.
1230 1238 if obj is None:
1231 1239 return self
1232 1240 # do we need to check if the file changed?
1233 1241 if self.name in obj.__dict__:
1234 1242 assert self.name in obj._filecache, self.name
1235 1243 return obj.__dict__[self.name]
1236 1244
1237 1245 entry = obj._filecache.get(self.name)
1238 1246
1239 1247 if entry:
1240 1248 if entry.changed():
1241 1249 entry.obj = self.func(obj)
1242 1250 else:
1243 1251 paths = [self.join(obj, path) for path in self.paths]
1244 1252
1245 1253 # We stat -before- creating the object so our cache doesn't lie if
1246 1254 # a writer modified the file between the time we read and stat it
1247 1255 entry = filecacheentry(paths, True)
1248 1256 entry.obj = self.func(obj)
1249 1257
1250 1258 obj._filecache[self.name] = entry
1251 1259
1252 1260 obj.__dict__[self.name] = entry.obj
1253 1261 return entry.obj
1254 1262
1255 1263 def __set__(self, obj, value):
1256 1264 if self.name not in obj._filecache:
1257 1265 # we add an entry for the missing value because X in __dict__
1258 1266 # implies X in _filecache
1259 1267 paths = [self.join(obj, path) for path in self.paths]
1260 1268 ce = filecacheentry(paths, False)
1261 1269 obj._filecache[self.name] = ce
1262 1270 else:
1263 1271 ce = obj._filecache[self.name]
1264 1272
1265 1273 ce.obj = value # update cached copy
1266 1274 obj.__dict__[self.name] = value # update copy returned by obj.x
1267 1275
1268 1276 def __delete__(self, obj):
1269 1277 try:
1270 1278 del obj.__dict__[self.name]
1271 1279 except KeyError:
1272 1280 raise AttributeError(self.name)
1273 1281
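# Illustrative subclass following the docstring's contract (the class and
# helper names below are hypothetical):
#
#   class repofilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)   # resolve against the repo's vfs
#
#   class somerepo(object):
#       @repofilecache('bookmarks')
#       def _bookmarks(self):
#           return parsebookmarksfile(self)  # recomputed when file changes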
1274 1282 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1275 1283 if lock is None:
1276 1284 raise error.LockInheritanceContractViolation(
1277 1285 'lock can only be inherited while held')
1278 1286 if environ is None:
1279 1287 environ = {}
1280 1288 with lock.inherit() as locker:
1281 1289 environ[envvar] = locker
1282 1290 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1283 1291
1284 1292 def wlocksub(repo, cmd, *args, **kwargs):
1285 1293 """run cmd as a subprocess that allows inheriting repo's wlock
1286 1294
1287 1295 This can only be called while the wlock is held. This takes all the
1288 1296 arguments that ui.system does, and returns the exit code of the
1289 1297 subprocess."""
1290 1298 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1291 1299 **kwargs)
1292 1300
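# Illustrative usage (the command string is hypothetical):
#
#   with repo.wlock():
#       ret = wlocksub(repo, 'hg -R sub update')
#   # the child sees HG_WLOCK_LOCKER in its environment and can reuse
#   # the wlock held by this process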
1293 1301 def gdinitconfig(ui):
1294 1302 """helper function to know if a repo should be created as general delta
1295 1303 """
1296 1304 # experimental config: format.generaldelta
1297 1305 return (ui.configbool('format', 'generaldelta', False)
1298 1306 or ui.configbool('format', 'usegeneraldelta', True))
1299 1307
1300 1308 def gddeltaconfig(ui):
1301 1309 """helper function to know if incoming delta should be optimised
1302 1310 """
1303 1311 # experimental config: format.generaldelta
1304 1312 return ui.configbool('format', 'generaldelta', False)
1305 1313
1306 1314 class delayclosedfile(object):
1307 1315 """Proxy for a file object whose close is delayed.
1308 1316
1309 1317 Do not instantiate outside of the vfs layer.
1310 1318 """
1311 1319
1312 1320 def __init__(self, fh, closer):
1313 1321 object.__setattr__(self, '_origfh', fh)
1314 1322 object.__setattr__(self, '_closer', closer)
1315 1323
1316 1324 def __getattr__(self, attr):
1317 1325 return getattr(self._origfh, attr)
1318 1326
1319 1327 def __setattr__(self, attr, value):
1320 1328 return setattr(self._origfh, attr, value)
1321 1329
1322 1330 def __delattr__(self, attr):
1323 1331 return delattr(self._origfh, attr)
1324 1332
1325 1333 def __enter__(self):
1326 1334 return self._origfh.__enter__()
1327 1335
1328 1336 def __exit__(self, exc_type, exc_value, exc_tb):
1329 1337 self._closer.close(self._origfh)
1330 1338
1331 1339 def close(self):
1332 1340 self._closer.close(self._origfh)
1333 1341
1334 1342 class backgroundfilecloser(object):
1335 1343 """Coordinates background closing of file handles on multiple threads."""
1336 1344 def __init__(self, ui, expectedcount=-1):
1337 1345 self._running = False
1338 1346 self._entered = False
1339 1347 self._threads = []
1340 1348 self._threadexception = None
1341 1349
1342 1350 # Only Windows/NTFS has slow file closing. So only enable by default
1343 1351 # on that platform. But allow it to be enabled elsewhere for testing.
1344 1352 defaultenabled = os.name == 'nt'
1345 1353 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1346 1354
1347 1355 if not enabled:
1348 1356 return
1349 1357
1350 1358 # There is overhead to starting and stopping the background threads.
1351 1359 # Don't do background processing unless the file count is large enough
1352 1360 # to justify it.
1353 1361 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1354 1362 2048)
1355 1363 # FUTURE dynamically start background threads after minfilecount closes.
1356 1364 # (We don't currently have any callers that don't know their file count)
1357 1365 if expectedcount > 0 and expectedcount < minfilecount:
1358 1366 return
1359 1367
1360 1368 # Windows defaults to a limit of 512 open files. A buffer of 128
1361 1369 # should give us enough headway.
1362 1370 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1363 1371 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1364 1372
1365 1373 ui.debug('starting %d threads for background file closing\n' %
1366 1374 threadcount)
1367 1375
1368 1376 self._queue = util.queue(maxsize=maxqueue)
1369 1377 self._running = True
1370 1378
1371 1379 for i in range(threadcount):
1372 1380 t = threading.Thread(target=self._worker, name='backgroundcloser')
1373 1381 self._threads.append(t)
1374 1382 t.start()
1375 1383
1376 1384 def __enter__(self):
1377 1385 self._entered = True
1378 1386 return self
1379 1387
1380 1388 def __exit__(self, exc_type, exc_value, exc_tb):
1381 1389 self._running = False
1382 1390
1383 1391 # Wait for threads to finish closing so open files don't linger for
1384 1392 # longer than the lifetime of the context manager.
1385 1393 for t in self._threads:
1386 1394 t.join()
1387 1395
1388 1396 def _worker(self):
1389 1397 """Main routine for worker thread."""
1390 1398 while True:
1391 1399 try:
1392 1400 fh = self._queue.get(block=True, timeout=0.100)
1393 1401 # Need to catch errors here, or the thread will terminate and
1394 1402 # we could orphan file descriptors.
1395 1403 try:
1396 1404 fh.close()
1397 1405 except Exception as e:
1398 1406 # Stash so can re-raise from main thread later.
1399 1407 self._threadexception = e
1400 1408 except util.empty:
1401 1409 if not self._running:
1402 1410 break
1403 1411
1404 1412 def close(self, fh):
1405 1413 """Schedule a file for closing."""
1406 1414 if not self._entered:
1407 1415 raise error.Abort(_('can only call close() when context manager '
1408 1416 'active'))
1409 1417
1410 1418 # If a background thread encountered an exception, raise now so we fail
1411 1419 # fast. Otherwise we may potentially go on for minutes until the error
1412 1420 # is acted on.
1413 1421 if self._threadexception:
1414 1422 e = self._threadexception
1415 1423 self._threadexception = None
1416 1424 raise e
1417 1425
1418 1426 # If we're not actively running, close synchronously.
1419 1427 if not self._running:
1420 1428 fh.close()
1421 1429 return
1422 1430
1423 1431 self._queue.put(fh, block=True, timeout=None)