# Provenance (from the code-review page this was captured from):
# Mercurial scmutil.py, changeset r29718:2dd8c225 on the default branch,
# by Pierre-Yves David; commit message: "vfs: use propertycache for open".
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Preserve the field order: each property below indexes into this
        # tuple by position.
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath -> ctx) mapping, preferring entries from ctx1: the
    # .hgsub file may have been modified (in ctx2) but not yet committed
    # (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths that exist only in ctx2 are handled separately below
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
        return
    ui.status(_("no changes found (ignored %d secret changesets)\n")
              % len(secretlist))
141 141
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not usable as a new label name.'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
155 155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172 172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # Windows always aborts; elsewhere the config decides.
    abort = lval == 'abort' or os.name == 'nt'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185 185
class casecollisionauditor(object):
    '''Warn or abort when a filename would collide, after case-folding,
    with a name already tracked in the dirstate.'''
    def __init__(self, ui, abort, dirstate):
        # ui: used for warnings; abort: when True, collisions raise instead
        self._ui = ui
        self._abort = abort
        # Lower-case every tracked filename in a single encoding.lower()
        # call by joining them on NUL and splitting the result back.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        '''Audit filename ``f``; warn or abort on a case-folding collision,
        then record it so later additions are checked against it too.'''
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # collision only counts if the exact name is not already tracked
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
209 209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
233 233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # propertycache stores self.__call__ on first access, so subsequent
        # vfs.open(...) calls dispatch straight to __call__ with no extra
        # method-call overhead.
        return self.__call__

    def read(self, path):
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        # return the name relative to the vfs, matching how it was requested
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                vfs._backgroundfilecloser = None
480 478
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: root directory that all relative paths are resolved against
        # audit: when True, install a pathauditor to validate paths
        # expandpath/realpath: optional normalizations applied to ``base``
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        # createmode: file mode applied to newly created files (see
        # _fixfilemode); None disables the fixup
        self.createmode = None
        # _trustnlink: lazily-determined; whether nlink counts from the
        # filesystem can be trusted (see __call__)
        self._trustnlink = None

    @property
    def mustaudit(self):
        # whether paths passed to this vfs are audited before use
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        # switching auditing on installs a real pathauditor; switching it
        # off replaces it with util.always (accept everything)
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        # cached: whether the filesystem under ``base`` supports symlinks
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # cached: whether the filesystem under ``base`` honors the exec bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode (restricted to rw bits) to a newly created file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
            self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink semantics: -1 = unknown, 0 = file (re)created by us so the
        # mode fixup applies, >0 = pre-existing file whose hardlink count
        # decides whether a private copy is needed before writing
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break the hard link by replacing the file with a
                        # private copy before opening it for write
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                  'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # filesystem does not support symlinks: store the link target
            # as the file's content instead
            self.write(dst, src)

    def join(self, path, *insidef):
        # resolve a vfs-relative path; a falsy path means the vfs root itself
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
626 624
opener = vfs  # alias for the vfs class
628 626
class auditvfs(object):
    # Thin wrapper that forwards auditing and options state to an inner vfs.
    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, value):
        self.vfs.mustaudit = value

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
648 646
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # run the filename through the filter before delegating
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
664 662
filteropener = filtervfs  # alias for the filtervfs class
666 664
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes pass through
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_('this vfs is read only'))

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
680 678
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the starting path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return False if an equivalent
            # directory (same stat) was already seen
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
728 726
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
741 739
# memoization slot for rcpath(); stays None until rcpath() first computes it
_rcpath = None
743 741
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # a directory contributes every *.rc file it contains
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
767 765
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # None stands for the working directory; map it to its pseudo-revnum
    return wdirrev if rev is None else rev
774 772
def revsingle(repo, revspec, default='.'):
    # an empty spec (but not the integer 0) falls back to the default rev
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
783 781
def _pairspec(revspec):
    '''report whether the top-level operator of ``revspec`` is a range'''
    tree = revset.parse(revspec)
    tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
788 786
def revpair(repo, revs):
    # Resolve user revision specs into a (first, second) pair of nodes;
    # second is None when the specs name a single revision.
    if not revs:
        # no specs: working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints of the resulting set; use min/max when the set
    # is known to be sorted so 'first' is always the lower end
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # several specs collapsing to one rev is fine unless one of them
    # individually resolved to nothing
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
818 816
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped into an explicit rev() expression
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)
847 845
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx.rev()) - 1:
        # parent immediately precedes this rev: nothing worth showing
        return []
    return ps
863 861
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are passed through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # a glob matching nothing falls back to the literal pattern
            ret.append(kindpat)
    return ret
882 880
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default handler: warn through the repo ui; references ``m``
        # (bound below) for relative-path display
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    include = opts.get('include')
    exclude = opts.get('exclude')
    m = ctx.match(pats, include, exclude, default,
                  listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
907 905
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
912 910
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # delegates to the match module's always-matcher, rooted at the repo
    return matchmod.always(repo.root, repo.getcwd())
916 914
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matcher: no pattern expansion, only the literal ``files``;
    # ``badfn`` is forwarded to matchmod.exact
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
920 918
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backupdir = ui.config('ui', 'origbackuppath', None)
    if backupdir is None:
        # default behavior: the backup sits right next to the file
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured backup dir
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backupdir, relpath)

    parent = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(parent):
        ui.note(_('creating directory: %s\n') % parent)
        util.makedirs(parent)

    return fullorigpath + ".orig"
940 938
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule unknown files for addition and missing ones for removal.

    Recurses into matching subrepos, prints what will be added/removed,
    records renames found at the given similarity threshold, and (unless
    dry_run) applies the changes via _markchanges. Returns 1 if any
    explicitly matched file was rejected or a subrepo reported failure,
    0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    # explicit arguments win; otherwise fall back to values carried in opts
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        # a subrepo is pulled in when named exactly or when any requested
        # file lies at or under its path
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.subdirmatcher(subpath, m)
                # recurse; a truthy result from the subrepo marks failure
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # warn only for files the user named explicitly; remember all
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # announce additions (unknown/forgotten) and removals (deleted);
    # exact matches are only echoed in verbose mode
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected explicit file forces a failure exit status
    for f in rejected:
        if f in m.files():
            return 1
    return ret
1004 1002
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    rejected = []
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # echo what is being added and removed, as addremove would
        unknownset = set(unknown + forgotten)
        toprint = unknownset | set(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                msg = _('adding %s\n') % abs
            else:
                msg = _('removing %s\n') % abs
            repo.ui.status(msg)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # fail if any explicitly requested file could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1033 1031
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the dirstate code for the path; st is falsy when the
        # file is absent from the working directory
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked, and on a path the auditor accepts
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed yet present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1062 1060
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # similarity 0 disables rename detection entirely
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # only mention pairs the user did not name explicitly, unless verbose
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1077 1075
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # take the wlock once so all three dirstate updates happen under it
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1087 1085
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chained copies point at the origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # no copy data needed; just make sure dst is tracked normally again
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # the origin was only just added, so there is no committed
            # revision to record copy data against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            # still ensure dst is tracked
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1106 1104
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file itself
        # is damaged, not merely from a newer Mercurial
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1125 1123
def writerequires(opener, requirements):
    # one requirement per line, sorted so the file content is deterministic
    with opener('requires', 'w') as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
1130 1128
class filecachesubentry(object):
    """Tracks the stat state of one file backing a filecache entry."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # None means cacheability has not been determined yet
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # file missing: we still don't know
                self._cacheable = None

    def refresh(self):
        """Re-stat the file so future changed() calls compare against now."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable
        # undetermined: optimistically assume it can be cached
        return True

    def changed(self):
        """Return True when the file must be considered modified."""
        # an uncacheable file always counts as changed
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # the first successful stat settles cacheability
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """cachestat for path, or None when it does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1185 1183
class filecacheentry(object):
    """Aggregates stat tracking over the set of files behind one cache."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1202 1200
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            # instance-dict hit: __set__/__get__ maintain the invariant that
            # a __dict__ entry always has a matching _filecache entry
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a backing file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache in the instance dict so later reads skip the stat check
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # clear only the instance-dict cache; _filecache keeps its entry
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1281 1279
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd with the lock's inheritance token exported via envvar.

    Raises LockInheritanceContractViolation when lock is None, i.e. when
    the caller does not actually hold the lock. Note: a caller-supplied
    environ dict is mutated in place.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        # hand the token to the child process through the environment
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1291 1289
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1300 1298
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta', False):
        return True
    return ui.configbool('format', 'usegeneraldelta', True)
1307 1305
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1313 1311
class delayclosedfile(object):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        # use object.__setattr__ to bypass our own __setattr__, which
        # forwards everything to the wrapped file handle
        object.__setattr__(self, '_origfh', fh)
        object.__setattr__(self, '_closer', closer)

    def __getattr__(self, attr):
        # delegate all other attribute access to the real file object
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # instead of closing inline, hand the handle to the closer
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)
1341 1339
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        # first exception raised in a worker; re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # tell workers to exit once the queue stays empty
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue idle for the timeout window; exit only when stopping
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
General Comments 0
You need to be logged in to leave comments. Login now