scmutil: factor out common logic of delayclosedfile to reuse it...
FUJIWARA Katsunori
r29994:0c40e64d default
@@ -1,1420 +1,1433 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
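Editor's note: for illustration, a minimal sketch of how this status tuple can be consumed; the file names are placeholders:

    # Build a status directly and read it by name or by index.
    st = status(['a.txt'], [], [], [], [], [], ['b.txt'])
    assert st.modified == ['a.txt']   # index 0, per the property above
    assert st.clean == ['b.txt']      # index 6
    modified, added = st[0], st[1]    # still unpacks like a plain tuple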
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
122 122 '''Report no changes for push/pull; excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision,
130 130 # we have to explicitly exclude it until discovery is cleaned up.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
233 233
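Editor's note: the hashing scheme above is easy to reproduce standalone; a sketch assuming a plain iterable of filtered revision numbers:

    import hashlib

    def filteredhash_sketch(filteredrevs, maxrev):
        # Mirror of the logic above: feed 'rev;' for each filtered
        # revision <= maxrev into SHA-1.
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        return s.digest()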
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 @util.propertycache
260 260 def open(self):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 return self.__call__
268 268
269 269 def read(self, path):
270 270 with self(path, 'rb') as fp:
271 271 return fp.read()
272 272
273 273 def readlines(self, path, mode='rb'):
274 274 with self(path, mode=mode) as fp:
275 275 return fp.readlines()
276 276
277 277 def write(self, path, data, backgroundclose=False):
278 278 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
279 279 return fp.write(data)
280 280
281 281 def writelines(self, path, data, mode='wb', notindexed=False):
282 282 with self(path, mode=mode, notindexed=notindexed) as fp:
283 283 return fp.writelines(data)
284 284
285 285 def append(self, path, data):
286 286 with self(path, 'ab') as fp:
287 287 return fp.write(data)
288 288
289 289 def basename(self, path):
290 290 """return base element of a path (as os.path.basename would do)
291 291
292 292 This exists to allow handling of strange encoding if needed."""
293 293 return os.path.basename(path)
294 294
295 295 def chmod(self, path, mode):
296 296 return os.chmod(self.join(path), mode)
297 297
298 298 def dirname(self, path):
299 299 """return dirname element of a path (as os.path.dirname would do)
300 300
301 301 This exists to allow handling of strange encoding if needed."""
302 302 return os.path.dirname(path)
303 303
304 304 def exists(self, path=None):
305 305 return os.path.exists(self.join(path))
306 306
307 307 def fstat(self, fp):
308 308 return util.fstat(fp)
309 309
310 310 def isdir(self, path=None):
311 311 return os.path.isdir(self.join(path))
312 312
313 313 def isfile(self, path=None):
314 314 return os.path.isfile(self.join(path))
315 315
316 316 def islink(self, path=None):
317 317 return os.path.islink(self.join(path))
318 318
319 319 def isfileorlink(self, path=None):
320 320 '''return whether path is a regular file or a symlink
321 321
322 322 Unlike isfile, this doesn't follow symlinks.'''
323 323 try:
324 324 st = self.lstat(path)
325 325 except OSError:
326 326 return False
327 327 mode = st.st_mode
328 328 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
329 329
330 330 def reljoin(self, *paths):
331 331 """join various elements of a path together (as os.path.join would do)
332 332
333 333 The vfs base is not injected so that paths stay relative. This exists
334 334 to allow handling of strange encoding if needed."""
335 335 return os.path.join(*paths)
336 336
337 337 def split(self, path):
338 338 """split top-most element of a path (as os.path.split would do)
339 339
340 340 This exists to allow handling of strange encoding if needed."""
341 341 return os.path.split(path)
342 342
343 343 def lexists(self, path=None):
344 344 return os.path.lexists(self.join(path))
345 345
346 346 def lstat(self, path=None):
347 347 return os.lstat(self.join(path))
348 348
349 349 def listdir(self, path=None):
350 350 return os.listdir(self.join(path))
351 351
352 352 def makedir(self, path=None, notindexed=True):
353 353 return util.makedir(self.join(path), notindexed)
354 354
355 355 def makedirs(self, path=None, mode=None):
356 356 return util.makedirs(self.join(path), mode)
357 357
358 358 def makelock(self, info, path):
359 359 return util.makelock(info, self.join(path))
360 360
361 361 def mkdir(self, path=None):
362 362 return os.mkdir(self.join(path))
363 363
364 364 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
365 365 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
366 366 dir=self.join(dir), text=text)
367 367 dname, fname = util.split(name)
368 368 if dir:
369 369 return fd, os.path.join(dir, fname)
370 370 else:
371 371 return fd, fname
372 372
373 373 def readdir(self, path=None, stat=None, skip=None):
374 374 return osutil.listdir(self.join(path), stat, skip)
375 375
376 376 def readlock(self, path):
377 377 return util.readlock(self.join(path))
378 378
379 379 def rename(self, src, dst, checkambig=False):
380 380 """Rename from src to dst
381 381
382 382 checkambig argument is used with util.filestat, and is useful
383 383 only if destination file is guarded by any lock
384 384 (e.g. repo.lock or repo.wlock).
385 385 """
386 386 dstpath = self.join(dst)
387 387 oldstat = checkambig and util.filestat(dstpath)
388 388 if oldstat and oldstat.stat:
389 389 ret = util.rename(self.join(src), dstpath)
390 390 newstat = util.filestat(dstpath)
391 391 if newstat.isambig(oldstat):
392 392 # stat of renamed file is ambiguous to original one
393 393 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
394 394 os.utime(dstpath, (advanced, advanced))
395 395 return ret
396 396 return util.rename(self.join(src), dstpath)
397 397
398 398 def readlink(self, path):
399 399 return os.readlink(self.join(path))
400 400
401 401 def removedirs(self, path=None):
402 402 """Remove a leaf directory and all empty intermediate ones
403 403 """
404 404 return util.removedirs(self.join(path))
405 405
406 406 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
407 407 """Remove a directory tree recursively
408 408
409 409 If ``forcibly``, this tries to remove READ-ONLY files, too.
410 410 """
411 411 if forcibly:
412 412 def onerror(function, path, excinfo):
413 413 if function is not os.remove:
414 414 raise
415 415 # read-only files cannot be unlinked under Windows
416 416 s = os.stat(path)
417 417 if (s.st_mode & stat.S_IWRITE) != 0:
418 418 raise
419 419 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
420 420 os.remove(path)
421 421 else:
422 422 onerror = None
423 423 return shutil.rmtree(self.join(path),
424 424 ignore_errors=ignore_errors, onerror=onerror)
425 425
426 426 def setflags(self, path, l, x):
427 427 return util.setflags(self.join(path), l, x)
428 428
429 429 def stat(self, path=None):
430 430 return os.stat(self.join(path))
431 431
432 432 def unlink(self, path=None):
433 433 return util.unlink(self.join(path))
434 434
435 435 def unlinkpath(self, path=None, ignoremissing=False):
436 436 return util.unlinkpath(self.join(path), ignoremissing)
437 437
438 438 def utime(self, path=None, t=None):
439 439 return os.utime(self.join(path), t)
440 440
441 441 def walk(self, path=None, onerror=None):
442 442 """Yield a (dirpath, dirs, files) tuple for each directory under path
443 443
444 444 ``dirpath`` is relative to the root of this vfs. This uses
445 445 ``os.sep`` as the path separator, even if you specify a
446 446 POSIX-style ``path``.
447 447
448 448 "The root of this vfs" is represented as empty ``dirpath``.
449 449 """
450 450 root = os.path.normpath(self.join(None))
451 451 # when dirpath == root, dirpath[prefixlen:] becomes empty
452 452 # because len(dirpath) < prefixlen.
453 453 prefixlen = len(pathutil.normasprefix(root))
454 454 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
455 455 yield (dirpath[prefixlen:], dirs, files)
456 456
457 457 @contextlib.contextmanager
458 458 def backgroundclosing(self, ui, expectedcount=-1):
459 459 """Allow files to be closed asynchronously.
460 460
461 461 When this context manager is active, ``backgroundclose`` can be passed
462 462 to ``__call__``/``open`` to result in the file possibly being closed
463 463 asynchronously, on a background thread.
464 464 """
465 465 # This is an arbitrary restriction and could be changed if we ever
466 466 # have a use case.
467 467 vfs = getattr(self, 'vfs', self)
468 468 if getattr(vfs, '_backgroundfilecloser', None):
469 469 raise error.Abort(
470 470 _('can only have 1 active background file closer'))
471 471
472 472 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
473 473 try:
474 474 vfs._backgroundfilecloser = bfc
475 475 yield bfc
476 476 finally:
477 477 vfs._backgroundfilecloser = None
478 478
479 479 class vfs(abstractvfs):
480 480 '''Operate files relative to a base directory
481 481
482 482 This class is used to hide the details of COW semantics and
483 483 remote file access from higher level code.
484 484 '''
485 485 def __init__(self, base, audit=True, expandpath=False, realpath=False):
486 486 if expandpath:
487 487 base = util.expandpath(base)
488 488 if realpath:
489 489 base = os.path.realpath(base)
490 490 self.base = base
491 491 self.mustaudit = audit
492 492 self.createmode = None
493 493 self._trustnlink = None
494 494
495 495 @property
496 496 def mustaudit(self):
497 497 return self._audit
498 498
499 499 @mustaudit.setter
500 500 def mustaudit(self, onoff):
501 501 self._audit = onoff
502 502 if onoff:
503 503 self.audit = pathutil.pathauditor(self.base)
504 504 else:
505 505 self.audit = util.always
506 506
507 507 @util.propertycache
508 508 def _cansymlink(self):
509 509 return util.checklink(self.base)
510 510
511 511 @util.propertycache
512 512 def _chmod(self):
513 513 return util.checkexec(self.base)
514 514
515 515 def _fixfilemode(self, name):
516 516 if self.createmode is None or not self._chmod:
517 517 return
518 518 os.chmod(name, self.createmode & 0o666)
519 519
520 520 def __call__(self, path, mode="r", text=False, atomictemp=False,
521 521 notindexed=False, backgroundclose=False, checkambig=False):
522 522 '''Open ``path`` file, which is relative to vfs root.
523 523
524 524 Newly created directories are marked as "not to be indexed by
525 525 the content indexing service", if ``notindexed`` is specified
526 526 for "write" mode access.
527 527
528 528 If ``backgroundclose`` is passed, the file may be closed asynchronously.
529 529 It can only be used if the ``self.backgroundclosing()`` context manager
530 530 is active. This should only be specified if the following criteria hold:
531 531
532 532 1. There is a potential for writing thousands of files. Unless you
533 533 are writing thousands of files, the performance benefits of
534 534 asynchronously closing files are not realized.
535 535 2. Files are opened exactly once for the ``backgroundclosing``
536 536 active duration and are therefore free of race conditions between
537 537 closing a file on a background thread and reopening it. (If the
538 538 file were opened multiple times, there could be unflushed data
539 539 because the original file handle hasn't been flushed/closed yet.)
540 540
541 541 ``checkambig`` argument is passed to atomictempfile (valid
542 542 only for writing), and is useful only if target file is
543 543 guarded by any lock (e.g. repo.lock or repo.wlock).
544 544 '''
545 545 if self._audit:
546 546 r = util.checkosfilename(path)
547 547 if r:
548 548 raise error.Abort("%s: %r" % (r, path))
549 549 self.audit(path)
550 550 f = self.join(path)
551 551
552 552 if not text and "b" not in mode:
553 553 mode += "b" # for that other OS
554 554
555 555 nlink = -1
556 556 if mode not in ('r', 'rb'):
557 557 dirname, basename = util.split(f)
558 558 # If basename is empty, then the path is malformed because it points
559 559 # to a directory. Let the posixfile() call below raise IOError.
560 560 if basename:
561 561 if atomictemp:
562 562 util.makedirs(dirname, self.createmode, notindexed)
563 563 return util.atomictempfile(f, mode, self.createmode,
564 564 checkambig=checkambig)
565 565 try:
566 566 if 'w' in mode:
567 567 util.unlink(f)
568 568 nlink = 0
569 569 else:
570 570 # nlinks() may behave differently for files on Windows
571 571 # shares if the file is open.
572 572 with util.posixfile(f):
573 573 nlink = util.nlinks(f)
574 574 if nlink < 1:
575 575 nlink = 2 # force mktempcopy (issue1922)
576 576 except (OSError, IOError) as e:
577 577 if e.errno != errno.ENOENT:
578 578 raise
579 579 nlink = 0
580 580 util.makedirs(dirname, self.createmode, notindexed)
581 581 if nlink > 0:
582 582 if self._trustnlink is None:
583 583 self._trustnlink = nlink > 1 or util.checknlink(f)
584 584 if nlink > 1 or not self._trustnlink:
585 585 util.rename(util.mktempcopy(f), f)
586 586 fp = util.posixfile(f, mode)
587 587 if nlink == 0:
588 588 self._fixfilemode(f)
589 589
590 590 if backgroundclose:
591 591 if not self._backgroundfilecloser:
592 592 raise error.Abort(_('backgroundclose can only be used when a '
593 593 'backgroundclosing context manager is active')
594 594 )
595 595
596 596 fp = delayclosedfile(fp, self._backgroundfilecloser)
597 597
598 598 return fp
599 599
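Editor's note: putting ``backgroundclosing`` and ``backgroundclose`` together, a hedged usage sketch; the ui construction, base directory, and file count are placeholders:

    from mercurial import scmutil, ui as uimod

    myui = uimod.ui()
    myvfs = scmutil.vfs('/tmp/example')
    # Only worthwhile for thousands of files, per the docstring above.
    with myvfs.backgroundclosing(myui, expectedcount=5000):
        for i in range(5000):
            # hand each handle to the background closer threads
            myvfs.write('file%d' % i, 'content %d\n' % i,
                        backgroundclose=True)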
600 600 def symlink(self, src, dst):
601 601 self.audit(dst)
602 602 linkname = self.join(dst)
603 603 try:
604 604 os.unlink(linkname)
605 605 except OSError:
606 606 pass
607 607
608 608 util.makedirs(os.path.dirname(linkname), self.createmode)
609 609
610 610 if self._cansymlink:
611 611 try:
612 612 os.symlink(src, linkname)
613 613 except OSError as err:
614 614 raise OSError(err.errno, _('could not symlink to %r: %s') %
615 615 (src, err.strerror), linkname)
616 616 else:
617 617 self.write(dst, src)
618 618
619 619 def join(self, path, *insidef):
620 620 if path:
621 621 return os.path.join(self.base, path, *insidef)
622 622 else:
623 623 return self.base
624 624
625 625 opener = vfs
626 626
627 627 class auditvfs(object):
628 628 def __init__(self, vfs):
629 629 self.vfs = vfs
630 630
631 631 @property
632 632 def mustaudit(self):
633 633 return self.vfs.mustaudit
634 634
635 635 @mustaudit.setter
636 636 def mustaudit(self, onoff):
637 637 self.vfs.mustaudit = onoff
638 638
639 639 @property
640 640 def options(self):
641 641 return self.vfs.options
642 642
643 643 @options.setter
644 644 def options(self, value):
645 645 self.vfs.options = value
646 646
647 647 class filtervfs(abstractvfs, auditvfs):
648 648 '''Wrapper vfs for filtering filenames with a function.'''
649 649
650 650 def __init__(self, vfs, filter):
651 651 auditvfs.__init__(self, vfs)
652 652 self._filter = filter
653 653
654 654 def __call__(self, path, *args, **kwargs):
655 655 return self.vfs(self._filter(path), *args, **kwargs)
656 656
657 657 def join(self, path, *insidef):
658 658 if path:
659 659 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
660 660 else:
661 661 return self.vfs.join(path)
662 662
663 663 filteropener = filtervfs
664 664
665 665 class readonlyvfs(abstractvfs, auditvfs):
666 666 '''Wrapper vfs preventing any writing.'''
667 667
668 668 def __init__(self, vfs):
669 669 auditvfs.__init__(self, vfs)
670 670
671 671 def __call__(self, path, mode='r', *args, **kw):
672 672 if mode not in ('r', 'rb'):
673 673 raise error.Abort(_('this vfs is read only'))
674 674 return self.vfs(path, mode, *args, **kw)
675 675
676 676 def join(self, path, *insidef):
677 677 return self.vfs.join(path, *insidef)
678 678
679 679 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
680 680 '''yield every hg repository under path, always recursively.
681 681 The recurse flag will only control recursion into repo working dirs'''
682 682 def errhandler(err):
683 683 if err.filename == path:
684 684 raise err
685 685 samestat = getattr(os.path, 'samestat', None)
686 686 if followsym and samestat is not None:
687 687 def adddir(dirlst, dirname):
688 688 match = False
689 689 dirstat = os.stat(dirname)
690 690 for lstdirstat in dirlst:
691 691 if samestat(dirstat, lstdirstat):
692 692 match = True
693 693 break
694 694 if not match:
695 695 dirlst.append(dirstat)
696 696 return not match
697 697 else:
698 698 followsym = False
699 699
700 700 if (seen_dirs is None) and followsym:
701 701 seen_dirs = []
702 702 adddir(seen_dirs, path)
703 703 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
704 704 dirs.sort()
705 705 if '.hg' in dirs:
706 706 yield root # found a repository
707 707 qroot = os.path.join(root, '.hg', 'patches')
708 708 if os.path.isdir(os.path.join(qroot, '.hg')):
709 709 yield qroot # we have a patch queue repo here
710 710 if recurse:
711 711 # avoid recursing inside the .hg directory
712 712 dirs.remove('.hg')
713 713 else:
714 714 dirs[:] = [] # don't descend further
715 715 elif followsym:
716 716 newdirs = []
717 717 for d in dirs:
718 718 fname = os.path.join(root, d)
719 719 if adddir(seen_dirs, fname):
720 720 if os.path.islink(fname):
721 721 for hgname in walkrepos(fname, True, seen_dirs):
722 722 yield hgname
723 723 else:
724 724 newdirs.append(d)
725 725 dirs[:] = newdirs
726 726
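Editor's note: a brief usage sketch; the starting directory is a placeholder:

    # Collect every repository under a projects tree, following symlinks
    # and recursing into repository working directories.
    repos = list(walkrepos('/home/user/projects', followsym=True,
                           recurse=True))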
727 727 def osrcpath():
728 728 '''return default os-specific hgrc search path'''
729 729 path = []
730 730 defaultpath = os.path.join(util.datapath, 'default.d')
731 731 if os.path.isdir(defaultpath):
732 732 for f, kind in osutil.listdir(defaultpath):
733 733 if f.endswith('.rc'):
734 734 path.append(os.path.join(defaultpath, f))
735 735 path.extend(systemrcpath())
736 736 path.extend(userrcpath())
737 737 path = [os.path.normpath(f) for f in path]
738 738 return path
739 739
740 740 _rcpath = None
741 741
742 742 def rcpath():
743 743 '''return hgrc search path. if env var HGRCPATH is set, use it.
744 744 for each item in path, if directory, use files ending in .rc,
745 745 else use item.
746 746 make HGRCPATH empty to only look in .hg/hgrc of current repo.
747 747 if no HGRCPATH, use default os-specific path.'''
748 748 global _rcpath
749 749 if _rcpath is None:
750 750 if 'HGRCPATH' in os.environ:
751 751 _rcpath = []
752 752 for p in os.environ['HGRCPATH'].split(os.pathsep):
753 753 if not p:
754 754 continue
755 755 p = util.expandpath(p)
756 756 if os.path.isdir(p):
757 757 for f, kind in osutil.listdir(p):
758 758 if f.endswith('.rc'):
759 759 _rcpath.append(os.path.join(p, f))
760 760 else:
761 761 _rcpath.append(p)
762 762 else:
763 763 _rcpath = osrcpath()
764 764 return _rcpath
765 765
766 766 def intrev(rev):
767 767 """Return integer for a given revision that can be used in comparison or
768 768 arithmetic operation"""
769 769 if rev is None:
770 770 return wdirrev
771 771 return rev
772 772
773 773 def revsingle(repo, revspec, default='.'):
774 774 if not revspec and revspec != 0:
775 775 return repo[default]
776 776
777 777 l = revrange(repo, [revspec])
778 778 if not l:
779 779 raise error.Abort(_('empty revision set'))
780 780 return repo[l.last()]
781 781
782 782 def _pairspec(revspec):
783 783 tree = revset.parse(revspec)
784 784 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
785 785
786 786 def revpair(repo, revs):
787 787 if not revs:
788 788 return repo.dirstate.p1(), None
789 789
790 790 l = revrange(repo, revs)
791 791
792 792 if not l:
793 793 first = second = None
794 794 elif l.isascending():
795 795 first = l.min()
796 796 second = l.max()
797 797 elif l.isdescending():
798 798 first = l.max()
799 799 second = l.min()
800 800 else:
801 801 first = l.first()
802 802 second = l.last()
803 803
804 804 if first is None:
805 805 raise error.Abort(_('empty revision range'))
806 806 if (first == second and len(revs) >= 2
807 807 and not all(revrange(repo, [r]) for r in revs)):
808 808 raise error.Abort(_('empty revision on one side of range'))
809 809
810 810 # if top-level is range expression, the result must always be a pair
811 811 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
812 812 return repo.lookup(first), None
813 813
814 814 return repo.lookup(first), repo.lookup(second)
815 815
816 816 def revrange(repo, specs):
817 817 """Execute 1 to many revsets and return the union.
818 818
819 819 This is the preferred mechanism for executing revsets using user-specified
820 820 config options, such as revset aliases.
821 821
822 822 The revsets specified by ``specs`` will be executed via a chained ``OR``
823 823 expression. If ``specs`` is empty, an empty result is returned.
824 824
825 825 ``specs`` can contain integers, in which case they are assumed to be
826 826 revision numbers.
827 827
828 828 It is assumed the revsets are already formatted. If you have arguments
829 829 that need to be expanded in the revset, call ``revset.formatspec()``
830 830 and pass the result as an element of ``specs``.
831 831
832 832 Specifying a single revset is allowed.
833 833
834 834 Returns a ``revset.abstractsmartset`` which is a list-like interface over
835 835 integer revisions.
836 836 """
837 837 allspecs = []
838 838 for spec in specs:
839 839 if isinstance(spec, int):
840 840 spec = revset.formatspec('rev(%d)', spec)
841 841 allspecs.append(spec)
842 842 m = revset.matchany(repo.ui, allspecs, repo)
843 843 return m(repo)
844 844
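Editor's note: a sketch of the formatting convention the docstring describes, using the ``revset`` module imported at the top of this file; ``repo`` is assumed to be an open localrepository:

    # Pre-format user-supplied values with revset.formatspec(), then pass
    # the formatted strings (and/or bare integers) as elements of specs.
    spec = revset.formatspec('branch(%s)', 'default')  # handles quoting
    revs = revrange(repo, [spec, 42])                  # 42 becomes rev(42)
    minrev, maxrev = revs.min(), revs.max()            # smartset interface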
845 845 def meaningfulparents(repo, ctx):
846 846 """Return list of meaningful (or all if debug) parentrevs for rev.
847 847
848 848 For merges (two non-nullrev revisions) both parents are meaningful.
849 849 Otherwise the first parent revision is considered meaningful if it
850 850 is not the preceding revision.
851 851 """
852 852 parents = ctx.parents()
853 853 if len(parents) > 1:
854 854 return parents
855 855 if repo.ui.debugflag:
856 856 return [parents[0], repo['null']]
857 857 if parents[0].rev() >= intrev(ctx.rev()) - 1:
858 858 return []
859 859 return parents
860 860
861 861 def expandpats(pats):
862 862 '''Expand bare globs when running on windows.
863 863 On posix we assume it has already been done by sh.'''
864 864 if not util.expandglobs:
865 865 return list(pats)
866 866 ret = []
867 867 for kindpat in pats:
868 868 kind, pat = matchmod._patsplit(kindpat, None)
869 869 if kind is None:
870 870 try:
871 871 globbed = glob.glob(pat)
872 872 except re.error:
873 873 globbed = [pat]
874 874 if globbed:
875 875 ret.extend(globbed)
876 876 continue
877 877 ret.append(kindpat)
878 878 return ret
879 879
880 880 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
881 881 badfn=None):
882 882 '''Return a matcher and the patterns that were used.
883 883 The matcher will warn about bad matches, unless an alternate badfn callback
884 884 is provided.'''
885 885 if pats == ("",):
886 886 pats = []
887 887 if opts is None:
888 888 opts = {}
889 889 if not globbed and default == 'relpath':
890 890 pats = expandpats(pats or [])
891 891
892 892 def bad(f, msg):
893 893 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
894 894
895 895 if badfn is None:
896 896 badfn = bad
897 897
898 898 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
899 899 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
900 900
901 901 if m.always():
902 902 pats = []
903 903 return m, pats
904 904
905 905 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
906 906 badfn=None):
907 907 '''Return a matcher that will warn about bad matches.'''
908 908 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
909 909
910 910 def matchall(repo):
911 911 '''Return a matcher that will efficiently match everything.'''
912 912 return matchmod.always(repo.root, repo.getcwd())
913 913
914 914 def matchfiles(repo, files, badfn=None):
915 915 '''Return a matcher that will efficiently match exactly these files.'''
916 916 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
917 917
918 918 def origpath(ui, repo, filepath):
919 919 '''customize where .orig files are created
920 920
921 921 Fetch user defined path from config file: [ui] origbackuppath = <path>
922 922 Fall back to default (filepath) if not specified
923 923 '''
924 924 origbackuppath = ui.config('ui', 'origbackuppath', None)
925 925 if origbackuppath is None:
926 926 return filepath + ".orig"
927 927
928 928 filepathfromroot = os.path.relpath(filepath, start=repo.root)
929 929 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
930 930
931 931 origbackupdir = repo.vfs.dirname(fullorigpath)
932 932 if not repo.vfs.exists(origbackupdir):
933 933 ui.note(_('creating directory: %s\n') % origbackupdir)
934 934 util.makedirs(origbackupdir)
935 935
936 936 return fullorigpath + ".orig"
937 937
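Editor's note: a sketch of the config knob's effect, assuming ``ui`` and ``repo`` are live objects and ``origbackuppath = .hg/origbackups`` is set in the [ui] section of hgrc:

    backup = origpath(ui, repo, repo.wjoin('src/module.py'))
    # per the code above, this creates the backup directory if needed
    # and yields: <repo root>/.hg/origbackups/src/module.py.orig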
938 938 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
939 939 if opts is None:
940 940 opts = {}
941 941 m = matcher
942 942 if dry_run is None:
943 943 dry_run = opts.get('dry_run')
944 944 if similarity is None:
945 945 similarity = float(opts.get('similarity') or 0)
946 946
947 947 ret = 0
948 948 join = lambda f: os.path.join(prefix, f)
949 949
950 950 wctx = repo[None]
951 951 for subpath in sorted(wctx.substate):
952 952 submatch = matchmod.subdirmatcher(subpath, m)
953 953 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
954 954 sub = wctx.sub(subpath)
955 955 try:
956 956 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
957 957 ret = 1
958 958 except error.LookupError:
959 959 repo.ui.status(_("skipping missing subrepository: %s\n")
960 960 % join(subpath))
961 961
962 962 rejected = []
963 963 def badfn(f, msg):
964 964 if f in m.files():
965 965 m.bad(f, msg)
966 966 rejected.append(f)
967 967
968 968 badmatch = matchmod.badmatch(m, badfn)
969 969 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
970 970 badmatch)
971 971
972 972 unknownset = set(unknown + forgotten)
973 973 toprint = unknownset.copy()
974 974 toprint.update(deleted)
975 975 for abs in sorted(toprint):
976 976 if repo.ui.verbose or not m.exact(abs):
977 977 if abs in unknownset:
978 978 status = _('adding %s\n') % m.uipath(abs)
979 979 else:
980 980 status = _('removing %s\n') % m.uipath(abs)
981 981 repo.ui.status(status)
982 982
983 983 renames = _findrenames(repo, m, added + unknown, removed + deleted,
984 984 similarity)
985 985
986 986 if not dry_run:
987 987 _markchanges(repo, unknown + forgotten, deleted, renames)
988 988
989 989 for f in rejected:
990 990 if f in m.files():
991 991 return 1
992 992 return ret
993 993
994 994 def marktouched(repo, files, similarity=0.0):
995 995 '''Assert that files have somehow been operated upon. files are relative to
996 996 the repo root.'''
997 997 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
998 998 rejected = []
999 999
1000 1000 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1001 1001
1002 1002 if repo.ui.verbose:
1003 1003 unknownset = set(unknown + forgotten)
1004 1004 toprint = unknownset.copy()
1005 1005 toprint.update(deleted)
1006 1006 for abs in sorted(toprint):
1007 1007 if abs in unknownset:
1008 1008 status = _('adding %s\n') % abs
1009 1009 else:
1010 1010 status = _('removing %s\n') % abs
1011 1011 repo.ui.status(status)
1012 1012
1013 1013 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1014 1014 similarity)
1015 1015
1016 1016 _markchanges(repo, unknown + forgotten, deleted, renames)
1017 1017
1018 1018 for f in rejected:
1019 1019 if f in m.files():
1020 1020 return 1
1021 1021 return 0
1022 1022
1023 1023 def _interestingfiles(repo, matcher):
1024 1024 '''Walk dirstate with matcher, looking for files that addremove would care
1025 1025 about.
1026 1026
1027 1027 This is different from dirstate.status because it doesn't care about
1028 1028 whether files are modified or clean.'''
1029 1029 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1030 1030 audit_path = pathutil.pathauditor(repo.root)
1031 1031
1032 1032 ctx = repo[None]
1033 1033 dirstate = repo.dirstate
1034 1034 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1035 1035 full=False)
1036 1036 for abs, st in walkresults.iteritems():
1037 1037 dstate = dirstate[abs]
1038 1038 if dstate == '?' and audit_path.check(abs):
1039 1039 unknown.append(abs)
1040 1040 elif dstate != 'r' and not st:
1041 1041 deleted.append(abs)
1042 1042 elif dstate == 'r' and st:
1043 1043 forgotten.append(abs)
1044 1044 # for finding renames
1045 1045 elif dstate == 'r' and not st:
1046 1046 removed.append(abs)
1047 1047 elif dstate == 'a':
1048 1048 added.append(abs)
1049 1049
1050 1050 return added, unknown, deleted, removed, forgotten
1051 1051
1052 1052 def _findrenames(repo, matcher, added, removed, similarity):
1053 1053 '''Find renames from removed files to added ones.'''
1054 1054 renames = {}
1055 1055 if similarity > 0:
1056 1056 for old, new, score in similar.findrenames(repo, added, removed,
1057 1057 similarity):
1058 1058 if (repo.ui.verbose or not matcher.exact(old)
1059 1059 or not matcher.exact(new)):
1060 1060 repo.ui.status(_('recording removal of %s as rename to %s '
1061 1061 '(%d%% similar)\n') %
1062 1062 (matcher.rel(old), matcher.rel(new),
1063 1063 score * 100))
1064 1064 renames[new] = old
1065 1065 return renames
1066 1066
1067 1067 def _markchanges(repo, unknown, deleted, renames):
1068 1068 '''Marks the files in unknown as added, the files in deleted as removed,
1069 1069 and the files in renames as copied.'''
1070 1070 wctx = repo[None]
1071 1071 with repo.wlock():
1072 1072 wctx.forget(deleted)
1073 1073 wctx.add(unknown)
1074 1074 for new, old in renames.iteritems():
1075 1075 wctx.copy(old, new)
1076 1076
1077 1077 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1078 1078 """Update the dirstate to reflect the intent of copying src to dst. For
1079 1079 different reasons it might not end with dst being marked as copied from src.
1080 1080 """
1081 1081 origsrc = repo.dirstate.copied(src) or src
1082 1082 if dst == origsrc: # copying back a copy?
1083 1083 if repo.dirstate[dst] not in 'mn' and not dryrun:
1084 1084 repo.dirstate.normallookup(dst)
1085 1085 else:
1086 1086 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1087 1087 if not ui.quiet:
1088 1088 ui.warn(_("%s has not been committed yet, so no copy "
1089 1089 "data will be stored for %s.\n")
1090 1090 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1091 1091 if repo.dirstate[dst] in '?r' and not dryrun:
1092 1092 wctx.add([dst])
1093 1093 elif not dryrun:
1094 1094 wctx.copy(origsrc, dst)
1095 1095
1096 1096 def readrequires(opener, supported):
1097 1097 '''Reads and parses .hg/requires and checks if all entries found
1098 1098 are in the list of supported features.'''
1099 1099 requirements = set(opener.read("requires").splitlines())
1100 1100 missings = []
1101 1101 for r in requirements:
1102 1102 if r not in supported:
1103 1103 if not r or not r[0].isalnum():
1104 1104 raise error.RequirementError(_(".hg/requires file is corrupt"))
1105 1105 missings.append(r)
1106 1106 missings.sort()
1107 1107 if missings:
1108 1108 raise error.RequirementError(
1109 1109 _("repository requires features unknown to this Mercurial: %s")
1110 1110 % " ".join(missings),
1111 1111 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1112 1112 " for more information"))
1113 1113 return requirements
1114 1114
1115 1115 def writerequires(opener, requirements):
1116 1116 with opener('requires', 'w') as fp:
1117 1117 for r in sorted(requirements):
1118 1118 fp.write("%s\n" % r)
1119 1119
1120 1120 class filecachesubentry(object):
1121 1121 def __init__(self, path, stat):
1122 1122 self.path = path
1123 1123 self.cachestat = None
1124 1124 self._cacheable = None
1125 1125
1126 1126 if stat:
1127 1127 self.cachestat = filecachesubentry.stat(self.path)
1128 1128
1129 1129 if self.cachestat:
1130 1130 self._cacheable = self.cachestat.cacheable()
1131 1131 else:
1132 1132 # None means we don't know yet
1133 1133 self._cacheable = None
1134 1134
1135 1135 def refresh(self):
1136 1136 if self.cacheable():
1137 1137 self.cachestat = filecachesubentry.stat(self.path)
1138 1138
1139 1139 def cacheable(self):
1140 1140 if self._cacheable is not None:
1141 1141 return self._cacheable
1142 1142
1143 1143 # we don't know yet, assume it is for now
1144 1144 return True
1145 1145
1146 1146 def changed(self):
1147 1147 # no point in going further if we can't cache it
1148 1148 if not self.cacheable():
1149 1149 return True
1150 1150
1151 1151 newstat = filecachesubentry.stat(self.path)
1152 1152
1153 1153 # we may not know if it's cacheable yet, check again now
1154 1154 if newstat and self._cacheable is None:
1155 1155 self._cacheable = newstat.cacheable()
1156 1156
1157 1157 # check again
1158 1158 if not self._cacheable:
1159 1159 return True
1160 1160
1161 1161 if self.cachestat != newstat:
1162 1162 self.cachestat = newstat
1163 1163 return True
1164 1164 else:
1165 1165 return False
1166 1166
1167 1167 @staticmethod
1168 1168 def stat(path):
1169 1169 try:
1170 1170 return util.cachestat(path)
1171 1171 except OSError as e:
1172 1172 if e.errno != errno.ENOENT:
1173 1173 raise
1174 1174
1175 1175 class filecacheentry(object):
1176 1176 def __init__(self, paths, stat=True):
1177 1177 self._entries = []
1178 1178 for path in paths:
1179 1179 self._entries.append(filecachesubentry(path, stat))
1180 1180
1181 1181 def changed(self):
1182 1182 '''true if any entry has changed'''
1183 1183 for entry in self._entries:
1184 1184 if entry.changed():
1185 1185 return True
1186 1186 return False
1187 1187
1188 1188 def refresh(self):
1189 1189 for entry in self._entries:
1190 1190 entry.refresh()
1191 1191
1192 1192 class filecache(object):
1193 1193 '''A property like decorator that tracks files under .hg/ for updates.
1194 1194
1195 1195 Records stat info when called in _filecache.
1196 1196
1197 1197 On subsequent calls, compares old stat info with new info, and recreates the
1198 1198 object when any of the files changes, updating the new stat info in
1199 1199 _filecache.
1200 1200
1201 1201 Mercurial either atomically renames or appends to files under .hg,
1202 1202 so to ensure the cache is reliable we need the filesystem to be able
1203 1203 to tell us if a file has been replaced. If it can't, we fall back to
1204 1204 recreating the object on every call (essentially the same behavior as
1205 1205 propertycache).
1206 1206
1207 1207 '''
1208 1208 def __init__(self, *paths):
1209 1209 self.paths = paths
1210 1210
1211 1211 def join(self, obj, fname):
1212 1212 """Used to compute the runtime path of a cached file.
1213 1213
1214 1214 Users should subclass filecache and provide their own version of this
1215 1215 function to call the appropriate join function on 'obj' (an instance
1216 1216 of the class whose member function was decorated).
1217 1217 """
1218 1218 return obj.join(fname)
1219 1219
1220 1220 def __call__(self, func):
1221 1221 self.func = func
1222 1222 self.name = func.__name__
1223 1223 return self
1224 1224
1225 1225 def __get__(self, obj, type=None):
1226 1226 # if accessed on the class, return the descriptor itself.
1227 1227 if obj is None:
1228 1228 return self
1229 1229 # do we need to check if the file changed?
1230 1230 if self.name in obj.__dict__:
1231 1231 assert self.name in obj._filecache, self.name
1232 1232 return obj.__dict__[self.name]
1233 1233
1234 1234 entry = obj._filecache.get(self.name)
1235 1235
1236 1236 if entry:
1237 1237 if entry.changed():
1238 1238 entry.obj = self.func(obj)
1239 1239 else:
1240 1240 paths = [self.join(obj, path) for path in self.paths]
1241 1241
1242 1242 # We stat -before- creating the object so our cache doesn't lie if
1243 1243 # a writer modified the file between the time we read and stat
1244 1244 entry = filecacheentry(paths, True)
1245 1245 entry.obj = self.func(obj)
1246 1246
1247 1247 obj._filecache[self.name] = entry
1248 1248
1249 1249 obj.__dict__[self.name] = entry.obj
1250 1250 return entry.obj
1251 1251
1252 1252 def __set__(self, obj, value):
1253 1253 if self.name not in obj._filecache:
1254 1254 # we add an entry for the missing value because X in __dict__
1255 1255 # implies X in _filecache
1256 1256 paths = [self.join(obj, path) for path in self.paths]
1257 1257 ce = filecacheentry(paths, False)
1258 1258 obj._filecache[self.name] = ce
1259 1259 else:
1260 1260 ce = obj._filecache[self.name]
1261 1261
1262 1262 ce.obj = value # update cached copy
1263 1263 obj.__dict__[self.name] = value # update copy returned by obj.x
1264 1264
1265 1265 def __delete__(self, obj):
1266 1266 try:
1267 1267 del obj.__dict__[self.name]
1268 1268 except KeyError:
1269 1269 raise AttributeError(self.name)
1270 1270
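Editor's note: a minimal sketch of the decorator pattern described in the docstring; the host class and the cached value are invented for illustration:

    import os

    class fakerepo(object):
        '''Hypothetical host: filecache needs _filecache and join().'''
        def __init__(self, root):
            self.root = root
            self._filecache = {}

        def join(self, fname):
            # the default filecache.join above delegates to obj.join
            return os.path.join(self.root, '.hg', fname)

        @filecache('bookmarks')
        def bookmarks(self):
            # recomputed only when .hg/bookmarks' stat info changes
            # (assumes the file exists)
            with open(self.join('bookmarks'), 'rb') as fp:
                return fp.read()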
1271 1271 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1272 1272 if lock is None:
1273 1273 raise error.LockInheritanceContractViolation(
1274 1274 'lock can only be inherited while held')
1275 1275 if environ is None:
1276 1276 environ = {}
1277 1277 with lock.inherit() as locker:
1278 1278 environ[envvar] = locker
1279 1279 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1280 1280
1281 1281 def wlocksub(repo, cmd, *args, **kwargs):
1282 1282 """run cmd as a subprocess that allows inheriting repo's wlock
1283 1283
1284 1284 This can only be called while the wlock is held. This takes all the
1285 1285 arguments that ui.system does, and returns the exit code of the
1286 1286 subprocess."""
1287 1287 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1288 1288 **kwargs)
1289 1289
1290 1290 def gdinitconfig(ui):
1291 1291 """helper function to know if a repo should be created as general delta
1292 1292 """
1293 1293 # experimental config: format.generaldelta
1294 1294 return (ui.configbool('format', 'generaldelta', False)
1295 1295 or ui.configbool('format', 'usegeneraldelta', True))
1296 1296
1297 1297 def gddeltaconfig(ui):
1298 1298 """helper function to know if incoming delta should be optimised
1299 1299 """
1300 1300 # experimental config: format.generaldelta
1301 1301 return ui.configbool('format', 'generaldelta', False)
1302 1302
1303 class delayclosedfile(object):
1304 """Proxy for a file object whose close is delayed.
1303 class closewrapbase(object):
1304 """Base class of wrapper, which hooks closing
1305 1305
1306 1306 Do not instantiate outside of the vfs layer.
1307 1307 """
1308
1309 def __init__(self, fh, closer):
1308 def __init__(self, fh):
1310 1309 object.__setattr__(self, '_origfh', fh)
1311 object.__setattr__(self, '_closer', closer)
1312 1310
1313 1311 def __getattr__(self, attr):
1314 1312 return getattr(self._origfh, attr)
1315 1313
1316 1314 def __setattr__(self, attr, value):
1317 1315 return setattr(self._origfh, attr, value)
1318 1316
1319 1317 def __delattr__(self, attr):
1320 1318 return delattr(self._origfh, attr)
1321 1319
1322 1320 def __enter__(self):
1323 1321 return self._origfh.__enter__()
1324 1322
1325 1323 def __exit__(self, exc_type, exc_value, exc_tb):
1324 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1325
1326 def close(self):
1327 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1328
1329 class delayclosedfile(closewrapbase):
1330 """Proxy for a file object whose close is delayed.
1331
1332 Do not instantiate outside of the vfs layer.
1333 """
1334 def __init__(self, fh, closer):
1335 super(delayclosedfile, self).__init__(fh)
1336 object.__setattr__(self, '_closer', closer)
1337
1338 def __exit__(self, exc_type, exc_value, exc_tb):
1326 1339 self._closer.close(self._origfh)
1327 1340
1328 1341 def close(self):
1329 1342 self._closer.close(self._origfh)
1330 1343
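Editor's note: the refactoring above moves the attribute-delegation boilerplate into ``closewrapbase`` so that subclasses only supply close behavior. A sketch of another wrapper built on the factored-out base; the logging behavior is invented for illustration:

    class loggedclosefile(closewrapbase):
        '''Hypothetical wrapper: log each close, then close for real.'''
        def __init__(self, fh, ui):
            super(loggedclosefile, self).__init__(fh)
            # bypass the delegating __setattr__, as delayclosedfile does
            object.__setattr__(self, '_ui', ui)

        def __exit__(self, exc_type, exc_value, exc_tb):
            self.close()

        def close(self):
            self._ui.debug('closing %r\n' % self._origfh)
            self._origfh.close()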
1331 1344 class backgroundfilecloser(object):
1332 1345 """Coordinates background closing of file handles on multiple threads."""
1333 1346 def __init__(self, ui, expectedcount=-1):
1334 1347 self._running = False
1335 1348 self._entered = False
1336 1349 self._threads = []
1337 1350 self._threadexception = None
1338 1351
1339 1352 # Only Windows/NTFS has slow file closing. So only enable by default
1340 1353 # on that platform. But allow it to be enabled elsewhere for testing.
1341 1354 defaultenabled = os.name == 'nt'
1342 1355 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1343 1356
1344 1357 if not enabled:
1345 1358 return
1346 1359
1347 1360 # There is overhead to starting and stopping the background threads.
1348 1361 # Don't do background processing unless the file count is large enough
1349 1362 # to justify it.
1350 1363 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1351 1364 2048)
1352 1365 # FUTURE dynamically start background threads after minfilecount closes.
1353 1366 # (We don't currently have any callers that don't know their file count)
1354 1367 if expectedcount > 0 and expectedcount < minfilecount:
1355 1368 return
1356 1369
1357 1370 # Windows defaults to a limit of 512 open files. A buffer of 128
1358 1371 # should give us enough headway.
1359 1372 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1360 1373 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1361 1374
1362 1375 ui.debug('starting %d threads for background file closing\n' %
1363 1376 threadcount)
1364 1377
1365 1378 self._queue = util.queue(maxsize=maxqueue)
1366 1379 self._running = True
1367 1380
1368 1381 for i in range(threadcount):
1369 1382 t = threading.Thread(target=self._worker, name='backgroundcloser')
1370 1383 self._threads.append(t)
1371 1384 t.start()
1372 1385
1373 1386 def __enter__(self):
1374 1387 self._entered = True
1375 1388 return self
1376 1389
1377 1390 def __exit__(self, exc_type, exc_value, exc_tb):
1378 1391 self._running = False
1379 1392
1380 1393 # Wait for threads to finish closing so open files don't linger for
1381 1394 # longer than lifetime of context manager.
1382 1395 for t in self._threads:
1383 1396 t.join()
1384 1397
1385 1398 def _worker(self):
1386 1399 """Main routine for worker thread."""
1387 1400 while True:
1388 1401 try:
1389 1402 fh = self._queue.get(block=True, timeout=0.100)
1390 1403 # Need to catch exceptions or the thread will terminate and
1391 1404 # we could orphan file descriptors.
1392 1405 try:
1393 1406 fh.close()
1394 1407 except Exception as e:
1395 1408 # Stash so can re-raise from main thread later.
1396 1409 self._threadexception = e
1397 1410 except util.empty:
1398 1411 if not self._running:
1399 1412 break
1400 1413
1401 1414 def close(self, fh):
1402 1415 """Schedule a file for closing."""
1403 1416 if not self._entered:
1404 1417 raise error.Abort(_('can only call close() when context manager '
1405 1418 'active'))
1406 1419
1407 1420 # If a background thread encountered an exception, raise now so we fail
1408 1421 # fast. Otherwise we may potentially go on for minutes until the error
1409 1422 # is acted on.
1410 1423 if self._threadexception:
1411 1424 e = self._threadexception
1412 1425 self._threadexception = None
1413 1426 raise e
1414 1427
1415 1428 # If we're not actively running, close synchronously.
1416 1429 if not self._running:
1417 1430 fh.close()
1418 1431 return
1419 1432
1420 1433 self._queue.put(fh, block=True, timeout=None)
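Editor's note: the pool above is governed by the ``worker.*`` config knobs read in ``__init__``. A sketch of force-enabling it off-Windows for testing; ``myui`` and ``myvfs`` are placeholder objects as in the earlier sketch, and the defaults quoted are those in the code:

    # Defaults per the code: enabled only on Windows, minfilecount=2048,
    # maxqueue=384, threadcount=4.
    myui.setconfig('worker', 'backgroundclose', 'true')
    myui.setconfig('worker', 'backgroundcloseminfilecount', '10')
    with myvfs.backgroundclosing(myui, expectedcount=100):
        for i in range(100):
            myvfs.write('f%d' % i, 'x', backgroundclose=True)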