scmutil: delete extra newline at EOF...
Augie Fackler
r29336:9368ed12 default
@@ -1,1391 +1,1390
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import os
14 14 import re
15 15 import shutil
16 16 import stat
17 17 import tempfile
18 18 import threading
19 19
20 20 from .i18n import _
21 21 from .node import wdirrev
22 22 from . import (
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 osutil,
27 27 pathutil,
28 28 phases,
29 29 revset,
30 30 similar,
31 31 util,
32 32 )
33 33
34 34 if os.name == 'nt':
35 35 from . import scmwindows as scmplatform
36 36 else:
37 37 from . import scmposix as scmplatform
38 38
39 39 systemrcpath = scmplatform.systemrcpath
40 40 userrcpath = scmplatform.userrcpath
41 41
42 42 class status(tuple):
43 43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
44 44 and 'ignored' properties are only relevant to the working copy.
45 45 '''
46 46
47 47 __slots__ = ()
48 48
49 49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
50 50 clean):
51 51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
52 52 ignored, clean))
53 53
54 54 @property
55 55 def modified(self):
56 56 '''files that have been modified'''
57 57 return self[0]
58 58
59 59 @property
60 60 def added(self):
61 61 '''files that have been added'''
62 62 return self[1]
63 63
64 64 @property
65 65 def removed(self):
66 66 '''files that have been removed'''
67 67 return self[2]
68 68
69 69 @property
70 70 def deleted(self):
71 71 '''files that are in the dirstate, but have been deleted from the
72 72 working copy (aka "missing")
73 73 '''
74 74 return self[3]
75 75
76 76 @property
77 77 def unknown(self):
78 78 '''files not in the dirstate that are not ignored'''
79 79 return self[4]
80 80
81 81 @property
82 82 def ignored(self):
83 83 '''files not in the dirstate that are ignored (by _dirignore())'''
84 84 return self[5]
85 85
86 86 @property
87 87 def clean(self):
88 88 '''files that have not been modified'''
89 89 return self[6]
90 90
91 91 def __repr__(self, *args, **kwargs):
92 92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
93 93 'unknown=%r, ignored=%r, clean=%r>') % self)
94 94
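# A minimal sketch of reading a ``status`` result; the seven lists are
# positional, but the named properties read better:
#
#     st = status(['a.txt'], [], [], [], [], [], ['b.txt'])
#     assert st.modified == ['a.txt'] and st.clean == ['b.txt']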
95 95 def itersubrepos(ctx1, ctx2):
96 96 """find subrepos in ctx1 or ctx2"""
97 97 # Create a (subpath, ctx) mapping where we prefer subpaths from
98 98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
99 99 # has been modified (in ctx2) but not yet committed (in ctx1).
100 100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
101 101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
102 102
103 103 missing = set()
104 104
105 105 for subpath in ctx2.substate:
106 106 if subpath not in ctx1.substate:
107 107 del subpaths[subpath]
108 108 missing.add(subpath)
109 109
110 110 for subpath, ctx in sorted(subpaths.iteritems()):
111 111 yield subpath, ctx.sub(subpath)
112 112
113 113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
114 114 # status and diff will have an accurate result when it does
115 115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
116 116 # against itself.
117 117 for subpath in missing:
118 118 yield subpath, ctx2.nullsub(subpath, ctx1)
119 119
120 120 def nochangesfound(ui, repo, excluded=None):
121 121 '''Report no changes for push/pull. excluded is None or a list of
122 122 nodes excluded from the push/pull.
123 123 '''
124 124 secretlist = []
125 125 if excluded:
126 126 for n in excluded:
127 127 if n not in repo:
128 128 # discovery should not have included the filtered revision,
129 129 # we have to explicitly exclude it until discovery is cleaned up.
130 130 continue
131 131 ctx = repo[n]
132 132 if ctx.phase() >= phases.secret and not ctx.extinct():
133 133 secretlist.append(n)
134 134
135 135 if secretlist:
136 136 ui.status(_("no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist))
138 138 else:
139 139 ui.status(_("no changes found\n"))
140 140
141 141 def checknewlabel(repo, lbl, kind):
142 142 # Do not use the "kind" parameter in ui output.
143 143 # It makes strings difficult to translate.
144 144 if lbl in ['tip', '.', 'null']:
145 145 raise error.Abort(_("the name '%s' is reserved") % lbl)
146 146 for c in (':', '\0', '\n', '\r'):
147 147 if c in lbl:
148 148 raise error.Abort(_("%r cannot be used in a name") % c)
149 149 try:
150 150 int(lbl)
151 151 raise error.Abort(_("cannot use an integer as a name"))
152 152 except ValueError:
153 153 pass
154 154
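# A minimal sketch (names from this module): ``checknewlabel`` rejects
# reserved or unparsable names before a bookmark, branch or tag is created:
#
#     checknewlabel(repo, 'tip', 'bookmark')        # raises error.Abort
#     checknewlabel(repo, '123', 'bookmark')        # raises error.Abort (integer)
#     checknewlabel(repo, 'feature-x', 'bookmark')  # passes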
155 155 def checkfilename(f):
156 156 '''Check that the filename f is an acceptable filename for a tracked file'''
157 157 if '\r' in f or '\n' in f:
158 158 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
159 159
160 160 def checkportable(ui, f):
161 161 '''Check if filename f is portable and warn or abort depending on config'''
162 162 checkfilename(f)
163 163 abort, warn = checkportabilityalert(ui)
164 164 if abort or warn:
165 165 msg = util.checkwinfilename(f)
166 166 if msg:
167 167 msg = "%s: %r" % (msg, f)
168 168 if abort:
169 169 raise error.Abort(msg)
170 170 ui.warn(_("warning: %s\n") % msg)
171 171
172 172 def checkportabilityalert(ui):
173 173 '''check if the user's config requests nothing, a warning, or abort for
174 174 non-portable filenames'''
175 175 val = ui.config('ui', 'portablefilenames', 'warn')
176 176 lval = val.lower()
177 177 bval = util.parsebool(val)
178 178 abort = os.name == 'nt' or lval == 'abort'
179 179 warn = bval or lval == 'warn'
180 180 if bval is None and not (warn or abort or lval == 'ignore'):
181 181 raise error.ConfigError(
182 182 _("ui.portablefilenames value is invalid ('%s')") % val)
183 183 return abort, warn
184 184
185 185 class casecollisionauditor(object):
186 186 def __init__(self, ui, abort, dirstate):
187 187 self._ui = ui
188 188 self._abort = abort
189 189 allfiles = '\0'.join(dirstate._map)
190 190 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
191 191 self._dirstate = dirstate
192 192 # The purpose of _newfiles is to avoid complaining about case
193 193 # collisions when this object is called with the same filename
194 194 # twice.
195 195 self._newfiles = set()
196 196
197 197 def __call__(self, f):
198 198 if f in self._newfiles:
199 199 return
200 200 fl = encoding.lower(f)
201 201 if fl in self._loweredfiles and f not in self._dirstate:
202 202 msg = _('possible case-folding collision for %s') % f
203 203 if self._abort:
204 204 raise error.Abort(msg)
205 205 self._ui.warn(_("warning: %s\n") % msg)
206 206 self._loweredfiles.add(fl)
207 207 self._newfiles.add(f)
208 208
209 209 def filteredhash(repo, maxrev):
210 210 """build hash of filtered revisions in the current repoview.
211 211
212 212 Multiple caches perform up-to-date validation by checking that the
213 213 tiprev and tipnode stored in the cache file match the current repository.
214 214 However, this is not sufficient for validating repoviews because the set
215 215 of revisions in the view may change without the repository tiprev and
216 216 tipnode changing.
217 217
218 218 This function hashes all the revs filtered from the view and returns
219 219 that SHA-1 digest.
220 220 """
221 221 cl = repo.changelog
222 222 if not cl.filteredrevs:
223 223 return None
224 224 key = None
225 225 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
226 226 if revs:
227 227 s = util.sha1()
228 228 for rev in revs:
229 229 s.update('%s;' % rev)
230 230 key = s.digest()
231 231 return key
232 232
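# A minimal sketch of how a cache consumer might use ``filteredhash`` for
# validation (``cachedkey``, ``tiprev`` and ``rebuildcache`` are hypothetical
# names, not part of this module):
#
#     if filteredhash(repo, tiprev) != cachedkey:
#         rebuildcache()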
233 233 class abstractvfs(object):
234 234 """Abstract base class; cannot be instantiated"""
235 235
236 236 def __init__(self, *args, **kwargs):
237 237 '''Prevent instantiation; don't call this from subclasses.'''
238 238 raise NotImplementedError('attempted instantiating ' + str(type(self)))
239 239
240 240 def tryread(self, path):
241 241 '''gracefully return an empty string for missing files'''
242 242 try:
243 243 return self.read(path)
244 244 except IOError as inst:
245 245 if inst.errno != errno.ENOENT:
246 246 raise
247 247 return ""
248 248
249 249 def tryreadlines(self, path, mode='rb'):
250 250 '''gracefully return an empty array for missing files'''
251 251 try:
252 252 return self.readlines(path, mode=mode)
253 253 except IOError as inst:
254 254 if inst.errno != errno.ENOENT:
255 255 raise
256 256 return []
257 257
258 258 def open(self, path, mode="r", text=False, atomictemp=False,
259 259 notindexed=False, backgroundclose=False):
260 260 '''Open ``path`` file, which is relative to vfs root.
261 261
262 262 Newly created directories are marked as "not to be indexed by
263 263 the content indexing service" if ``notindexed`` is specified
264 264 and the file is opened for writing.
265 265 '''
266 266 self.open = self.__call__
267 267 return self.__call__(path, mode, text, atomictemp, notindexed,
268 268 backgroundclose=backgroundclose)
269 269
270 270 def read(self, path):
271 271 with self(path, 'rb') as fp:
272 272 return fp.read()
273 273
274 274 def readlines(self, path, mode='rb'):
275 275 with self(path, mode=mode) as fp:
276 276 return fp.readlines()
277 277
278 278 def write(self, path, data, backgroundclose=False):
279 279 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
280 280 return fp.write(data)
281 281
282 282 def writelines(self, path, data, mode='wb', notindexed=False):
283 283 with self(path, mode=mode, notindexed=notindexed) as fp:
284 284 return fp.writelines(data)
285 285
286 286 def append(self, path, data):
287 287 with self(path, 'ab') as fp:
288 288 return fp.write(data)
289 289
290 290 def basename(self, path):
291 291 """return base element of a path (as os.path.basename would do)
292 292
293 293 This exists to allow handling of strange encoding if needed."""
294 294 return os.path.basename(path)
295 295
296 296 def chmod(self, path, mode):
297 297 return os.chmod(self.join(path), mode)
298 298
299 299 def dirname(self, path):
300 300 """return dirname element of a path (as os.path.dirname would do)
301 301
302 302 This exists to allow handling of strange encoding if needed."""
303 303 return os.path.dirname(path)
304 304
305 305 def exists(self, path=None):
306 306 return os.path.exists(self.join(path))
307 307
308 308 def fstat(self, fp):
309 309 return util.fstat(fp)
310 310
311 311 def isdir(self, path=None):
312 312 return os.path.isdir(self.join(path))
313 313
314 314 def isfile(self, path=None):
315 315 return os.path.isfile(self.join(path))
316 316
317 317 def islink(self, path=None):
318 318 return os.path.islink(self.join(path))
319 319
320 320 def isfileorlink(self, path=None):
321 321 '''return whether path is a regular file or a symlink
322 322
323 323 Unlike isfile, this doesn't follow symlinks.'''
324 324 try:
325 325 st = self.lstat(path)
326 326 except OSError:
327 327 return False
328 328 mode = st.st_mode
329 329 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
330 330
331 331 def reljoin(self, *paths):
332 332 """join various elements of a path together (as os.path.join would do)
333 333
334 334 The vfs base is not injected so that paths stay relative. This exists
335 335 to allow handling of strange encoding if needed."""
336 336 return os.path.join(*paths)
337 337
338 338 def split(self, path):
339 339 """split top-most element of a path (as os.path.split would do)
340 340
341 341 This exists to allow handling of strange encoding if needed."""
342 342 return os.path.split(path)
343 343
344 344 def lexists(self, path=None):
345 345 return os.path.lexists(self.join(path))
346 346
347 347 def lstat(self, path=None):
348 348 return os.lstat(self.join(path))
349 349
350 350 def listdir(self, path=None):
351 351 return os.listdir(self.join(path))
352 352
353 353 def makedir(self, path=None, notindexed=True):
354 354 return util.makedir(self.join(path), notindexed)
355 355
356 356 def makedirs(self, path=None, mode=None):
357 357 return util.makedirs(self.join(path), mode)
358 358
359 359 def makelock(self, info, path):
360 360 return util.makelock(info, self.join(path))
361 361
362 362 def mkdir(self, path=None):
363 363 return os.mkdir(self.join(path))
364 364
365 365 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
366 366 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
367 367 dir=self.join(dir), text=text)
368 368 dname, fname = util.split(name)
369 369 if dir:
370 370 return fd, os.path.join(dir, fname)
371 371 else:
372 372 return fd, fname
373 373
374 374 def readdir(self, path=None, stat=None, skip=None):
375 375 return osutil.listdir(self.join(path), stat, skip)
376 376
377 377 def readlock(self, path):
378 378 return util.readlock(self.join(path))
379 379
380 380 def rename(self, src, dst, checkambig=False):
381 381 dstpath = self.join(dst)
382 382 oldstat = checkambig and util.filestat(dstpath)
383 383 if oldstat and oldstat.stat:
384 384 ret = util.rename(self.join(src), dstpath)
385 385 newstat = util.filestat(dstpath)
386 386 if newstat.isambig(oldstat):
387 387 # stat of renamed file is ambiguous to original one
388 388 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
389 389 os.utime(dstpath, (advanced, advanced))
390 390 return ret
391 391 return util.rename(self.join(src), dstpath)
392 392
393 393 def readlink(self, path):
394 394 return os.readlink(self.join(path))
395 395
396 396 def removedirs(self, path=None):
397 397 """Remove a leaf directory and all empty intermediate ones
398 398 """
399 399 return util.removedirs(self.join(path))
400 400
401 401 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
402 402 """Remove a directory tree recursively
403 403
404 404 If ``forcibly``, this tries to remove READ-ONLY files, too.
405 405 """
406 406 if forcibly:
407 407 def onerror(function, path, excinfo):
408 408 if function is not os.remove:
409 409 raise
410 410 # read-only files cannot be unlinked under Windows
411 411 s = os.stat(path)
412 412 if (s.st_mode & stat.S_IWRITE) != 0:
413 413 raise
414 414 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
415 415 os.remove(path)
416 416 else:
417 417 onerror = None
418 418 return shutil.rmtree(self.join(path),
419 419 ignore_errors=ignore_errors, onerror=onerror)
420 420
421 421 def setflags(self, path, l, x):
422 422 return util.setflags(self.join(path), l, x)
423 423
424 424 def stat(self, path=None):
425 425 return os.stat(self.join(path))
426 426
427 427 def unlink(self, path=None):
428 428 return util.unlink(self.join(path))
429 429
430 430 def unlinkpath(self, path=None, ignoremissing=False):
431 431 return util.unlinkpath(self.join(path), ignoremissing)
432 432
433 433 def utime(self, path=None, t=None):
434 434 return os.utime(self.join(path), t)
435 435
436 436 def walk(self, path=None, onerror=None):
437 437 """Yield (dirpath, dirs, files) tuple for each directories under path
438 438
439 439 ``dirpath`` is relative one from the root of this vfs. This
440 440 uses ``os.sep`` as path separator, even you specify POSIX
441 441 style ``path``.
442 442
443 443 "The root of this vfs" is represented as empty ``dirpath``.
444 444 """
445 445 root = os.path.normpath(self.join(None))
446 446 # when dirpath == root, dirpath[prefixlen:] becomes empty
447 447 # because len(dirpath) < prefixlen.
448 448 prefixlen = len(pathutil.normasprefix(root))
449 449 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
450 450 yield (dirpath[prefixlen:], dirs, files)
451 451
452 452 @contextlib.contextmanager
453 453 def backgroundclosing(self, ui, expectedcount=-1):
454 454 """Allow files to be closed asynchronously.
455 455
456 456 When this context manager is active, ``backgroundclose`` can be passed
457 457 to ``__call__``/``open`` to result in the file possibly being closed
458 458 asynchronously, on a background thread.
459 459 """
460 460 # This is an arbitrary restriction and could be changed if we ever
461 461 # have a use case.
462 462 vfs = getattr(self, 'vfs', self)
463 463 if getattr(vfs, '_backgroundfilecloser', None):
464 464 raise error.Abort('can only have 1 active background file closer')
465 465
466 466 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
467 467 try:
468 468 vfs._backgroundfilecloser = bfc
469 469 yield bfc
470 470 finally:
471 471 vfs._backgroundfilecloser = None
472 472
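# A minimal usage sketch for background closing, assuming ``repo`` is in
# scope and ``files`` is an iterable of (name, data) pairs:
#
#     with repo.vfs.backgroundclosing(repo.ui, expectedcount=len(files)):
#         for name, data in files:
#             repo.vfs.write(name, data, backgroundclose=True)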
473 473 class vfs(abstractvfs):
474 474 '''Operate on files relative to a base directory
475 475
476 476 This class is used to hide the details of COW semantics and
477 477 remote file access from higher level code.
478 478 '''
479 479 def __init__(self, base, audit=True, expandpath=False, realpath=False):
480 480 if expandpath:
481 481 base = util.expandpath(base)
482 482 if realpath:
483 483 base = os.path.realpath(base)
484 484 self.base = base
485 485 self.mustaudit = audit
486 486 self.createmode = None
487 487 self._trustnlink = None
488 488
489 489 @property
490 490 def mustaudit(self):
491 491 return self._audit
492 492
493 493 @mustaudit.setter
494 494 def mustaudit(self, onoff):
495 495 self._audit = onoff
496 496 if onoff:
497 497 self.audit = pathutil.pathauditor(self.base)
498 498 else:
499 499 self.audit = util.always
500 500
501 501 @util.propertycache
502 502 def _cansymlink(self):
503 503 return util.checklink(self.base)
504 504
505 505 @util.propertycache
506 506 def _chmod(self):
507 507 return util.checkexec(self.base)
508 508
509 509 def _fixfilemode(self, name):
510 510 if self.createmode is None or not self._chmod:
511 511 return
512 512 os.chmod(name, self.createmode & 0o666)
513 513
514 514 def __call__(self, path, mode="r", text=False, atomictemp=False,
515 515 notindexed=False, backgroundclose=False, checkambig=False):
516 516 '''Open ``path`` file, which is relative to vfs root.
517 517
518 518 Newly created directories are marked as "not to be indexed by
519 519 the content indexing service" if ``notindexed`` is specified
520 520 and the file is opened for writing.
521 521
522 522 If ``backgroundclose`` is passed, the file may be closed asynchronously.
523 523 It can only be used if the ``self.backgroundclosing()`` context manager
524 524 is active. This should only be specified if the following criteria hold:
525 525
526 526 1. There is a potential for writing thousands of files. Unless you
527 527 are writing thousands of files, the performance benefits of
528 528 asynchronously closing files are not realized.
529 529 2. Files are opened exactly once for the ``backgroundclosing``
530 530 active duration and are therefore free of race conditions between
531 531 closing a file on a background thread and reopening it. (If the
532 532 file were opened multiple times, there could be unflushed data
533 533 because the original file handle hasn't been flushed/closed yet.)
534 534
535 535 ``checkambig`` is passed to atomictempfile (valid only for writing).
536 536 '''
537 537 if self._audit:
538 538 r = util.checkosfilename(path)
539 539 if r:
540 540 raise error.Abort("%s: %r" % (r, path))
541 541 self.audit(path)
542 542 f = self.join(path)
543 543
544 544 if not text and "b" not in mode:
545 545 mode += "b" # for that other OS
546 546
547 547 nlink = -1
548 548 if mode not in ('r', 'rb'):
549 549 dirname, basename = util.split(f)
550 550 # If basename is empty, then the path is malformed because it points
551 551 # to a directory. Let the posixfile() call below raise IOError.
552 552 if basename:
553 553 if atomictemp:
554 554 util.makedirs(dirname, self.createmode, notindexed)
555 555 return util.atomictempfile(f, mode, self.createmode,
556 556 checkambig=checkambig)
557 557 try:
558 558 if 'w' in mode:
559 559 util.unlink(f)
560 560 nlink = 0
561 561 else:
562 562 # nlinks() may behave differently for files on Windows
563 563 # shares if the file is open.
564 564 with util.posixfile(f):
565 565 nlink = util.nlinks(f)
566 566 if nlink < 1:
567 567 nlink = 2 # force mktempcopy (issue1922)
568 568 except (OSError, IOError) as e:
569 569 if e.errno != errno.ENOENT:
570 570 raise
571 571 nlink = 0
572 572 util.makedirs(dirname, self.createmode, notindexed)
573 573 if nlink > 0:
574 574 if self._trustnlink is None:
575 575 self._trustnlink = nlink > 1 or util.checknlink(f)
576 576 if nlink > 1 or not self._trustnlink:
577 577 util.rename(util.mktempcopy(f), f)
578 578 fp = util.posixfile(f, mode)
579 579 if nlink == 0:
580 580 self._fixfilemode(f)
581 581
582 582 if backgroundclose:
583 583 if not self._backgroundfilecloser:
584 584 raise error.Abort('backgroundclose can only be used when a '
585 585 'backgroundclosing context manager is active')
586 586
587 587 fp = delayclosedfile(fp, self._backgroundfilecloser)
588 588
589 589 return fp
590 590
591 591 def symlink(self, src, dst):
592 592 self.audit(dst)
593 593 linkname = self.join(dst)
594 594 try:
595 595 os.unlink(linkname)
596 596 except OSError:
597 597 pass
598 598
599 599 util.makedirs(os.path.dirname(linkname), self.createmode)
600 600
601 601 if self._cansymlink:
602 602 try:
603 603 os.symlink(src, linkname)
604 604 except OSError as err:
605 605 raise OSError(err.errno, _('could not symlink to %r: %s') %
606 606 (src, err.strerror), linkname)
607 607 else:
608 608 self.write(dst, src)
609 609
610 610 def join(self, path, *insidef):
611 611 if path:
612 612 return os.path.join(self.base, path, *insidef)
613 613 else:
614 614 return self.base
615 615
616 616 opener = vfs
617 617
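# A minimal sketch of basic vfs usage; the path '/tmp/example' is
# illustrative, and intermediate directories are created on demand when
# writing:
#
#     v = vfs('/tmp/example')
#     v.write('some/file.txt', 'data')
#     assert v.read('some/file.txt') == 'data'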
618 618 class auditvfs(object):
619 619 def __init__(self, vfs):
620 620 self.vfs = vfs
621 621
622 622 @property
623 623 def mustaudit(self):
624 624 return self.vfs.mustaudit
625 625
626 626 @mustaudit.setter
627 627 def mustaudit(self, onoff):
628 628 self.vfs.mustaudit = onoff
629 629
630 630 class filtervfs(abstractvfs, auditvfs):
631 631 '''Wrapper vfs for filtering filenames with a function.'''
632 632
633 633 def __init__(self, vfs, filter):
634 634 auditvfs.__init__(self, vfs)
635 635 self._filter = filter
636 636
637 637 def __call__(self, path, *args, **kwargs):
638 638 return self.vfs(self._filter(path), *args, **kwargs)
639 639
640 640 def join(self, path, *insidef):
641 641 if path:
642 642 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
643 643 else:
644 644 return self.vfs.join(path)
645 645
646 646 filteropener = filtervfs
647 647
648 648 class readonlyvfs(abstractvfs, auditvfs):
649 649 '''Wrapper vfs preventing any writing.'''
650 650
651 651 def __init__(self, vfs):
652 652 auditvfs.__init__(self, vfs)
653 653
654 654 def __call__(self, path, mode='r', *args, **kw):
655 655 if mode not in ('r', 'rb'):
656 656 raise error.Abort('this vfs is read only')
657 657 return self.vfs(path, mode, *args, **kw)
658 658
659 659 def join(self, path, *insidef):
660 660 return self.vfs.join(path, *insidef)
661 661
662 662 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
663 663 '''yield every hg repository under path, always recursively.
664 664 The recurse flag only controls recursion into repo working dirs.'''
665 665 def errhandler(err):
666 666 if err.filename == path:
667 667 raise err
668 668 samestat = getattr(os.path, 'samestat', None)
669 669 if followsym and samestat is not None:
670 670 def adddir(dirlst, dirname):
671 671 match = False
672 672 dirstat = os.stat(dirname)
673 673 for lstdirstat in dirlst:
674 674 if samestat(dirstat, lstdirstat):
675 675 match = True
676 676 break
677 677 if not match:
678 678 dirlst.append(dirstat)
679 679 return not match
680 680 else:
681 681 followsym = False
682 682
683 683 if (seen_dirs is None) and followsym:
684 684 seen_dirs = []
685 685 adddir(seen_dirs, path)
686 686 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
687 687 dirs.sort()
688 688 if '.hg' in dirs:
689 689 yield root # found a repository
690 690 qroot = os.path.join(root, '.hg', 'patches')
691 691 if os.path.isdir(os.path.join(qroot, '.hg')):
692 692 yield qroot # we have a patch queue repo here
693 693 if recurse:
694 694 # avoid recursing inside the .hg directory
695 695 dirs.remove('.hg')
696 696 else:
697 697 dirs[:] = [] # don't descend further
698 698 elif followsym:
699 699 newdirs = []
700 700 for d in dirs:
701 701 fname = os.path.join(root, d)
702 702 if adddir(seen_dirs, fname):
703 703 if os.path.islink(fname):
704 704 for hgname in walkrepos(fname, True, seen_dirs):
705 705 yield hgname
706 706 else:
707 707 newdirs.append(d)
708 708 dirs[:] = newdirs
709 709
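# A minimal usage sketch: enumerating repositories under a directory tree
# (the path is illustrative):
#
#     for repopath in walkrepos('/srv/repos', followsym=True):
#         print(repopath)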
710 710 def osrcpath():
711 711 '''return default os-specific hgrc search path'''
712 712 path = []
713 713 defaultpath = os.path.join(util.datapath, 'default.d')
714 714 if os.path.isdir(defaultpath):
715 715 for f, kind in osutil.listdir(defaultpath):
716 716 if f.endswith('.rc'):
717 717 path.append(os.path.join(defaultpath, f))
718 718 path.extend(systemrcpath())
719 719 path.extend(userrcpath())
720 720 path = [os.path.normpath(f) for f in path]
721 721 return path
722 722
723 723 _rcpath = None
724 724
725 725 def rcpath():
726 726 '''return hgrc search path. if env var HGRCPATH is set, use it.
727 727 for each item in path, if directory, use files ending in .rc,
728 728 else use item.
729 729 make HGRCPATH empty to only look in .hg/hgrc of current repo.
730 730 if no HGRCPATH, use default os-specific path.'''
731 731 global _rcpath
732 732 if _rcpath is None:
733 733 if 'HGRCPATH' in os.environ:
734 734 _rcpath = []
735 735 for p in os.environ['HGRCPATH'].split(os.pathsep):
736 736 if not p:
737 737 continue
738 738 p = util.expandpath(p)
739 739 if os.path.isdir(p):
740 740 for f, kind in osutil.listdir(p):
741 741 if f.endswith('.rc'):
742 742 _rcpath.append(os.path.join(p, f))
743 743 else:
744 744 _rcpath.append(p)
745 745 else:
746 746 _rcpath = osrcpath()
747 747 return _rcpath
748 748
749 749 def intrev(rev):
750 750 """Return integer for a given revision that can be used in comparison or
751 751 arithmetic operation"""
752 752 if rev is None:
753 753 return wdirrev
754 754 return rev
755 755
756 756 def revsingle(repo, revspec, default='.'):
757 757 if not revspec and revspec != 0:
758 758 return repo[default]
759 759
760 760 l = revrange(repo, [revspec])
761 761 if not l:
762 762 raise error.Abort(_('empty revision set'))
763 763 return repo[l.last()]
764 764
765 765 def _pairspec(revspec):
766 766 tree = revset.parse(revspec)
767 767 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
768 768 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
769 769
770 770 def revpair(repo, revs):
771 771 if not revs:
772 772 return repo.dirstate.p1(), None
773 773
774 774 l = revrange(repo, revs)
775 775
776 776 if not l:
777 777 first = second = None
778 778 elif l.isascending():
779 779 first = l.min()
780 780 second = l.max()
781 781 elif l.isdescending():
782 782 first = l.max()
783 783 second = l.min()
784 784 else:
785 785 first = l.first()
786 786 second = l.last()
787 787
788 788 if first is None:
789 789 raise error.Abort(_('empty revision range'))
790 790 if (first == second and len(revs) >= 2
791 791 and not all(revrange(repo, [r]) for r in revs)):
792 792 raise error.Abort(_('empty revision on one side of range'))
793 793
794 794 # if top-level is range expression, the result must always be a pair
795 795 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
796 796 return repo.lookup(first), None
797 797
798 798 return repo.lookup(first), repo.lookup(second)
799 799
800 800 def revrange(repo, revs):
801 801 """Yield revision as strings from a list of revision specifications."""
802 802 allspecs = []
803 803 for spec in revs:
804 804 if isinstance(spec, int):
805 805 spec = revset.formatspec('rev(%d)', spec)
806 806 allspecs.append(spec)
807 807 m = revset.matchany(repo.ui, allspecs, repo)
808 808 return m(repo)
809 809
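# A minimal sketch, assuming ``repo`` and ``ui`` are in scope; the result
# behaves as a set of integer revisions:
#
#     for rev in revrange(repo, ['tip', '0:2']):
#         ui.write('%d\n' % rev)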
810 810 def meaningfulparents(repo, ctx):
811 811 """Return list of meaningful (or all if debug) parentrevs for rev.
812 812
813 813 For merges (two non-nullrev revisions) both parents are meaningful.
814 814 Otherwise the first parent revision is considered meaningful if it
815 815 is not the preceding revision.
816 816 """
817 817 parents = ctx.parents()
818 818 if len(parents) > 1:
819 819 return parents
820 820 if repo.ui.debugflag:
821 821 return [parents[0], repo['null']]
822 822 if parents[0].rev() >= intrev(ctx.rev()) - 1:
823 823 return []
824 824 return parents
825 825
826 826 def expandpats(pats):
827 827 '''Expand bare globs when running on windows.
828 828 On posix we assume it has already been done by sh.'''
829 829 if not util.expandglobs:
830 830 return list(pats)
831 831 ret = []
832 832 for kindpat in pats:
833 833 kind, pat = matchmod._patsplit(kindpat, None)
834 834 if kind is None:
835 835 try:
836 836 globbed = glob.glob(pat)
837 837 except re.error:
838 838 globbed = [pat]
839 839 if globbed:
840 840 ret.extend(globbed)
841 841 continue
842 842 ret.append(kindpat)
843 843 return ret
844 844
845 845 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
846 846 badfn=None):
847 847 '''Return a matcher and the patterns that were used.
848 848 The matcher will warn about bad matches, unless an alternate badfn callback
849 849 is provided.'''
850 850 if pats == ("",):
851 851 pats = []
852 852 if opts is None:
853 853 opts = {}
854 854 if not globbed and default == 'relpath':
855 855 pats = expandpats(pats or [])
856 856
857 857 def bad(f, msg):
858 858 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
859 859
860 860 if badfn is None:
861 861 badfn = bad
862 862
863 863 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
864 864 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
865 865
866 866 if m.always():
867 867 pats = []
868 868 return m, pats
869 869
870 870 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
871 871 badfn=None):
872 872 '''Return a matcher that will warn about bad matches.'''
873 873 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
874 874
875 875 def matchall(repo):
876 876 '''Return a matcher that will efficiently match everything.'''
877 877 return matchmod.always(repo.root, repo.getcwd())
878 878
879 879 def matchfiles(repo, files, badfn=None):
880 880 '''Return a matcher that will efficiently match exactly these files.'''
881 881 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
882 882
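# A minimal sketch of the matcher helpers above; matchers are callable on
# repo-relative file names (``repo`` is assumed to be in scope):
#
#     m = match(repo[None], pats=['glob:*.py'])
#     if m('setup.py'):
#         pass  # 'setup.py' matches the patterns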
883 883 def origpath(ui, repo, filepath):
884 884 '''customize where .orig files are created
885 885
886 886 Fetch user defined path from config file: [ui] origbackuppath = <path>
887 887 Fall back to default (filepath) if not specified
888 888 '''
889 889 origbackuppath = ui.config('ui', 'origbackuppath', None)
890 890 if origbackuppath is None:
891 891 return filepath + ".orig"
892 892
893 893 filepathfromroot = os.path.relpath(filepath, start=repo.root)
894 894 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
895 895
896 896 origbackupdir = repo.vfs.dirname(fullorigpath)
897 897 if not repo.vfs.exists(origbackupdir):
898 898 ui.note(_('creating directory: %s\n') % origbackupdir)
899 899 util.makedirs(origbackupdir)
900 900
901 901 return fullorigpath + ".orig"
902 902
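# A configuration sketch for the ``ui.origbackuppath`` option read above;
# with this in an hgrc, .orig backups land under .hg/origbackups instead of
# next to the original file:
#
#     [ui]
#     origbackuppath = .hg/origbackups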
903 903 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
904 904 if opts is None:
905 905 opts = {}
906 906 m = matcher
907 907 if dry_run is None:
908 908 dry_run = opts.get('dry_run')
909 909 if similarity is None:
910 910 similarity = float(opts.get('similarity') or 0)
911 911
912 912 ret = 0
913 913 join = lambda f: os.path.join(prefix, f)
914 914
915 915 def matchessubrepo(matcher, subpath):
916 916 if matcher.exact(subpath):
917 917 return True
918 918 for f in matcher.files():
919 919 if f.startswith(subpath):
920 920 return True
921 921 return False
922 922
923 923 wctx = repo[None]
924 924 for subpath in sorted(wctx.substate):
925 925 if opts.get('subrepos') or matchessubrepo(m, subpath):
926 926 sub = wctx.sub(subpath)
927 927 try:
928 928 submatch = matchmod.subdirmatcher(subpath, m)
929 929 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
930 930 ret = 1
931 931 except error.LookupError:
932 932 repo.ui.status(_("skipping missing subrepository: %s\n")
933 933 % join(subpath))
934 934
935 935 rejected = []
936 936 def badfn(f, msg):
937 937 if f in m.files():
938 938 m.bad(f, msg)
939 939 rejected.append(f)
940 940
941 941 badmatch = matchmod.badmatch(m, badfn)
942 942 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
943 943 badmatch)
944 944
945 945 unknownset = set(unknown + forgotten)
946 946 toprint = unknownset.copy()
947 947 toprint.update(deleted)
948 948 for abs in sorted(toprint):
949 949 if repo.ui.verbose or not m.exact(abs):
950 950 if abs in unknownset:
951 951 status = _('adding %s\n') % m.uipath(abs)
952 952 else:
953 953 status = _('removing %s\n') % m.uipath(abs)
954 954 repo.ui.status(status)
955 955
956 956 renames = _findrenames(repo, m, added + unknown, removed + deleted,
957 957 similarity)
958 958
959 959 if not dry_run:
960 960 _markchanges(repo, unknown + forgotten, deleted, renames)
961 961
962 962 for f in rejected:
963 963 if f in m.files():
964 964 return 1
965 965 return ret
966 966
967 967 def marktouched(repo, files, similarity=0.0):
968 968 '''Assert that files have somehow been operated upon. Files are relative to
969 969 the repo root.'''
970 970 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
971 971 rejected = []
972 972
973 973 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
974 974
975 975 if repo.ui.verbose:
976 976 unknownset = set(unknown + forgotten)
977 977 toprint = unknownset.copy()
978 978 toprint.update(deleted)
979 979 for abs in sorted(toprint):
980 980 if abs in unknownset:
981 981 status = _('adding %s\n') % abs
982 982 else:
983 983 status = _('removing %s\n') % abs
984 984 repo.ui.status(status)
985 985
986 986 renames = _findrenames(repo, m, added + unknown, removed + deleted,
987 987 similarity)
988 988
989 989 _markchanges(repo, unknown + forgotten, deleted, renames)
990 990
991 991 for f in rejected:
992 992 if f in m.files():
993 993 return 1
994 994 return 0
995 995
996 996 def _interestingfiles(repo, matcher):
997 997 '''Walk dirstate with matcher, looking for files that addremove would care
998 998 about.
999 999
1000 1000 This is different from dirstate.status because it doesn't care about
1001 1001 whether files are modified or clean.'''
1002 1002 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1003 1003 audit_path = pathutil.pathauditor(repo.root)
1004 1004
1005 1005 ctx = repo[None]
1006 1006 dirstate = repo.dirstate
1007 1007 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1008 1008 full=False)
1009 1009 for abs, st in walkresults.iteritems():
1010 1010 dstate = dirstate[abs]
1011 1011 if dstate == '?' and audit_path.check(abs):
1012 1012 unknown.append(abs)
1013 1013 elif dstate != 'r' and not st:
1014 1014 deleted.append(abs)
1015 1015 elif dstate == 'r' and st:
1016 1016 forgotten.append(abs)
1017 1017 # for finding renames
1018 1018 elif dstate == 'r' and not st:
1019 1019 removed.append(abs)
1020 1020 elif dstate == 'a':
1021 1021 added.append(abs)
1022 1022
1023 1023 return added, unknown, deleted, removed, forgotten
1024 1024
1025 1025 def _findrenames(repo, matcher, added, removed, similarity):
1026 1026 '''Find renames from removed files to added ones.'''
1027 1027 renames = {}
1028 1028 if similarity > 0:
1029 1029 for old, new, score in similar.findrenames(repo, added, removed,
1030 1030 similarity):
1031 1031 if (repo.ui.verbose or not matcher.exact(old)
1032 1032 or not matcher.exact(new)):
1033 1033 repo.ui.status(_('recording removal of %s as rename to %s '
1034 1034 '(%d%% similar)\n') %
1035 1035 (matcher.rel(old), matcher.rel(new),
1036 1036 score * 100))
1037 1037 renames[new] = old
1038 1038 return renames
1039 1039
1040 1040 def _markchanges(repo, unknown, deleted, renames):
1041 1041 '''Marks the files in unknown as added, the files in deleted as removed,
1042 1042 and the files in renames as copied.'''
1043 1043 wctx = repo[None]
1044 1044 with repo.wlock():
1045 1045 wctx.forget(deleted)
1046 1046 wctx.add(unknown)
1047 1047 for new, old in renames.iteritems():
1048 1048 wctx.copy(old, new)
1049 1049
1050 1050 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1051 1051 """Update the dirstate to reflect the intent of copying src to dst. For
1052 1052 various reasons, dst might not end up marked as copied from src.
1053 1053 """
1054 1054 origsrc = repo.dirstate.copied(src) or src
1055 1055 if dst == origsrc: # copying back a copy?
1056 1056 if repo.dirstate[dst] not in 'mn' and not dryrun:
1057 1057 repo.dirstate.normallookup(dst)
1058 1058 else:
1059 1059 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1060 1060 if not ui.quiet:
1061 1061 ui.warn(_("%s has not been committed yet, so no copy "
1062 1062 "data will be stored for %s.\n")
1063 1063 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1064 1064 if repo.dirstate[dst] in '?r' and not dryrun:
1065 1065 wctx.add([dst])
1066 1066 elif not dryrun:
1067 1067 wctx.copy(origsrc, dst)
1068 1068
1069 1069 def readrequires(opener, supported):
1070 1070 '''Reads and parses .hg/requires and checks if all entries found
1071 1071 are in the list of supported features.'''
1072 1072 requirements = set(opener.read("requires").splitlines())
1073 1073 missings = []
1074 1074 for r in requirements:
1075 1075 if r not in supported:
1076 1076 if not r or not r[0].isalnum():
1077 1077 raise error.RequirementError(_(".hg/requires file is corrupt"))
1078 1078 missings.append(r)
1079 1079 missings.sort()
1080 1080 if missings:
1081 1081 raise error.RequirementError(
1082 1082 _("repository requires features unknown to this Mercurial: %s")
1083 1083 % " ".join(missings),
1084 1084 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1085 1085 " for more information"))
1086 1086 return requirements
1087 1087
1088 1088 def writerequires(opener, requirements):
1089 1089 with opener('requires', 'w') as fp:
1090 1090 for r in sorted(requirements):
1091 1091 fp.write("%s\n" % r)
1092 1092
1093 1093 class filecachesubentry(object):
1094 1094 def __init__(self, path, stat):
1095 1095 self.path = path
1096 1096 self.cachestat = None
1097 1097 self._cacheable = None
1098 1098
1099 1099 if stat:
1100 1100 self.cachestat = filecachesubentry.stat(self.path)
1101 1101
1102 1102 if self.cachestat:
1103 1103 self._cacheable = self.cachestat.cacheable()
1104 1104 else:
1105 1105 # None means we don't know yet
1106 1106 self._cacheable = None
1107 1107
1108 1108 def refresh(self):
1109 1109 if self.cacheable():
1110 1110 self.cachestat = filecachesubentry.stat(self.path)
1111 1111
1112 1112 def cacheable(self):
1113 1113 if self._cacheable is not None:
1114 1114 return self._cacheable
1115 1115
1116 1116 # we don't know yet, assume it is for now
1117 1117 return True
1118 1118
1119 1119 def changed(self):
1120 1120 # no point in going further if we can't cache it
1121 1121 if not self.cacheable():
1122 1122 return True
1123 1123
1124 1124 newstat = filecachesubentry.stat(self.path)
1125 1125
1126 1126 # we may not know if it's cacheable yet, check again now
1127 1127 if newstat and self._cacheable is None:
1128 1128 self._cacheable = newstat.cacheable()
1129 1129
1130 1130 # check again
1131 1131 if not self._cacheable:
1132 1132 return True
1133 1133
1134 1134 if self.cachestat != newstat:
1135 1135 self.cachestat = newstat
1136 1136 return True
1137 1137 else:
1138 1138 return False
1139 1139
1140 1140 @staticmethod
1141 1141 def stat(path):
1142 1142 try:
1143 1143 return util.cachestat(path)
1144 1144 except OSError as e:
1145 1145 if e.errno != errno.ENOENT:
1146 1146 raise
1147 1147
1148 1148 class filecacheentry(object):
1149 1149 def __init__(self, paths, stat=True):
1150 1150 self._entries = []
1151 1151 for path in paths:
1152 1152 self._entries.append(filecachesubentry(path, stat))
1153 1153
1154 1154 def changed(self):
1155 1155 '''true if any entry has changed'''
1156 1156 for entry in self._entries:
1157 1157 if entry.changed():
1158 1158 return True
1159 1159 return False
1160 1160
1161 1161 def refresh(self):
1162 1162 for entry in self._entries:
1163 1163 entry.refresh()
1164 1164
1165 1165 class filecache(object):
1166 1166 '''A property-like decorator that tracks files under .hg/ for updates.
1167 1167
1168 1168 Records stat info when called in _filecache.
1169 1169
1170 1170 On subsequent calls, compares old stat info with new info, and recreates the
1171 1171 object when any of the files changes, updating the new stat info in
1172 1172 _filecache.
1173 1173
1174 1174 Mercurial either atomically renames or appends to files under .hg,
1175 1175 so to ensure the cache is reliable we need the filesystem to be able
1176 1176 to tell us if a file has been replaced. If it can't, we fall back to
1177 1177 recreating the object on every call (essentially the same behavior as
1178 1178 propertycache). A usage sketch follows this class definition.
1179 1179
1180 1180 '''
1181 1181 def __init__(self, *paths):
1182 1182 self.paths = paths
1183 1183
1184 1184 def join(self, obj, fname):
1185 1185 """Used to compute the runtime path of a cached file.
1186 1186
1187 1187 Users should subclass filecache and provide their own version of this
1188 1188 function to call the appropriate join function on 'obj' (an instance
1189 1189 of the class whose member function was decorated).
1190 1190 """
1191 1191 return obj.join(fname)
1192 1192
1193 1193 def __call__(self, func):
1194 1194 self.func = func
1195 1195 self.name = func.__name__
1196 1196 return self
1197 1197
1198 1198 def __get__(self, obj, type=None):
1199 1199 # do we need to check if the file changed?
1200 1200 if self.name in obj.__dict__:
1201 1201 assert self.name in obj._filecache, self.name
1202 1202 return obj.__dict__[self.name]
1203 1203
1204 1204 entry = obj._filecache.get(self.name)
1205 1205
1206 1206 if entry:
1207 1207 if entry.changed():
1208 1208 entry.obj = self.func(obj)
1209 1209 else:
1210 1210 paths = [self.join(obj, path) for path in self.paths]
1211 1211
1212 1212 # We stat -before- creating the object so our cache doesn't lie if
1213 1213 # a writer modified the file between the time we read and stat it
1214 1214 entry = filecacheentry(paths, True)
1215 1215 entry.obj = self.func(obj)
1216 1216
1217 1217 obj._filecache[self.name] = entry
1218 1218
1219 1219 obj.__dict__[self.name] = entry.obj
1220 1220 return entry.obj
1221 1221
1222 1222 def __set__(self, obj, value):
1223 1223 if self.name not in obj._filecache:
1224 1224 # we add an entry for the missing value because X in __dict__
1225 1225 # implies X in _filecache
1226 1226 paths = [self.join(obj, path) for path in self.paths]
1227 1227 ce = filecacheentry(paths, False)
1228 1228 obj._filecache[self.name] = ce
1229 1229 else:
1230 1230 ce = obj._filecache[self.name]
1231 1231
1232 1232 ce.obj = value # update cached copy
1233 1233 obj.__dict__[self.name] = value # update copy returned by obj.x
1234 1234
1235 1235 def __delete__(self, obj):
1236 1236 try:
1237 1237 del obj.__dict__[self.name]
1238 1238 except KeyError:
1239 1239 raise AttributeError(self.name)
1240 1240
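# A minimal usage sketch for ``filecache`` on a fake repo-like object; all
# names below are illustrative, and ``os`` is imported at the top of this
# module. The decorated property is recomputed only when the stat
# information of the tracked file changes:
#
#     class fakerepo(object):
#         def __init__(self):
#             self._filecache = {}
#
#         def join(self, fname):
#             return os.path.join('.hg', fname)
#
#         @filecache('data')
#         def data(self):
#             with open(self.join('data')) as fp:
#                 return fp.read()  # re-read only when .hg/data changes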
1241 1241 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1242 1242 if lock is None:
1243 1243 raise error.LockInheritanceContractViolation(
1244 1244 'lock can only be inherited while held')
1245 1245 if environ is None:
1246 1246 environ = {}
1247 1247 with lock.inherit() as locker:
1248 1248 environ[envvar] = locker
1249 1249 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1250 1250
1251 1251 def wlocksub(repo, cmd, *args, **kwargs):
1252 1252 """run cmd as a subprocess that allows inheriting repo's wlock
1253 1253
1254 1254 This can only be called while the wlock is held. This takes all the
1255 1255 arguments that ui.system does, and returns the exit code of the
1256 1256 subprocess."""
1257 1257 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1258 1258 **kwargs)
1259 1259
1260 1260 def gdinitconfig(ui):
1261 1261 """helper function to know if a repo should be created as general delta
1262 1262 """
1263 1263 # experimental config: format.generaldelta
1264 1264 return (ui.configbool('format', 'generaldelta', False)
1265 1265 or ui.configbool('format', 'usegeneraldelta', True))
1266 1266
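# A configuration sketch for the format options consulted by the helpers
# above and below; either key enables general delta for newly created
# repositories:
#
#     [format]
#     usegeneraldelta = True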
1267 1267 def gddeltaconfig(ui):
1268 1268 """helper function to know if incoming delta should be optimised
1269 1269 """
1270 1270 # experimental config: format.generaldelta
1271 1271 return ui.configbool('format', 'generaldelta', False)
1272 1272
1273 1273 class delayclosedfile(object):
1274 1274 """Proxy for a file object whose close is delayed.
1275 1275
1276 1276 Do not instantiate outside of the vfs layer.
1277 1277 """
1278 1278
1279 1279 def __init__(self, fh, closer):
1280 1280 object.__setattr__(self, '_origfh', fh)
1281 1281 object.__setattr__(self, '_closer', closer)
1282 1282
1283 1283 def __getattr__(self, attr):
1284 1284 return getattr(self._origfh, attr)
1285 1285
1286 1286 def __setattr__(self, attr, value):
1287 1287 return setattr(self._origfh, attr, value)
1288 1288
1289 1289 def __delattr__(self, attr):
1290 1290 return delattr(self._origfh, attr)
1291 1291
1292 1292 def __enter__(self):
1293 1293 return self._origfh.__enter__()
1294 1294
1295 1295 def __exit__(self, exc_type, exc_value, exc_tb):
1296 1296 self._closer.close(self._origfh)
1297 1297
1298 1298 def close(self):
1299 1299 self._closer.close(self._origfh)
1300 1300
1301 1301 class backgroundfilecloser(object):
1302 1302 """Coordinates background closing of file handles on multiple threads."""
1303 1303 def __init__(self, ui, expectedcount=-1):
1304 1304 self._running = False
1305 1305 self._entered = False
1306 1306 self._threads = []
1307 1307 self._threadexception = None
1308 1308
1309 1309 # Only Windows/NTFS has slow file closing, so only enable by default
1310 1310 # on that platform. But allow it to be enabled elsewhere for testing.
1311 1311 defaultenabled = os.name == 'nt'
1312 1312 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1313 1313
1314 1314 if not enabled:
1315 1315 return
1316 1316
1317 1317 # There is overhead to starting and stopping the background threads.
1318 1318 # Don't do background processing unless the file count is large enough
1319 1319 # to justify it.
1320 1320 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1321 1321 2048)
1322 1322 # FUTURE dynamically start background threads after minfilecount closes.
1323 1323 # (We don't currently have any callers that don't know their file count)
1324 1324 if expectedcount > 0 and expectedcount < minfilecount:
1325 1325 return
1326 1326
1327 1327 # Windows defaults to a limit of 512 open files. A buffer of 128
1328 1328 # should give us enough headway.
1329 1329 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1330 1330 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1331 1331
1332 1332 ui.debug('starting %d threads for background file closing\n' %
1333 1333 threadcount)
1334 1334
1335 1335 self._queue = util.queue(maxsize=maxqueue)
1336 1336 self._running = True
1337 1337
1338 1338 for i in range(threadcount):
1339 1339 t = threading.Thread(target=self._worker, name='backgroundcloser')
1340 1340 self._threads.append(t)
1341 1341 t.start()
1342 1342
1343 1343 def __enter__(self):
1344 1344 self._entered = True
1345 1345 return self
1346 1346
1347 1347 def __exit__(self, exc_type, exc_value, exc_tb):
1348 1348 self._running = False
1349 1349
1350 1350 # Wait for threads to finish closing so open files don't linger for
1351 1351 # longer than the lifetime of the context manager.
1352 1352 for t in self._threads:
1353 1353 t.join()
1354 1354
1355 1355 def _worker(self):
1356 1356 """Main routine for worker thread."""
1357 1357 while True:
1358 1358 try:
1359 1359 fh = self._queue.get(block=True, timeout=0.100)
1360 1360 # Need to catch exceptions or the thread will terminate
1361 1361 # and we could orphan file descriptors.
1362 1362 try:
1363 1363 fh.close()
1364 1364 except Exception as e:
1365 1365 # Stash so we can re-raise from the main thread later.
1366 1366 self._threadexception = e
1367 1367 except util.empty:
1368 1368 if not self._running:
1369 1369 break
1370 1370
1371 1371 def close(self, fh):
1372 1372 """Schedule a file for closing."""
1373 1373 if not self._entered:
1374 1374 raise error.Abort('can only call close() when context manager '
1375 1375 'active')
1376 1376
1377 1377 # If a background thread encountered an exception, raise now so we fail
1378 1378 # fast. Otherwise we may potentially go on for minutes until the error
1379 1379 # is acted on.
1380 1380 if self._threadexception:
1381 1381 e = self._threadexception
1382 1382 self._threadexception = None
1383 1383 raise e
1384 1384
1385 1385 # If we're not actively running, close synchronously.
1386 1386 if not self._running:
1387 1387 fh.close()
1388 1388 return
1389 1389
1390 1390 self._queue.put(fh, block=True, timeout=None)
1391
General Comments 0
You need to be logged in to leave comments. Login now