vfs: make rename avoid ambiguity of file stat if needed...
FUJIWARA Katsunori
r29203:731ced08 default
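The change below makes vfs.rename() optionally avoid stat ambiguity: file timestamps have one-second resolution on many systems, so when a file is replaced via rename within the same second (and with the same size), its stat result is indistinguishable from the replaced file's, and stat-validated caches never notice the replacement. Writing back an mtime advanced by one second removes the ambiguity. A minimal standalone sketch of the technique follows (the helper name is illustrative, not Mercurial API, and "ambiguous" is approximated here as identical whole-second ctime and mtime, roughly what util.filestat.isambig checks in the patched code):

import os

def rename_avoiding_ambiguity(src, dst):
    """Rename src over dst; if the stat of the new dst is
    indistinguishable from the replaced file's, advance its mtime by
    one second so stat-based cache validation can see the change."""
    try:
        oldstat = os.stat(dst)  # stat of the file about to be replaced
    except OSError:
        oldstat = None          # dst didn't exist; nothing to disambiguate
    ret = os.rename(src, dst)
    if oldstat is not None:
        newstat = os.stat(dst)
        if (int(newstat.st_ctime) == int(oldstat.st_ctime) and
                int(newstat.st_mtime) == int(oldstat.st_mtime)):
            # Same whole-second timestamps as the replaced file: advance
            # mtime by one second, masked to stay within the positive
            # 32-bit range (the same 0x7fffffff mask the patch uses).
            advanced = (int(oldstat.st_mtime) + 1) & 0x7fffffff
            os.utime(dst, (advanced, advanced))
    return ret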
@@ -1,1381 +1,1391 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import os
14 14 import re
15 15 import shutil
16 16 import stat
17 17 import tempfile
18 18 import threading
19 19
20 20 from .i18n import _
21 21 from .node import wdirrev
22 22 from . import (
23 23 encoding,
24 24 error,
25 25 match as matchmod,
26 26 osutil,
27 27 pathutil,
28 28 phases,
29 29 revset,
30 30 similar,
31 31 util,
32 32 )
33 33
34 34 if os.name == 'nt':
35 35 from . import scmwindows as scmplatform
36 36 else:
37 37 from . import scmposix as scmplatform
38 38
39 39 systemrcpath = scmplatform.systemrcpath
40 40 userrcpath = scmplatform.userrcpath
41 41
42 42 class status(tuple):
43 43 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
44 44 and 'ignored' properties are only relevant to the working copy.
45 45 '''
46 46
47 47 __slots__ = ()
48 48
49 49 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
50 50 clean):
51 51 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
52 52 ignored, clean))
53 53
54 54 @property
55 55 def modified(self):
56 56 '''files that have been modified'''
57 57 return self[0]
58 58
59 59 @property
60 60 def added(self):
61 61 '''files that have been added'''
62 62 return self[1]
63 63
64 64 @property
65 65 def removed(self):
66 66 '''files that have been removed'''
67 67 return self[2]
68 68
69 69 @property
70 70 def deleted(self):
71 71 '''files that are in the dirstate, but have been deleted from the
72 72 working copy (aka "missing")
73 73 '''
74 74 return self[3]
75 75
76 76 @property
77 77 def unknown(self):
78 78 '''files not in the dirstate that are not ignored'''
79 79 return self[4]
80 80
81 81 @property
82 82 def ignored(self):
83 83 '''files not in the dirstate that are ignored (by _dirignore())'''
84 84 return self[5]
85 85
86 86 @property
87 87 def clean(self):
88 88 '''files that have not been modified'''
89 89 return self[6]
90 90
91 91 def __repr__(self, *args, **kwargs):
92 92 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
93 93 'unknown=%r, ignored=%r, clean=%r>') % self)
94 94
95 95 def itersubrepos(ctx1, ctx2):
96 96 """find subrepos in ctx1 or ctx2"""
97 97 # Create a (subpath, ctx) mapping where we prefer subpaths from
98 98 # ctx1. The subpaths from ctx2 are important when the .hgsub file
99 99 # has been modified (in ctx2) but not yet committed (in ctx1).
100 100 subpaths = dict.fromkeys(ctx2.substate, ctx2)
101 101 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
102 102
103 103 missing = set()
104 104
105 105 for subpath in ctx2.substate:
106 106 if subpath not in ctx1.substate:
107 107 del subpaths[subpath]
108 108 missing.add(subpath)
109 109
110 110 for subpath, ctx in sorted(subpaths.iteritems()):
111 111 yield subpath, ctx.sub(subpath)
112 112
113 113 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
114 114 # status and diff will have an accurate result when it does
115 115 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
116 116 # against itself.
117 117 for subpath in missing:
118 118 yield subpath, ctx2.nullsub(subpath, ctx1)
119 119
120 120 def nochangesfound(ui, repo, excluded=None):
121 121 '''Report no changes for push/pull, excluded is None or a list of
122 122 nodes excluded from the push/pull.
123 123 '''
124 124 secretlist = []
125 125 if excluded:
126 126 for n in excluded:
127 127 if n not in repo:
128 128 # discovery should not have included the filtered revision,
129 129 # we have to explicitly exclude it until discovery is cleaned up.
130 130 continue
131 131 ctx = repo[n]
132 132 if ctx.phase() >= phases.secret and not ctx.extinct():
133 133 secretlist.append(n)
134 134
135 135 if secretlist:
136 136 ui.status(_("no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist))
138 138 else:
139 139 ui.status(_("no changes found\n"))
140 140
141 141 def checknewlabel(repo, lbl, kind):
142 142 # Do not use the "kind" parameter in ui output.
143 143 # It makes strings difficult to translate.
144 144 if lbl in ['tip', '.', 'null']:
145 145 raise error.Abort(_("the name '%s' is reserved") % lbl)
146 146 for c in (':', '\0', '\n', '\r'):
147 147 if c in lbl:
148 148 raise error.Abort(_("%r cannot be used in a name") % c)
149 149 try:
150 150 int(lbl)
151 151 raise error.Abort(_("cannot use an integer as a name"))
152 152 except ValueError:
153 153 pass
154 154
155 155 def checkfilename(f):
156 156 '''Check that the filename f is an acceptable filename for a tracked file'''
157 157 if '\r' in f or '\n' in f:
158 158 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
159 159
160 160 def checkportable(ui, f):
161 161 '''Check if filename f is portable and warn or abort depending on config'''
162 162 checkfilename(f)
163 163 abort, warn = checkportabilityalert(ui)
164 164 if abort or warn:
165 165 msg = util.checkwinfilename(f)
166 166 if msg:
167 167 msg = "%s: %r" % (msg, f)
168 168 if abort:
169 169 raise error.Abort(msg)
170 170 ui.warn(_("warning: %s\n") % msg)
171 171
172 172 def checkportabilityalert(ui):
173 173 '''check if the user's config requests nothing, a warning, or abort for
174 174 non-portable filenames'''
175 175 val = ui.config('ui', 'portablefilenames', 'warn')
176 176 lval = val.lower()
177 177 bval = util.parsebool(val)
178 178 abort = os.name == 'nt' or lval == 'abort'
179 179 warn = bval or lval == 'warn'
180 180 if bval is None and not (warn or abort or lval == 'ignore'):
181 181 raise error.ConfigError(
182 182 _("ui.portablefilenames value is invalid ('%s')") % val)
183 183 return abort, warn
184 184
185 185 class casecollisionauditor(object):
186 186 def __init__(self, ui, abort, dirstate):
187 187 self._ui = ui
188 188 self._abort = abort
189 189 allfiles = '\0'.join(dirstate._map)
190 190 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
191 191 self._dirstate = dirstate
192 192 # The purpose of _newfiles is so that we don't complain about
193 193 # case collisions if someone were to call this object with the
194 194 # same filename twice.
195 195 self._newfiles = set()
196 196
197 197 def __call__(self, f):
198 198 if f in self._newfiles:
199 199 return
200 200 fl = encoding.lower(f)
201 201 if fl in self._loweredfiles and f not in self._dirstate:
202 202 msg = _('possible case-folding collision for %s') % f
203 203 if self._abort:
204 204 raise error.Abort(msg)
205 205 self._ui.warn(_("warning: %s\n") % msg)
206 206 self._loweredfiles.add(fl)
207 207 self._newfiles.add(f)
208 208
209 209 def filteredhash(repo, maxrev):
210 210 """build hash of filtered revisions in the current repoview.
211 211
212 212 Multiple caches perform up-to-date validation by checking that the
213 213 tiprev and tipnode stored in the cache file match the current repository.
214 214 However, this is not sufficient for validating repoviews because the set
215 215 of revisions in the view may change without the repository tiprev and
216 216 tipnode changing.
217 217
218 218 This function hashes all the revs filtered from the view and returns
219 219 that SHA-1 digest.
220 220 """
221 221 cl = repo.changelog
222 222 if not cl.filteredrevs:
223 223 return None
224 224 key = None
225 225 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
226 226 if revs:
227 227 s = util.sha1()
228 228 for rev in revs:
229 229 s.update('%s;' % rev)
230 230 key = s.digest()
231 231 return key
232 232
233 233 class abstractvfs(object):
234 234 """Abstract base class; cannot be instantiated"""
235 235
236 236 def __init__(self, *args, **kwargs):
237 237 '''Prevent instantiation; don't call this from subclasses.'''
238 238 raise NotImplementedError('attempted instantiating ' + str(type(self)))
239 239
240 240 def tryread(self, path):
241 241 '''gracefully return an empty string for missing files'''
242 242 try:
243 243 return self.read(path)
244 244 except IOError as inst:
245 245 if inst.errno != errno.ENOENT:
246 246 raise
247 247 return ""
248 248
249 249 def tryreadlines(self, path, mode='rb'):
250 250 '''gracefully return an empty array for missing files'''
251 251 try:
252 252 return self.readlines(path, mode=mode)
253 253 except IOError as inst:
254 254 if inst.errno != errno.ENOENT:
255 255 raise
256 256 return []
257 257
258 258 def open(self, path, mode="r", text=False, atomictemp=False,
259 259 notindexed=False, backgroundclose=False):
260 260 '''Open ``path`` file, which is relative to vfs root.
261 261
262 262 Newly created directories are marked as "not to be indexed by
263 263 the content indexing service", if ``notindexed`` is specified
264 264 for "write" mode access.
265 265 '''
266 266 self.open = self.__call__
267 267 return self.__call__(path, mode, text, atomictemp, notindexed,
268 268 backgroundclose=backgroundclose)
269 269
270 270 def read(self, path):
271 271 with self(path, 'rb') as fp:
272 272 return fp.read()
273 273
274 274 def readlines(self, path, mode='rb'):
275 275 with self(path, mode=mode) as fp:
276 276 return fp.readlines()
277 277
278 278 def write(self, path, data, backgroundclose=False):
279 279 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
280 280 return fp.write(data)
281 281
282 282 def writelines(self, path, data, mode='wb', notindexed=False):
283 283 with self(path, mode=mode, notindexed=notindexed) as fp:
284 284 return fp.writelines(data)
285 285
286 286 def append(self, path, data):
287 287 with self(path, 'ab') as fp:
288 288 return fp.write(data)
289 289
290 290 def basename(self, path):
291 291 """return base element of a path (as os.path.basename would do)
292 292
293 293 This exists to allow handling of strange encoding if needed."""
294 294 return os.path.basename(path)
295 295
296 296 def chmod(self, path, mode):
297 297 return os.chmod(self.join(path), mode)
298 298
299 299 def dirname(self, path):
300 300 """return dirname element of a path (as os.path.dirname would do)
301 301
302 302 This exists to allow handling of strange encoding if needed."""
303 303 return os.path.dirname(path)
304 304
305 305 def exists(self, path=None):
306 306 return os.path.exists(self.join(path))
307 307
308 308 def fstat(self, fp):
309 309 return util.fstat(fp)
310 310
311 311 def isdir(self, path=None):
312 312 return os.path.isdir(self.join(path))
313 313
314 314 def isfile(self, path=None):
315 315 return os.path.isfile(self.join(path))
316 316
317 317 def islink(self, path=None):
318 318 return os.path.islink(self.join(path))
319 319
320 320 def isfileorlink(self, path=None):
321 321 '''return whether path is a regular file or a symlink
322 322
323 323 Unlike isfile, this doesn't follow symlinks.'''
324 324 try:
325 325 st = self.lstat(path)
326 326 except OSError:
327 327 return False
328 328 mode = st.st_mode
329 329 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
330 330
331 331 def reljoin(self, *paths):
332 332 """join various elements of a path together (as os.path.join would do)
333 333
334 334 The vfs base is not injected so that paths stay relative. This exists
335 335 to allow handling of strange encoding if needed."""
336 336 return os.path.join(*paths)
337 337
338 338 def split(self, path):
339 339 """split top-most element of a path (as os.path.split would do)
340 340
341 341 This exists to allow handling of strange encoding if needed."""
342 342 return os.path.split(path)
343 343
344 344 def lexists(self, path=None):
345 345 return os.path.lexists(self.join(path))
346 346
347 347 def lstat(self, path=None):
348 348 return os.lstat(self.join(path))
349 349
350 350 def listdir(self, path=None):
351 351 return os.listdir(self.join(path))
352 352
353 353 def makedir(self, path=None, notindexed=True):
354 354 return util.makedir(self.join(path), notindexed)
355 355
356 356 def makedirs(self, path=None, mode=None):
357 357 return util.makedirs(self.join(path), mode)
358 358
359 359 def makelock(self, info, path):
360 360 return util.makelock(info, self.join(path))
361 361
362 362 def mkdir(self, path=None):
363 363 return os.mkdir(self.join(path))
364 364
365 365 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
366 366 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
367 367 dir=self.join(dir), text=text)
368 368 dname, fname = util.split(name)
369 369 if dir:
370 370 return fd, os.path.join(dir, fname)
371 371 else:
372 372 return fd, fname
373 373
374 374 def readdir(self, path=None, stat=None, skip=None):
375 375 return osutil.listdir(self.join(path), stat, skip)
376 376
377 377 def readlock(self, path):
378 378 return util.readlock(self.join(path))
379 379
380 def rename(self, src, dst):
381 return util.rename(self.join(src), self.join(dst))
380 def rename(self, src, dst, checkambig=False):
381 dstpath = self.join(dst)
382 oldstat = checkambig and util.filestat(dstpath)
383 if oldstat and oldstat.stat:
384 ret = util.rename(self.join(src), dstpath)
385 newstat = util.filestat(dstpath)
386 if newstat.isambig(oldstat):
387 # stat of the renamed file is indistinguishable from the original one
388 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
389 os.utime(dstpath, (advanced, advanced))
390 return ret
391 return util.rename(self.join(src), dstpath)
382 392
383 393 def readlink(self, path):
384 394 return os.readlink(self.join(path))
385 395
386 396 def removedirs(self, path=None):
387 397 """Remove a leaf directory and all empty intermediate ones
388 398 """
389 399 return util.removedirs(self.join(path))
390 400
391 401 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
392 402 """Remove a directory tree recursively
393 403
394 404 If ``forcibly``, this tries to remove READ-ONLY files, too.
395 405 """
396 406 if forcibly:
397 407 def onerror(function, path, excinfo):
398 408 if function is not os.remove:
399 409 raise
400 410 # read-only files cannot be unlinked under Windows
401 411 s = os.stat(path)
402 412 if (s.st_mode & stat.S_IWRITE) != 0:
403 413 raise
404 414 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
405 415 os.remove(path)
406 416 else:
407 417 onerror = None
408 418 return shutil.rmtree(self.join(path),
409 419 ignore_errors=ignore_errors, onerror=onerror)
410 420
411 421 def setflags(self, path, l, x):
412 422 return util.setflags(self.join(path), l, x)
413 423
414 424 def stat(self, path=None):
415 425 return os.stat(self.join(path))
416 426
417 427 def unlink(self, path=None):
418 428 return util.unlink(self.join(path))
419 429
420 430 def unlinkpath(self, path=None, ignoremissing=False):
421 431 return util.unlinkpath(self.join(path), ignoremissing)
422 432
423 433 def utime(self, path=None, t=None):
424 434 return os.utime(self.join(path), t)
425 435
426 436 def walk(self, path=None, onerror=None):
427 437 """Yield (dirpath, dirs, files) tuple for each directories under path
428 438
429 439 ``dirpath`` is relative to the root of this vfs. This
430 440 uses ``os.sep`` as the path separator, even if you specify
431 441 a POSIX style ``path``.
432 442
433 443 "The root of this vfs" is represented as empty ``dirpath``.
434 444 """
435 445 root = os.path.normpath(self.join(None))
436 446 # when dirpath == root, dirpath[prefixlen:] becomes empty
437 447 # because len(dirpath) < prefixlen.
438 448 prefixlen = len(pathutil.normasprefix(root))
439 449 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
440 450 yield (dirpath[prefixlen:], dirs, files)
441 451
442 452 @contextlib.contextmanager
443 453 def backgroundclosing(self, ui, expectedcount=-1):
444 454 """Allow files to be closed asynchronously.
445 455
446 456 When this context manager is active, ``backgroundclose`` can be passed
447 457 to ``__call__``/``open`` to result in the file possibly being closed
448 458 asynchronously, on a background thread.
449 459 """
450 460 # This is an arbitrary restriction and could be changed if we ever
451 461 # have a use case.
452 462 vfs = getattr(self, 'vfs', self)
453 463 if getattr(vfs, '_backgroundfilecloser', None):
454 464 raise error.Abort('can only have 1 active background file closer')
455 465
456 466 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
457 467 try:
458 468 vfs._backgroundfilecloser = bfc
459 469 yield bfc
460 470 finally:
461 471 vfs._backgroundfilecloser = None
462 472
463 473 class vfs(abstractvfs):
464 474 '''Operate files relative to a base directory
465 475
466 476 This class is used to hide the details of COW semantics and
467 477 remote file access from higher level code.
468 478 '''
469 479 def __init__(self, base, audit=True, expandpath=False, realpath=False):
470 480 if expandpath:
471 481 base = util.expandpath(base)
472 482 if realpath:
473 483 base = os.path.realpath(base)
474 484 self.base = base
475 485 self.mustaudit = audit
476 486 self.createmode = None
477 487 self._trustnlink = None
478 488
479 489 @property
480 490 def mustaudit(self):
481 491 return self._audit
482 492
483 493 @mustaudit.setter
484 494 def mustaudit(self, onoff):
485 495 self._audit = onoff
486 496 if onoff:
487 497 self.audit = pathutil.pathauditor(self.base)
488 498 else:
489 499 self.audit = util.always
490 500
491 501 @util.propertycache
492 502 def _cansymlink(self):
493 503 return util.checklink(self.base)
494 504
495 505 @util.propertycache
496 506 def _chmod(self):
497 507 return util.checkexec(self.base)
498 508
499 509 def _fixfilemode(self, name):
500 510 if self.createmode is None or not self._chmod:
501 511 return
502 512 os.chmod(name, self.createmode & 0o666)
503 513
504 514 def __call__(self, path, mode="r", text=False, atomictemp=False,
505 515 notindexed=False, backgroundclose=False, checkambig=False):
506 516 '''Open ``path`` file, which is relative to vfs root.
507 517
508 518 Newly created directories are marked as "not to be indexed by
509 519 the content indexing service", if ``notindexed`` is specified
510 520 for "write" mode access.
511 521
512 522 If ``backgroundclose`` is passed, the file may be closed asynchronously.
513 523 It can only be used if the ``self.backgroundclosing()`` context manager
514 524 is active. This should only be specified if the following criteria hold:
515 525
516 526 1. There is a potential for writing thousands of files. Unless you
517 527 are writing thousands of files, the performance benefits of
518 528 asynchronously closing files are not realized.
519 529 2. Files are opened exactly once for the ``backgroundclosing``
520 530 active duration and are therefore free of race conditions between
521 531 closing a file on a background thread and reopening it. (If the
522 532 file were opened multiple times, there could be unflushed data
523 533 because the original file handle hasn't been flushed/closed yet.)
524 534
525 535 ``checkambig`` is passed to atomictempfile (valid only for writing).
526 536 '''
527 537 if self._audit:
528 538 r = util.checkosfilename(path)
529 539 if r:
530 540 raise error.Abort("%s: %r" % (r, path))
531 541 self.audit(path)
532 542 f = self.join(path)
533 543
534 544 if not text and "b" not in mode:
535 545 mode += "b" # for that other OS
536 546
537 547 nlink = -1
538 548 if mode not in ('r', 'rb'):
539 549 dirname, basename = util.split(f)
540 550 # If basename is empty, then the path is malformed because it points
541 551 # to a directory. Let the posixfile() call below raise IOError.
542 552 if basename:
543 553 if atomictemp:
544 554 util.makedirs(dirname, self.createmode, notindexed)
545 555 return util.atomictempfile(f, mode, self.createmode,
546 556 checkambig=checkambig)
547 557 try:
548 558 if 'w' in mode:
549 559 util.unlink(f)
550 560 nlink = 0
551 561 else:
552 562 # nlinks() may behave differently for files on Windows
553 563 # shares if the file is open.
554 564 with util.posixfile(f):
555 565 nlink = util.nlinks(f)
556 566 if nlink < 1:
557 567 nlink = 2 # force mktempcopy (issue1922)
558 568 except (OSError, IOError) as e:
559 569 if e.errno != errno.ENOENT:
560 570 raise
561 571 nlink = 0
562 572 util.makedirs(dirname, self.createmode, notindexed)
563 573 if nlink > 0:
564 574 if self._trustnlink is None:
565 575 self._trustnlink = nlink > 1 or util.checknlink(f)
566 576 if nlink > 1 or not self._trustnlink:
567 577 util.rename(util.mktempcopy(f), f)
568 578 fp = util.posixfile(f, mode)
569 579 if nlink == 0:
570 580 self._fixfilemode(f)
571 581
572 582 if backgroundclose:
573 583 if not self._backgroundfilecloser:
574 584 raise error.Abort('backgroundclose can only be used when a '
575 585 'backgroundclosing context manager is active')
576 586
577 587 fp = delayclosedfile(fp, self._backgroundfilecloser)
578 588
579 589 return fp
580 590
581 591 def symlink(self, src, dst):
582 592 self.audit(dst)
583 593 linkname = self.join(dst)
584 594 try:
585 595 os.unlink(linkname)
586 596 except OSError:
587 597 pass
588 598
589 599 util.makedirs(os.path.dirname(linkname), self.createmode)
590 600
591 601 if self._cansymlink:
592 602 try:
593 603 os.symlink(src, linkname)
594 604 except OSError as err:
595 605 raise OSError(err.errno, _('could not symlink to %r: %s') %
596 606 (src, err.strerror), linkname)
597 607 else:
598 608 self.write(dst, src)
599 609
600 610 def join(self, path, *insidef):
601 611 if path:
602 612 return os.path.join(self.base, path, *insidef)
603 613 else:
604 614 return self.base
605 615
606 616 opener = vfs
607 617
608 618 class auditvfs(object):
609 619 def __init__(self, vfs):
610 620 self.vfs = vfs
611 621
612 622 @property
613 623 def mustaudit(self):
614 624 return self.vfs.mustaudit
615 625
616 626 @mustaudit.setter
617 627 def mustaudit(self, onoff):
618 628 self.vfs.mustaudit = onoff
619 629
620 630 class filtervfs(abstractvfs, auditvfs):
621 631 '''Wrapper vfs for filtering filenames with a function.'''
622 632
623 633 def __init__(self, vfs, filter):
624 634 auditvfs.__init__(self, vfs)
625 635 self._filter = filter
626 636
627 637 def __call__(self, path, *args, **kwargs):
628 638 return self.vfs(self._filter(path), *args, **kwargs)
629 639
630 640 def join(self, path, *insidef):
631 641 if path:
632 642 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
633 643 else:
634 644 return self.vfs.join(path)
635 645
636 646 filteropener = filtervfs
637 647
638 648 class readonlyvfs(abstractvfs, auditvfs):
639 649 '''Wrapper vfs preventing any writing.'''
640 650
641 651 def __init__(self, vfs):
642 652 auditvfs.__init__(self, vfs)
643 653
644 654 def __call__(self, path, mode='r', *args, **kw):
645 655 if mode not in ('r', 'rb'):
646 656 raise error.Abort('this vfs is read only')
647 657 return self.vfs(path, mode, *args, **kw)
648 658
649 659 def join(self, path, *insidef):
650 660 return self.vfs.join(path, *insidef)
651 661
652 662 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
653 663 '''yield every hg repository under path, always recursively.
654 664 The recurse flag will only control recursion into repo working dirs'''
655 665 def errhandler(err):
656 666 if err.filename == path:
657 667 raise err
658 668 samestat = getattr(os.path, 'samestat', None)
659 669 if followsym and samestat is not None:
660 670 def adddir(dirlst, dirname):
661 671 match = False
662 672 dirstat = os.stat(dirname)
663 673 for lstdirstat in dirlst:
664 674 if samestat(dirstat, lstdirstat):
665 675 match = True
666 676 break
667 677 if not match:
668 678 dirlst.append(dirstat)
669 679 return not match
670 680 else:
671 681 followsym = False
672 682
673 683 if (seen_dirs is None) and followsym:
674 684 seen_dirs = []
675 685 adddir(seen_dirs, path)
676 686 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
677 687 dirs.sort()
678 688 if '.hg' in dirs:
679 689 yield root # found a repository
680 690 qroot = os.path.join(root, '.hg', 'patches')
681 691 if os.path.isdir(os.path.join(qroot, '.hg')):
682 692 yield qroot # we have a patch queue repo here
683 693 if recurse:
684 694 # avoid recursing inside the .hg directory
685 695 dirs.remove('.hg')
686 696 else:
687 697 dirs[:] = [] # don't descend further
688 698 elif followsym:
689 699 newdirs = []
690 700 for d in dirs:
691 701 fname = os.path.join(root, d)
692 702 if adddir(seen_dirs, fname):
693 703 if os.path.islink(fname):
694 704 for hgname in walkrepos(fname, True, seen_dirs):
695 705 yield hgname
696 706 else:
697 707 newdirs.append(d)
698 708 dirs[:] = newdirs
699 709
700 710 def osrcpath():
701 711 '''return default os-specific hgrc search path'''
702 712 path = []
703 713 defaultpath = os.path.join(util.datapath, 'default.d')
704 714 if os.path.isdir(defaultpath):
705 715 for f, kind in osutil.listdir(defaultpath):
706 716 if f.endswith('.rc'):
707 717 path.append(os.path.join(defaultpath, f))
708 718 path.extend(systemrcpath())
709 719 path.extend(userrcpath())
710 720 path = [os.path.normpath(f) for f in path]
711 721 return path
712 722
713 723 _rcpath = None
714 724
715 725 def rcpath():
716 726 '''return hgrc search path. if env var HGRCPATH is set, use it.
717 727 for each item in path, if directory, use files ending in .rc,
718 728 else use item.
719 729 make HGRCPATH empty to only look in .hg/hgrc of current repo.
720 730 if no HGRCPATH, use default os-specific path.'''
721 731 global _rcpath
722 732 if _rcpath is None:
723 733 if 'HGRCPATH' in os.environ:
724 734 _rcpath = []
725 735 for p in os.environ['HGRCPATH'].split(os.pathsep):
726 736 if not p:
727 737 continue
728 738 p = util.expandpath(p)
729 739 if os.path.isdir(p):
730 740 for f, kind in osutil.listdir(p):
731 741 if f.endswith('.rc'):
732 742 _rcpath.append(os.path.join(p, f))
733 743 else:
734 744 _rcpath.append(p)
735 745 else:
736 746 _rcpath = osrcpath()
737 747 return _rcpath
738 748
739 749 def intrev(rev):
740 750 """Return integer for a given revision that can be used in comparison or
741 751 arithmetic operation"""
742 752 if rev is None:
743 753 return wdirrev
744 754 return rev
745 755
746 756 def revsingle(repo, revspec, default='.'):
747 757 if not revspec and revspec != 0:
748 758 return repo[default]
749 759
750 760 l = revrange(repo, [revspec])
751 761 if not l:
752 762 raise error.Abort(_('empty revision set'))
753 763 return repo[l.last()]
754 764
755 765 def _pairspec(revspec):
756 766 tree = revset.parse(revspec)
757 767 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
758 768 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
759 769
760 770 def revpair(repo, revs):
761 771 if not revs:
762 772 return repo.dirstate.p1(), None
763 773
764 774 l = revrange(repo, revs)
765 775
766 776 if not l:
767 777 first = second = None
768 778 elif l.isascending():
769 779 first = l.min()
770 780 second = l.max()
771 781 elif l.isdescending():
772 782 first = l.max()
773 783 second = l.min()
774 784 else:
775 785 first = l.first()
776 786 second = l.last()
777 787
778 788 if first is None:
779 789 raise error.Abort(_('empty revision range'))
780 790 if (first == second and len(revs) >= 2
781 791 and not all(revrange(repo, [r]) for r in revs)):
782 792 raise error.Abort(_('empty revision on one side of range'))
783 793
784 794 # if top-level is range expression, the result must always be a pair
785 795 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
786 796 return repo.lookup(first), None
787 797
788 798 return repo.lookup(first), repo.lookup(second)
789 799
790 800 def revrange(repo, revs):
791 801 """Yield revision as strings from a list of revision specifications."""
792 802 allspecs = []
793 803 for spec in revs:
794 804 if isinstance(spec, int):
795 805 spec = revset.formatspec('rev(%d)', spec)
796 806 allspecs.append(spec)
797 807 m = revset.matchany(repo.ui, allspecs, repo)
798 808 return m(repo)
799 809
800 810 def meaningfulparents(repo, ctx):
801 811 """Return list of meaningful (or all if debug) parentrevs for rev.
802 812
803 813 For merges (two non-nullrev revisions) both parents are meaningful.
804 814 Otherwise the first parent revision is considered meaningful if it
805 815 is not the preceding revision.
806 816 """
807 817 parents = ctx.parents()
808 818 if len(parents) > 1:
809 819 return parents
810 820 if repo.ui.debugflag:
811 821 return [parents[0], repo['null']]
812 822 if parents[0].rev() >= intrev(ctx.rev()) - 1:
813 823 return []
814 824 return parents
815 825
816 826 def expandpats(pats):
817 827 '''Expand bare globs when running on windows.
818 828 On posix we assume it has already been done by sh.'''
819 829 if not util.expandglobs:
820 830 return list(pats)
821 831 ret = []
822 832 for kindpat in pats:
823 833 kind, pat = matchmod._patsplit(kindpat, None)
824 834 if kind is None:
825 835 try:
826 836 globbed = glob.glob(pat)
827 837 except re.error:
828 838 globbed = [pat]
829 839 if globbed:
830 840 ret.extend(globbed)
831 841 continue
832 842 ret.append(kindpat)
833 843 return ret
834 844
835 845 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
836 846 badfn=None):
837 847 '''Return a matcher and the patterns that were used.
838 848 The matcher will warn about bad matches, unless an alternate badfn callback
839 849 is provided.'''
840 850 if pats == ("",):
841 851 pats = []
842 852 if opts is None:
843 853 opts = {}
844 854 if not globbed and default == 'relpath':
845 855 pats = expandpats(pats or [])
846 856
847 857 def bad(f, msg):
848 858 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
849 859
850 860 if badfn is None:
851 861 badfn = bad
852 862
853 863 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
854 864 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
855 865
856 866 if m.always():
857 867 pats = []
858 868 return m, pats
859 869
860 870 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
861 871 badfn=None):
862 872 '''Return a matcher that will warn about bad matches.'''
863 873 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
864 874
865 875 def matchall(repo):
866 876 '''Return a matcher that will efficiently match everything.'''
867 877 return matchmod.always(repo.root, repo.getcwd())
868 878
869 879 def matchfiles(repo, files, badfn=None):
870 880 '''Return a matcher that will efficiently match exactly these files.'''
871 881 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
872 882
873 883 def origpath(ui, repo, filepath):
874 884 '''customize where .orig files are created
875 885
876 886 Fetch user defined path from config file: [ui] origbackuppath = <path>
877 887 Fall back to default (filepath) if not specified
878 888 '''
879 889 origbackuppath = ui.config('ui', 'origbackuppath', None)
880 890 if origbackuppath is None:
881 891 return filepath + ".orig"
882 892
883 893 filepathfromroot = os.path.relpath(filepath, start=repo.root)
884 894 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
885 895
886 896 origbackupdir = repo.vfs.dirname(fullorigpath)
887 897 if not repo.vfs.exists(origbackupdir):
888 898 ui.note(_('creating directory: %s\n') % origbackupdir)
889 899 util.makedirs(origbackupdir)
890 900
891 901 return fullorigpath + ".orig"
892 902
893 903 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
894 904 if opts is None:
895 905 opts = {}
896 906 m = matcher
897 907 if dry_run is None:
898 908 dry_run = opts.get('dry_run')
899 909 if similarity is None:
900 910 similarity = float(opts.get('similarity') or 0)
901 911
902 912 ret = 0
903 913 join = lambda f: os.path.join(prefix, f)
904 914
905 915 def matchessubrepo(matcher, subpath):
906 916 if matcher.exact(subpath):
907 917 return True
908 918 for f in matcher.files():
909 919 if f.startswith(subpath):
910 920 return True
911 921 return False
912 922
913 923 wctx = repo[None]
914 924 for subpath in sorted(wctx.substate):
915 925 if opts.get('subrepos') or matchessubrepo(m, subpath):
916 926 sub = wctx.sub(subpath)
917 927 try:
918 928 submatch = matchmod.subdirmatcher(subpath, m)
919 929 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
920 930 ret = 1
921 931 except error.LookupError:
922 932 repo.ui.status(_("skipping missing subrepository: %s\n")
923 933 % join(subpath))
924 934
925 935 rejected = []
926 936 def badfn(f, msg):
927 937 if f in m.files():
928 938 m.bad(f, msg)
929 939 rejected.append(f)
930 940
931 941 badmatch = matchmod.badmatch(m, badfn)
932 942 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
933 943 badmatch)
934 944
935 945 unknownset = set(unknown + forgotten)
936 946 toprint = unknownset.copy()
937 947 toprint.update(deleted)
938 948 for abs in sorted(toprint):
939 949 if repo.ui.verbose or not m.exact(abs):
940 950 if abs in unknownset:
941 951 status = _('adding %s\n') % m.uipath(abs)
942 952 else:
943 953 status = _('removing %s\n') % m.uipath(abs)
944 954 repo.ui.status(status)
945 955
946 956 renames = _findrenames(repo, m, added + unknown, removed + deleted,
947 957 similarity)
948 958
949 959 if not dry_run:
950 960 _markchanges(repo, unknown + forgotten, deleted, renames)
951 961
952 962 for f in rejected:
953 963 if f in m.files():
954 964 return 1
955 965 return ret
956 966
957 967 def marktouched(repo, files, similarity=0.0):
958 968 '''Assert that files have somehow been operated upon. files are relative to
959 969 the repo root.'''
960 970 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
961 971 rejected = []
962 972
963 973 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
964 974
965 975 if repo.ui.verbose:
966 976 unknownset = set(unknown + forgotten)
967 977 toprint = unknownset.copy()
968 978 toprint.update(deleted)
969 979 for abs in sorted(toprint):
970 980 if abs in unknownset:
971 981 status = _('adding %s\n') % abs
972 982 else:
973 983 status = _('removing %s\n') % abs
974 984 repo.ui.status(status)
975 985
976 986 renames = _findrenames(repo, m, added + unknown, removed + deleted,
977 987 similarity)
978 988
979 989 _markchanges(repo, unknown + forgotten, deleted, renames)
980 990
981 991 for f in rejected:
982 992 if f in m.files():
983 993 return 1
984 994 return 0
985 995
986 996 def _interestingfiles(repo, matcher):
987 997 '''Walk dirstate with matcher, looking for files that addremove would care
988 998 about.
989 999
990 1000 This is different from dirstate.status because it doesn't care about
991 1001 whether files are modified or clean.'''
992 1002 added, unknown, deleted, removed, forgotten = [], [], [], [], []
993 1003 audit_path = pathutil.pathauditor(repo.root)
994 1004
995 1005 ctx = repo[None]
996 1006 dirstate = repo.dirstate
997 1007 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
998 1008 full=False)
999 1009 for abs, st in walkresults.iteritems():
1000 1010 dstate = dirstate[abs]
1001 1011 if dstate == '?' and audit_path.check(abs):
1002 1012 unknown.append(abs)
1003 1013 elif dstate != 'r' and not st:
1004 1014 deleted.append(abs)
1005 1015 elif dstate == 'r' and st:
1006 1016 forgotten.append(abs)
1007 1017 # for finding renames
1008 1018 elif dstate == 'r' and not st:
1009 1019 removed.append(abs)
1010 1020 elif dstate == 'a':
1011 1021 added.append(abs)
1012 1022
1013 1023 return added, unknown, deleted, removed, forgotten
1014 1024
1015 1025 def _findrenames(repo, matcher, added, removed, similarity):
1016 1026 '''Find renames from removed files to added ones.'''
1017 1027 renames = {}
1018 1028 if similarity > 0:
1019 1029 for old, new, score in similar.findrenames(repo, added, removed,
1020 1030 similarity):
1021 1031 if (repo.ui.verbose or not matcher.exact(old)
1022 1032 or not matcher.exact(new)):
1023 1033 repo.ui.status(_('recording removal of %s as rename to %s '
1024 1034 '(%d%% similar)\n') %
1025 1035 (matcher.rel(old), matcher.rel(new),
1026 1036 score * 100))
1027 1037 renames[new] = old
1028 1038 return renames
1029 1039
1030 1040 def _markchanges(repo, unknown, deleted, renames):
1031 1041 '''Marks the files in unknown as added, the files in deleted as removed,
1032 1042 and the files in renames as copied.'''
1033 1043 wctx = repo[None]
1034 1044 with repo.wlock():
1035 1045 wctx.forget(deleted)
1036 1046 wctx.add(unknown)
1037 1047 for new, old in renames.iteritems():
1038 1048 wctx.copy(old, new)
1039 1049
1040 1050 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1041 1051 """Update the dirstate to reflect the intent of copying src to dst. For
1042 1052 different reasons it might not end with dst being marked as copied from src.
1043 1053 """
1044 1054 origsrc = repo.dirstate.copied(src) or src
1045 1055 if dst == origsrc: # copying back a copy?
1046 1056 if repo.dirstate[dst] not in 'mn' and not dryrun:
1047 1057 repo.dirstate.normallookup(dst)
1048 1058 else:
1049 1059 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1050 1060 if not ui.quiet:
1051 1061 ui.warn(_("%s has not been committed yet, so no copy "
1052 1062 "data will be stored for %s.\n")
1053 1063 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1054 1064 if repo.dirstate[dst] in '?r' and not dryrun:
1055 1065 wctx.add([dst])
1056 1066 elif not dryrun:
1057 1067 wctx.copy(origsrc, dst)
1058 1068
1059 1069 def readrequires(opener, supported):
1060 1070 '''Reads and parses .hg/requires and checks if all entries found
1061 1071 are in the list of supported features.'''
1062 1072 requirements = set(opener.read("requires").splitlines())
1063 1073 missings = []
1064 1074 for r in requirements:
1065 1075 if r not in supported:
1066 1076 if not r or not r[0].isalnum():
1067 1077 raise error.RequirementError(_(".hg/requires file is corrupt"))
1068 1078 missings.append(r)
1069 1079 missings.sort()
1070 1080 if missings:
1071 1081 raise error.RequirementError(
1072 1082 _("repository requires features unknown to this Mercurial: %s")
1073 1083 % " ".join(missings),
1074 1084 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1075 1085 " for more information"))
1076 1086 return requirements
1077 1087
1078 1088 def writerequires(opener, requirements):
1079 1089 with opener('requires', 'w') as fp:
1080 1090 for r in sorted(requirements):
1081 1091 fp.write("%s\n" % r)
1082 1092
1083 1093 class filecachesubentry(object):
1084 1094 def __init__(self, path, stat):
1085 1095 self.path = path
1086 1096 self.cachestat = None
1087 1097 self._cacheable = None
1088 1098
1089 1099 if stat:
1090 1100 self.cachestat = filecachesubentry.stat(self.path)
1091 1101
1092 1102 if self.cachestat:
1093 1103 self._cacheable = self.cachestat.cacheable()
1094 1104 else:
1095 1105 # None means we don't know yet
1096 1106 self._cacheable = None
1097 1107
1098 1108 def refresh(self):
1099 1109 if self.cacheable():
1100 1110 self.cachestat = filecachesubentry.stat(self.path)
1101 1111
1102 1112 def cacheable(self):
1103 1113 if self._cacheable is not None:
1104 1114 return self._cacheable
1105 1115
1106 1116 # we don't know yet, assume it is for now
1107 1117 return True
1108 1118
1109 1119 def changed(self):
1110 1120 # no point in going further if we can't cache it
1111 1121 if not self.cacheable():
1112 1122 return True
1113 1123
1114 1124 newstat = filecachesubentry.stat(self.path)
1115 1125
1116 1126 # we may not know if it's cacheable yet, check again now
1117 1127 if newstat and self._cacheable is None:
1118 1128 self._cacheable = newstat.cacheable()
1119 1129
1120 1130 # check again
1121 1131 if not self._cacheable:
1122 1132 return True
1123 1133
1124 1134 if self.cachestat != newstat:
1125 1135 self.cachestat = newstat
1126 1136 return True
1127 1137 else:
1128 1138 return False
1129 1139
1130 1140 @staticmethod
1131 1141 def stat(path):
1132 1142 try:
1133 1143 return util.cachestat(path)
1134 1144 except OSError as e:
1135 1145 if e.errno != errno.ENOENT:
1136 1146 raise
1137 1147
1138 1148 class filecacheentry(object):
1139 1149 def __init__(self, paths, stat=True):
1140 1150 self._entries = []
1141 1151 for path in paths:
1142 1152 self._entries.append(filecachesubentry(path, stat))
1143 1153
1144 1154 def changed(self):
1145 1155 '''true if any entry has changed'''
1146 1156 for entry in self._entries:
1147 1157 if entry.changed():
1148 1158 return True
1149 1159 return False
1150 1160
1151 1161 def refresh(self):
1152 1162 for entry in self._entries:
1153 1163 entry.refresh()
1154 1164
1155 1165 class filecache(object):
1156 1166 '''A property-like decorator that tracks files under .hg/ for updates.
1157 1167
1158 1168 Records stat info when called in _filecache.
1159 1169
1160 1170 On subsequent calls, compares old stat info with new info, and recreates the
1161 1171 object when any of the files changes, updating the new stat info in
1162 1172 _filecache.
1163 1173
1164 1174 Mercurial either atomically renames or appends to files under .hg,
1165 1175 so to ensure the cache is reliable we need the filesystem to be able
1166 1176 to tell us if a file has been replaced. If it can't, we fall back to
1167 1177 recreating the object on every call (essentially the same behavior as
1168 1178 propertycache).
1169 1179
1170 1180 '''
1171 1181 def __init__(self, *paths):
1172 1182 self.paths = paths
1173 1183
1174 1184 def join(self, obj, fname):
1175 1185 """Used to compute the runtime path of a cached file.
1176 1186
1177 1187 Users should subclass filecache and provide their own version of this
1178 1188 function to call the appropriate join function on 'obj' (an instance
1179 1189 of the class that its member function was decorated).
1180 1190 """
1181 1191 return obj.join(fname)
1182 1192
1183 1193 def __call__(self, func):
1184 1194 self.func = func
1185 1195 self.name = func.__name__
1186 1196 return self
1187 1197
1188 1198 def __get__(self, obj, type=None):
1189 1199 # do we need to check if the file changed?
1190 1200 if self.name in obj.__dict__:
1191 1201 assert self.name in obj._filecache, self.name
1192 1202 return obj.__dict__[self.name]
1193 1203
1194 1204 entry = obj._filecache.get(self.name)
1195 1205
1196 1206 if entry:
1197 1207 if entry.changed():
1198 1208 entry.obj = self.func(obj)
1199 1209 else:
1200 1210 paths = [self.join(obj, path) for path in self.paths]
1201 1211
1202 1212 # We stat -before- creating the object so our cache doesn't lie if
1203 1213 # a writer modified the file between the time we read and stat it
1204 1214 entry = filecacheentry(paths, True)
1205 1215 entry.obj = self.func(obj)
1206 1216
1207 1217 obj._filecache[self.name] = entry
1208 1218
1209 1219 obj.__dict__[self.name] = entry.obj
1210 1220 return entry.obj
1211 1221
1212 1222 def __set__(self, obj, value):
1213 1223 if self.name not in obj._filecache:
1214 1224 # we add an entry for the missing value because X in __dict__
1215 1225 # implies X in _filecache
1216 1226 paths = [self.join(obj, path) for path in self.paths]
1217 1227 ce = filecacheentry(paths, False)
1218 1228 obj._filecache[self.name] = ce
1219 1229 else:
1220 1230 ce = obj._filecache[self.name]
1221 1231
1222 1232 ce.obj = value # update cached copy
1223 1233 obj.__dict__[self.name] = value # update copy returned by obj.x
1224 1234
1225 1235 def __delete__(self, obj):
1226 1236 try:
1227 1237 del obj.__dict__[self.name]
1228 1238 except KeyError:
1229 1239 raise AttributeError(self.name)
1230 1240
1231 1241 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1232 1242 if lock is None:
1233 1243 raise error.LockInheritanceContractViolation(
1234 1244 'lock can only be inherited while held')
1235 1245 if environ is None:
1236 1246 environ = {}
1237 1247 with lock.inherit() as locker:
1238 1248 environ[envvar] = locker
1239 1249 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1240 1250
1241 1251 def wlocksub(repo, cmd, *args, **kwargs):
1242 1252 """run cmd as a subprocess that allows inheriting repo's wlock
1243 1253
1244 1254 This can only be called while the wlock is held. This takes all the
1245 1255 arguments that ui.system does, and returns the exit code of the
1246 1256 subprocess."""
1247 1257 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1248 1258 **kwargs)
1249 1259
1250 1260 def gdinitconfig(ui):
1251 1261 """helper function to know if a repo should be created as general delta
1252 1262 """
1253 1263 # experimental config: format.generaldelta
1254 1264 return (ui.configbool('format', 'generaldelta', False)
1255 1265 or ui.configbool('format', 'usegeneraldelta', True))
1256 1266
1257 1267 def gddeltaconfig(ui):
1258 1268 """helper function to know if incoming delta should be optimised
1259 1269 """
1260 1270 # experimental config: format.generaldelta
1261 1271 return ui.configbool('format', 'generaldelta', False)
1262 1272
1263 1273 class delayclosedfile(object):
1264 1274 """Proxy for a file object whose close is delayed.
1265 1275
1266 1276 Do not instantiate outside of the vfs layer.
1267 1277 """
1268 1278
1269 1279 def __init__(self, fh, closer):
1270 1280 object.__setattr__(self, '_origfh', fh)
1271 1281 object.__setattr__(self, '_closer', closer)
1272 1282
1273 1283 def __getattr__(self, attr):
1274 1284 return getattr(self._origfh, attr)
1275 1285
1276 1286 def __setattr__(self, attr, value):
1277 1287 return setattr(self._origfh, attr, value)
1278 1288
1279 1289 def __delattr__(self, attr):
1280 1290 return delattr(self._origfh, attr)
1281 1291
1282 1292 def __enter__(self):
1283 1293 return self._origfh.__enter__()
1284 1294
1285 1295 def __exit__(self, exc_type, exc_value, exc_tb):
1286 1296 self._closer.close(self._origfh)
1287 1297
1288 1298 def close(self):
1289 1299 self._closer.close(self._origfh)
1290 1300
1291 1301 class backgroundfilecloser(object):
1292 1302 """Coordinates background closing of file handles on multiple threads."""
1293 1303 def __init__(self, ui, expectedcount=-1):
1294 1304 self._running = False
1295 1305 self._entered = False
1296 1306 self._threads = []
1297 1307 self._threadexception = None
1298 1308
1299 1309 # Only Windows/NTFS has slow file closing. So only enable by default
1300 1310 # on that platform. But allow it to be enabled elsewhere for testing.
1301 1311 defaultenabled = os.name == 'nt'
1302 1312 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1303 1313
1304 1314 if not enabled:
1305 1315 return
1306 1316
1307 1317 # There is overhead to starting and stopping the background threads.
1308 1318 # Don't do background processing unless the file count is large enough
1309 1319 # to justify it.
1310 1320 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1311 1321 2048)
1312 1322 # FUTURE dynamically start background threads after minfilecount closes.
1313 1323 # (We don't currently have any callers that don't know their file count)
1314 1324 if expectedcount > 0 and expectedcount < minfilecount:
1315 1325 return
1316 1326
1317 1327 # Windows defaults to a limit of 512 open files. A buffer of 128
1318 1328 # should give us enough headway.
1319 1329 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1320 1330 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1321 1331
1322 1332 ui.debug('starting %d threads for background file closing\n' %
1323 1333 threadcount)
1324 1334
1325 1335 self._queue = util.queue(maxsize=maxqueue)
1326 1336 self._running = True
1327 1337
1328 1338 for i in range(threadcount):
1329 1339 t = threading.Thread(target=self._worker, name='backgroundcloser')
1330 1340 self._threads.append(t)
1331 1341 t.start()
1332 1342
1333 1343 def __enter__(self):
1334 1344 self._entered = True
1335 1345 return self
1336 1346
1337 1347 def __exit__(self, exc_type, exc_value, exc_tb):
1338 1348 self._running = False
1339 1349
1340 1350 # Wait for threads to finish closing so open files don't linger for
1341 1351 # longer than lifetime of context manager.
1342 1352 for t in self._threads:
1343 1353 t.join()
1344 1354
1345 1355 def _worker(self):
1346 1356 """Main routine for worker thread."""
1347 1357 while True:
1348 1358 try:
1349 1359 fh = self._queue.get(block=True, timeout=0.100)
1350 1360 # Need to catch exceptions or the thread will terminate and
1351 1361 # we could orphan file descriptors.
1352 1362 try:
1353 1363 fh.close()
1354 1364 except Exception as e:
1355 1365 # Stash so we can re-raise from the main thread later.
1356 1366 self._threadexception = e
1357 1367 except util.empty:
1358 1368 if not self._running:
1359 1369 break
1360 1370
1361 1371 def close(self, fh):
1362 1372 """Schedule a file for closing."""
1363 1373 if not self._entered:
1364 1374 raise error.Abort('can only call close() when context manager '
1365 1375 'active')
1366 1376
1367 1377 # If a background thread encountered an exception, raise now so we fail
1368 1378 # fast. Otherwise we may potentially go on for minutes until the error
1369 1379 # is acted on.
1370 1380 if self._threadexception:
1371 1381 e = self._threadexception
1372 1382 self._threadexception = None
1373 1383 raise e
1374 1384
1375 1385 # If we're not actively running, close synchronously.
1376 1386 if not self._running:
1377 1387 fh.close()
1378 1388 return
1379 1389
1380 1390 self._queue.put(fh, block=True, timeout=None)
1381 1391