##// END OF EJS Templates
vfs: ignore EPERM at os.utime, which avoids ambiguity at renaming (issue5418)...
FUJIWARA Katsunori -
r30320:bff5ccbe stable
parent child Browse files
Show More
@@ -1,1470 +1,1469 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
class status(tuple):
    '''Tuple subclass exposing one list of files per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    meaningful for the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a subpath -> ctx mapping, preferring entries from ctx1.  The
    # ctx2 entries matter when the .hgsub file has been modified (in
    # ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # Subrepos present only in ctx2 are handled separately below.
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2, so
    # that status and diff give an accurate result for
    # 'sub.{status|diff}(rev2)'; otherwise the ctx2 subrepo would be
    # compared against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision;
            # explicitly skip it until discovery is cleaned up.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
141 141
def checknewlabel(repo, lbl, kind):
    '''Abort if lbl is not a valid new label (tag/bookmark/branch) name.'''
    # The "kind" parameter is deliberately absent from ui output: it
    # would make the strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # A purely numeric name would be ambiguous with revision numbers.
        raise error.Abort(_("cannot use an integer as a name"))
155 155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for forbidden in ('\r', '\n'):
        if forbidden in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172 172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lowered = val.lower()
    parsed = util.parsebool(val)
    # Windows can never host non-portable names, so always abort there.
    abort = os.name == 'nt' or lowered == 'abort'
    warn = parsed or lowered == 'warn'
    if parsed is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185 185
class casecollisionauditor(object):
    '''Warn or abort when a new filename collides case-insensitively
    with a tracked one.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # Remember names already audited so that calling this object
        # twice with the same filename does not report a collision of a
        # file with itself.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
209 209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current
    repository.  That is not sufficient for validating repoviews because
    the set of revisions in the view may change without the repository
    tiprev and tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest, or None when no revision is filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
233 233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # Only a missing file is tolerated; other I/O errors propagate.
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # Only a missing file is tolerated; other I/O errors propagate.
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # ``open`` is simply an alias for calling the vfs object itself.
        return self.__call__

    def read(self, path):
        '''Return the full binary content of ``path``.'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''Return the content of ``path`` as a list of lines.'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''Write ``data`` to ``path``, truncating any previous content.'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence of lines ``data`` to ``path``.'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''Append ``data`` to the end of ``path``.'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that paths stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs and return (fd, relative name).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        # Return the name relative to the vfs root, not the absolute path
        # tempfile produced.
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one;
                # tweak mtime so cache validation can tell them apart
                # (handles EPERM from os.utime internally, issue5418)
                newstat.avoidambig(dstpath, oldstat)
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # Always clear, even if the caller's block raised, so a
                # later backgroundclosing() call can succeed.
                vfs._backgroundfilecloser = None
478 477
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # ``expandpath`` expands '~' and environment variables;
        # ``realpath`` resolves symlinks in the base directory.
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        # Toggling audit swaps in either a real path auditor or a no-op.
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        # True when the filesystem under ``base`` supports symlinks.
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # True when the filesystem under ``base`` honors the exec bit.
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # Apply the configured createmode (sans exec bits) to a new file.
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks (copy-on-write) before modifying
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        # Create a symlink at ``dst`` pointing to ``src``; falls back to a
        # regular file containing ``src`` when symlinks are unsupported.
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        # A falsy path means "the vfs root itself".
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
630 629
631 630 opener = vfs
632 631
class auditvfs(object):
    '''Base class for vfs wrappers; forwards auditing and options to the
    wrapped vfs instance.'''

    def __init__(self, vfs):
        self.vfs = vfs

    @property
    def mustaudit(self):
        # Delegate to the wrapped vfs so all layers agree on auditing.
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        # Options likewise live on the wrapped vfs.
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
652 651
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs that runs every filename through a filter function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            # A falsy path means the wrapped vfs root; nothing to filter.
            return self.vfs.join(path)
        return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
668 667
669 668 filteropener = filtervfs
670 669
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # Only plain read modes may pass through to the wrapped vfs.
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
684 683
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the starting path itself are fatal.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Record dirname's stat; return True when it was not seen yet.
            dirstat = os.stat(dirname)
            for seen in dirlst:
                if samestat(dirstat, seen):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # Without samestat we cannot detect symlink cycles safely.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
732 731
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # Bundled default.d/*.rc files come first, then system and user rcs.
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
745 744
746 745 _rcpath = None
747 746
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        # computed once and cached at module level
        return _rcpath
    if 'HGRCPATH' not in encoding.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
771 770
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # The working directory is represented by None; map it to wdirrev so
    # callers can treat every revision as an integer.
    return wdirrev if rev is None else rev
778 777
def revsingle(repo, revspec, default='.'):
    '''Resolve revspec to a single changectx, falling back to default.'''
    # An empty spec falls back to the default, but the integer 0 is a
    # legitimate revision number and must not be treated as empty.
    if not revspec and revspec != 0:
        return repo[default]

    resolved = revrange(repo, [revspec])
    if not resolved:
        raise error.Abort(_('empty revision set'))
    return repo[resolved.last()]
787 786
def _pairspec(revspec):
    # Return True when the revset's top-level operator is some form of
    # range, meaning the caller must always produce a pair of revisions.
    tree = revset.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
791 790
def revpair(repo, revs):
    '''Resolve revs to a pair of nodes (second may be None).'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    # Pick the endpoints cheaply when the set's order is known.
    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
821 820
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Integers are wrapped as rev(N); strings pass through untouched.
    allspecs = [revset.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)
850 849
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents are always meaningful
        return parents
    if repo.ui.debugflag:
        # in debug mode, show the null parent explicitly as well
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        # the parent immediately precedes ctx, so it is implied
        return []
    return parents
866 865
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicitly-kinded patterns are never glob-expanded
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match on disk: keep the pattern as-is
            ret.append(kindpat)
    return ret
885 884
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    if badfn is None:
        # Default callback: warn on the repo ui.  Note that ``m`` is bound
        # late, after the matcher below is created.
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
910 909
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
915 914
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # Delegates to matchmod.always, anchored at the repo root and cwd.
    return matchmod.always(repo.root, repo.getcwd())
919 918
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # Delegates to matchmod.exact; no pattern expansion is performed.
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
923 922
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backuppath = ui.config('ui', 'origbackuppath', None)
    if backuppath is None:
        # default: keep the backup right next to the original file
        return filepath + ".orig"

    # mirror the file's repo-relative path under the configured directory
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
943 942
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule addition of unknown files and removal of missing files.

    Walks the dirstate with 'matcher' (recursing into matched subrepos),
    marks unknown files as added and vanished files as removed, and records
    likely renames between the two sets when 'similarity' > 0.  'dry_run'
    and 'similarity' fall back to the corresponding entries in 'opts' when
    passed as None.

    Returns 1 if an explicitly-named file was rejected by the matcher or a
    subrepo's addremove reported failure, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only complain about files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added/removed (exact matches only in verbose mode)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
999 998
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # bind the rejection list before handing the closure to matchfiles;
    # the lambda only appends to it lazily, when the matcher hits a bad file
    rejected = []
    matcher = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    matcher)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        for path in sorted(unknownset | set(deleted)):
            if path in unknownset:
                msg = _('adding %s\n') % path
            else:
                msg = _('removing %s\n') % path
            repo.ui.status(msg)

    renames = _findrenames(repo, matcher, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in matcher.files():
            return 1
    return 0
1028 1027
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of repo-relative paths:
    (added, unknown, deleted, removed, forgotten).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        # dstate is the dirstate code ('?' untracked, 'r' removed, 'a' added,
        # ...); st is the walk's stat result, falsy when the file is gone
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1057 1056
1058 1057 def _findrenames(repo, matcher, added, removed, similarity):
1059 1058 '''Find renames from removed files to added ones.'''
1060 1059 renames = {}
1061 1060 if similarity > 0:
1062 1061 for old, new, score in similar.findrenames(repo, added, removed,
1063 1062 similarity):
1064 1063 if (repo.ui.verbose or not matcher.exact(old)
1065 1064 or not matcher.exact(new)):
1066 1065 repo.ui.status(_('recording removal of %s as rename to %s '
1067 1066 '(%d%% similar)\n') %
1068 1067 (matcher.rel(old), matcher.rel(new),
1069 1068 score * 100))
1070 1069 renames[new] = old
1071 1070 return renames
1072 1071
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    workingctx = repo[None]
    # all dirstate mutations happen under the working-directory lock
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
1082 1081
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain: if src is itself a copy, record the
    # original source instead
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only just added: there is no committed revision
            # to record copy metadata against, so only add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1101 1100
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for requirement in requirements:
        if requirement in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file itself
        # is damaged, not merely written by a newer Mercurial
        if not requirement or not requirement[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(requirement)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1120 1119
def writerequires(opener, requirements):
    """Write the requirements, sorted, one per line, to .hg/requires."""
    reqlines = ['%s\n' % r for r in sorted(requirements)]
    with opener('requires', 'w') as fp:
        for line in reqlines:
            fp.write(line)
1125 1124
class filecachesubentry(object):
    """Stat-cache record for a single path.

    Holds a util.cachestat snapshot of the path and reports, via changed(),
    whether the file on disk now differs from that snapshot.
    """
    def __init__(self, path, stat):
        # note: the boolean 'stat' parameter shadows the staticmethod below
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-snapshot only when the filesystem can reliably signal changes
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns a util.cachestat, or None (implicitly) when the file
        # does not exist; other stat failures propagate
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1180 1179
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""
    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits exactly like the original explicit loop
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1197 1196
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    Invariant: a name present in obj.__dict__ is also present in
    obj._filecache (the fast path in __get__ asserts this).
    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # file changed on disk: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

        obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # surface a missing cached value through the attribute protocol
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1276 1275
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd as a subprocess, exporting the lock-inheritance token.

    'lock' must currently be held; its inheritance token is placed in the
    environment variable 'envvar' so the child can take over the lock.
    Returns the subprocess exit code from ui.system.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as token:
        environ[envvar] = token
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1286 1285
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1295 1294
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    explicit = ui.configbool('format', 'generaldelta', False)
    if explicit:
        return explicit
    return ui.configbool('format', 'usegeneraldelta', True)
1302 1301
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1308 1307
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Delegates all attribute reads/writes/deletes to the wrapped file
    handle; subclasses override __exit__/close to customize closing.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # use object.__setattr__ directly: our own __setattr__ would
        # forward the assignment to the wrapped handle instead
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        # only invoked for attributes not found on the proxy itself
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        # subclasses must provide closing behavior
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1334 1333
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # sidestep closewrapbase.__setattr__, which forwards to the file
        object.__setattr__(self, '_closer', closer)

    def _handoff(self):
        # delegate the actual close of the real handle to the closer
        self._closer.close(self._origfh)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._handoff()

    def close(self):
        self._handoff()
1349 1348
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # _running: worker threads exist and accept work
        # _entered: context manager is active (close() requires this)
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so the loop can notice _running going False
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1440 1439
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # bypass closewrapbase.__setattr__, which forwards to the file
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        """Advance the file's mtime if closing left its stat ambiguous.

        If the post-close stat is ambiguous relative to the pre-open one
        (see util.filestat.isambig), cached readers could not detect the
        change, so nudge st_mtime forward by one second.
        """
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                try:
                    os.utime(self._origfh.name, (advanced, advanced))
                except OSError as inst:
                    if inst.errno != errno.EPERM:
                        raise
                    # utime() on a file owned by another user raises
                    # EPERM; the write itself already succeeded, so
                    # leaving the timestamp ambiguous is preferable to
                    # aborting here (issue5418)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now