scmutil: ignore EPERM at os.utime, which avoids ambiguity at closing...
FUJIWARA Katsunori
r30321:e0ff4799 stable
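
The only functional change in this diff is at the bottom of the file: checkambigatclosing._checkambig() no longer advances the file's mtime inline with os.utime(), which raises EPERM when the file is owned by another user, and instead delegates to newstat.avoidambig(). A minimal sketch of what such a helper has to do, per the commit message (the real implementation lives in util.filestat and is assumed here):

    import errno
    import os

    def avoidambig(path, old):
        # Advance st_mtime by one second (truncated to 31 bits, matching
        # the inline code removed below) so the new stat is unambiguous
        # relative to 'old'.
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on a file created by another user raises EPERM
                # when the process lacks privileges; skip disambiguation
                # rather than aborting the close.
                return
            raise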
@@ -1,1469 +1,1468 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
35 35 if os.name == 'nt':
36 36 from . import scmwindows as scmplatform
37 37 else:
38 38 from . import scmposix as scmplatform
39 39
40 40 systemrcpath = scmplatform.systemrcpath
41 41 userrcpath = scmplatform.userrcpath
42 42
43 43 class status(tuple):
44 44 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
45 45 and 'ignored' properties are only relevant to the working copy.
46 46 '''
47 47
48 48 __slots__ = ()
49 49
50 50 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
51 51 clean):
52 52 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
53 53 ignored, clean))
54 54
55 55 @property
56 56 def modified(self):
57 57 '''files that have been modified'''
58 58 return self[0]
59 59
60 60 @property
61 61 def added(self):
62 62 '''files that have been added'''
63 63 return self[1]
64 64
65 65 @property
66 66 def removed(self):
67 67 '''files that have been removed'''
68 68 return self[2]
69 69
70 70 @property
71 71 def deleted(self):
72 72 '''files that are in the dirstate, but have been deleted from the
73 73 working copy (aka "missing")
74 74 '''
75 75 return self[3]
76 76
77 77 @property
78 78 def unknown(self):
79 79 '''files not in the dirstate that are not ignored'''
80 80 return self[4]
81 81
82 82 @property
83 83 def ignored(self):
84 84 '''files not in the dirstate that are ignored (by _dirignore())'''
85 85 return self[5]
86 86
87 87 @property
88 88 def clean(self):
89 89 '''files that have not been modified'''
90 90 return self[6]
91 91
92 92 def __repr__(self, *args, **kwargs):
93 93 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
94 94 'unknown=%r, ignored=%r, clean=%r>') % self)
95 95
96 96 def itersubrepos(ctx1, ctx2):
97 97 """find subrepos in ctx1 or ctx2"""
98 98 # Create a (subpath, ctx) mapping where we prefer subpaths from
99 99 # ctx1. The subpaths from ctx2 are important when the .hgsub file
100 100 # has been modified (in ctx2) but not yet committed (in ctx1).
101 101 subpaths = dict.fromkeys(ctx2.substate, ctx2)
102 102 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
103 103
104 104 missing = set()
105 105
106 106 for subpath in ctx2.substate:
107 107 if subpath not in ctx1.substate:
108 108 del subpaths[subpath]
109 109 missing.add(subpath)
110 110
111 111 for subpath, ctx in sorted(subpaths.iteritems()):
112 112 yield subpath, ctx.sub(subpath)
113 113
114 114 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
115 115 # status and diff will have an accurate result when it does
116 116 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
117 117 # against itself.
118 118 for subpath in missing:
119 119 yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
121 121 def nochangesfound(ui, repo, excluded=None):
121 121 '''Report no changes for push/pull. excluded is None or a list of
123 123 nodes excluded from the push/pull.
124 124 '''
125 125 secretlist = []
126 126 if excluded:
127 127 for n in excluded:
128 128 if n not in repo:
129 129 # discovery should not have included the filtered revision,
130 130 # we have to explicitly exclude it until discovery is cleaned up.
131 131 continue
132 132 ctx = repo[n]
133 133 if ctx.phase() >= phases.secret and not ctx.extinct():
134 134 secretlist.append(n)
135 135
136 136 if secretlist:
137 137 ui.status(_("no changes found (ignored %d secret changesets)\n")
138 138 % len(secretlist))
139 139 else:
140 140 ui.status(_("no changes found\n"))
141 141
142 142 def checknewlabel(repo, lbl, kind):
143 143 # Do not use the "kind" parameter in ui output.
144 144 # It makes strings difficult to translate.
145 145 if lbl in ['tip', '.', 'null']:
146 146 raise error.Abort(_("the name '%s' is reserved") % lbl)
147 147 for c in (':', '\0', '\n', '\r'):
148 148 if c in lbl:
149 149 raise error.Abort(_("%r cannot be used in a name") % c)
150 150 try:
151 151 int(lbl)
152 152 raise error.Abort(_("cannot use an integer as a name"))
153 153 except ValueError:
154 154 pass
155 155
156 156 def checkfilename(f):
157 157 '''Check that the filename f is an acceptable filename for a tracked file'''
158 158 if '\r' in f or '\n' in f:
159 159 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
161 161 def checkportable(ui, f):
162 162 '''Check if filename f is portable and warn or abort depending on config'''
163 163 checkfilename(f)
164 164 abort, warn = checkportabilityalert(ui)
165 165 if abort or warn:
166 166 msg = util.checkwinfilename(f)
167 167 if msg:
168 168 msg = "%s: %r" % (msg, f)
169 169 if abort:
170 170 raise error.Abort(msg)
171 171 ui.warn(_("warning: %s\n") % msg)
172 172
173 173 def checkportabilityalert(ui):
174 174 '''check if the user's config requests nothing, a warning, or abort for
175 175 non-portable filenames'''
176 176 val = ui.config('ui', 'portablefilenames', 'warn')
177 177 lval = val.lower()
178 178 bval = util.parsebool(val)
179 179 abort = os.name == 'nt' or lval == 'abort'
180 180 warn = bval or lval == 'warn'
181 181 if bval is None and not (warn or abort or lval == 'ignore'):
182 182 raise error.ConfigError(
183 183 _("ui.portablefilenames value is invalid ('%s')") % val)
184 184 return abort, warn
185 185
186 186 class casecollisionauditor(object):
187 187 def __init__(self, ui, abort, dirstate):
188 188 self._ui = ui
189 189 self._abort = abort
190 190 allfiles = '\0'.join(dirstate._map)
191 191 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
192 192 self._dirstate = dirstate
193 193 # The purpose of _newfiles is so that we don't complain about
194 194 # case collisions if someone were to call this object with the
195 195 # same filename twice.
196 196 self._newfiles = set()
197 197
198 198 def __call__(self, f):
199 199 if f in self._newfiles:
200 200 return
201 201 fl = encoding.lower(f)
202 202 if fl in self._loweredfiles and f not in self._dirstate:
203 203 msg = _('possible case-folding collision for %s') % f
204 204 if self._abort:
205 205 raise error.Abort(msg)
206 206 self._ui.warn(_("warning: %s\n") % msg)
207 207 self._loweredfiles.add(fl)
208 208 self._newfiles.add(f)
209 209
210 210 def filteredhash(repo, maxrev):
211 211 """build hash of filtered revisions in the current repoview.
212 212
213 213 Multiple caches perform up-to-date validation by checking that the
214 214 tiprev and tipnode stored in the cache file match the current repository.
215 215 However, this is not sufficient for validating repoviews because the set
216 216 of revisions in the view may change without the repository tiprev and
217 217 tipnode changing.
218 218
219 219 This function hashes all the revs filtered from the view and returns
220 220 that SHA-1 digest.
221 221 """
222 222 cl = repo.changelog
223 223 if not cl.filteredrevs:
224 224 return None
225 225 key = None
226 226 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
227 227 if revs:
228 228 s = hashlib.sha1()
229 229 for rev in revs:
230 230 s.update('%s;' % rev)
231 231 key = s.digest()
232 232 return key
233 233
234 234 class abstractvfs(object):
235 235 """Abstract base class; cannot be instantiated"""
236 236
237 237 def __init__(self, *args, **kwargs):
238 238 '''Prevent instantiation; don't call this from subclasses.'''
239 239 raise NotImplementedError('attempted instantiating ' + str(type(self)))
240 240
241 241 def tryread(self, path):
242 242 '''gracefully return an empty string for missing files'''
243 243 try:
244 244 return self.read(path)
245 245 except IOError as inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248 return ""
249 249
250 250 def tryreadlines(self, path, mode='rb'):
251 251 '''gracefully return an empty array for missing files'''
252 252 try:
253 253 return self.readlines(path, mode=mode)
254 254 except IOError as inst:
255 255 if inst.errno != errno.ENOENT:
256 256 raise
257 257 return []
258 258
259 259 @util.propertycache
260 260 def open(self):
261 261 '''Open ``path`` file, which is relative to vfs root.
262 262
263 263 Newly created directories are marked as "not to be indexed by
264 264 the content indexing service", if ``notindexed`` is specified
265 265 for "write" mode access.
266 266 '''
267 267 return self.__call__
268 268
269 269 def read(self, path):
270 270 with self(path, 'rb') as fp:
271 271 return fp.read()
272 272
273 273 def readlines(self, path, mode='rb'):
274 274 with self(path, mode=mode) as fp:
275 275 return fp.readlines()
276 276
277 277 def write(self, path, data, backgroundclose=False):
278 278 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
279 279 return fp.write(data)
280 280
281 281 def writelines(self, path, data, mode='wb', notindexed=False):
282 282 with self(path, mode=mode, notindexed=notindexed) as fp:
283 283 return fp.writelines(data)
284 284
285 285 def append(self, path, data):
286 286 with self(path, 'ab') as fp:
287 287 return fp.write(data)
288 288
289 289 def basename(self, path):
290 290 """return base element of a path (as os.path.basename would do)
291 291
292 292 This exists to allow handling of strange encoding if needed."""
293 293 return os.path.basename(path)
294 294
295 295 def chmod(self, path, mode):
296 296 return os.chmod(self.join(path), mode)
297 297
298 298 def dirname(self, path):
299 299 """return dirname element of a path (as os.path.dirname would do)
300 300
301 301 This exists to allow handling of strange encoding if needed."""
302 302 return os.path.dirname(path)
303 303
304 304 def exists(self, path=None):
305 305 return os.path.exists(self.join(path))
306 306
307 307 def fstat(self, fp):
308 308 return util.fstat(fp)
309 309
310 310 def isdir(self, path=None):
311 311 return os.path.isdir(self.join(path))
312 312
313 313 def isfile(self, path=None):
314 314 return os.path.isfile(self.join(path))
315 315
316 316 def islink(self, path=None):
317 317 return os.path.islink(self.join(path))
318 318
319 319 def isfileorlink(self, path=None):
320 320 '''return whether path is a regular file or a symlink
321 321
322 322 Unlike isfile, this doesn't follow symlinks.'''
323 323 try:
324 324 st = self.lstat(path)
325 325 except OSError:
326 326 return False
327 327 mode = st.st_mode
328 328 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
329 329
330 330 def reljoin(self, *paths):
331 331 """join various elements of a path together (as os.path.join would do)
332 332
333 333 The vfs base is not injected so that paths stay relative. This exists
334 334 to allow handling of strange encoding if needed."""
335 335 return os.path.join(*paths)
336 336
337 337 def split(self, path):
338 338 """split top-most element of a path (as os.path.split would do)
339 339
340 340 This exists to allow handling of strange encoding if needed."""
341 341 return os.path.split(path)
342 342
343 343 def lexists(self, path=None):
344 344 return os.path.lexists(self.join(path))
345 345
346 346 def lstat(self, path=None):
347 347 return os.lstat(self.join(path))
348 348
349 349 def listdir(self, path=None):
350 350 return os.listdir(self.join(path))
351 351
352 352 def makedir(self, path=None, notindexed=True):
353 353 return util.makedir(self.join(path), notindexed)
354 354
355 355 def makedirs(self, path=None, mode=None):
356 356 return util.makedirs(self.join(path), mode)
357 357
358 358 def makelock(self, info, path):
359 359 return util.makelock(info, self.join(path))
360 360
361 361 def mkdir(self, path=None):
362 362 return os.mkdir(self.join(path))
363 363
364 364 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
365 365 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
366 366 dir=self.join(dir), text=text)
367 367 dname, fname = util.split(name)
368 368 if dir:
369 369 return fd, os.path.join(dir, fname)
370 370 else:
371 371 return fd, fname
372 372
373 373 def readdir(self, path=None, stat=None, skip=None):
374 374 return osutil.listdir(self.join(path), stat, skip)
375 375
376 376 def readlock(self, path):
377 377 return util.readlock(self.join(path))
378 378
379 379 def rename(self, src, dst, checkambig=False):
380 380 """Rename from src to dst
381 381
382 382 checkambig argument is used with util.filestat, and is useful
383 383 only if destination file is guarded by any lock
384 384 (e.g. repo.lock or repo.wlock).
385 385 """
386 386 dstpath = self.join(dst)
387 387 oldstat = checkambig and util.filestat(dstpath)
388 388 if oldstat and oldstat.stat:
389 389 ret = util.rename(self.join(src), dstpath)
390 390 newstat = util.filestat(dstpath)
391 391 if newstat.isambig(oldstat):
392 392 # stat of renamed file is ambiguous to original one
393 393 newstat.avoidambig(dstpath, oldstat)
394 394 return ret
395 395 return util.rename(self.join(src), dstpath)
396 396
397 397 def readlink(self, path):
398 398 return os.readlink(self.join(path))
399 399
400 400 def removedirs(self, path=None):
401 401 """Remove a leaf directory and all empty intermediate ones
402 402 """
403 403 return util.removedirs(self.join(path))
404 404
405 405 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
406 406 """Remove a directory tree recursively
407 407
408 408 If ``forcibly``, this tries to remove READ-ONLY files, too.
409 409 """
410 410 if forcibly:
411 411 def onerror(function, path, excinfo):
412 412 if function is not os.remove:
413 413 raise
414 414 # read-only files cannot be unlinked under Windows
415 415 s = os.stat(path)
416 416 if (s.st_mode & stat.S_IWRITE) != 0:
417 417 raise
418 418 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
419 419 os.remove(path)
420 420 else:
421 421 onerror = None
422 422 return shutil.rmtree(self.join(path),
423 423 ignore_errors=ignore_errors, onerror=onerror)
424 424
425 425 def setflags(self, path, l, x):
426 426 return util.setflags(self.join(path), l, x)
427 427
428 428 def stat(self, path=None):
429 429 return os.stat(self.join(path))
430 430
431 431 def unlink(self, path=None):
432 432 return util.unlink(self.join(path))
433 433
434 434 def unlinkpath(self, path=None, ignoremissing=False):
435 435 return util.unlinkpath(self.join(path), ignoremissing)
436 436
437 437 def utime(self, path=None, t=None):
438 438 return os.utime(self.join(path), t)
439 439
440 440 def walk(self, path=None, onerror=None):
441 441 """Yield (dirpath, dirs, files) tuple for each directories under path
442 442
443 443 ``dirpath`` is relative one from the root of this vfs. This
444 444 uses ``os.sep`` as path separator, even you specify POSIX
445 445 style ``path``.
446 446
447 447 "The root of this vfs" is represented as empty ``dirpath``.
448 448 """
449 449 root = os.path.normpath(self.join(None))
450 450 # when dirpath == root, dirpath[prefixlen:] becomes empty
451 451 # because len(dirpath) < prefixlen.
452 452 prefixlen = len(pathutil.normasprefix(root))
453 453 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
454 454 yield (dirpath[prefixlen:], dirs, files)
455 455
456 456 @contextlib.contextmanager
457 457 def backgroundclosing(self, ui, expectedcount=-1):
458 458 """Allow files to be closed asynchronously.
459 459
460 460 When this context manager is active, ``backgroundclose`` can be passed
461 461 to ``__call__``/``open`` to result in the file possibly being closed
462 462 asynchronously, on a background thread.
463 463 """
464 464 # This is an arbitrary restriction and could be changed if we ever
465 465 # have a use case.
466 466 vfs = getattr(self, 'vfs', self)
467 467 if getattr(vfs, '_backgroundfilecloser', None):
468 468 raise error.Abort(
469 469 _('can only have 1 active background file closer'))
470 470
471 471 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
472 472 try:
473 473 vfs._backgroundfilecloser = bfc
474 474 yield bfc
475 475 finally:
476 476 vfs._backgroundfilecloser = None
477 477
478 478 class vfs(abstractvfs):
479 479 '''Operate files relative to a base directory
480 480
481 481 This class is used to hide the details of COW semantics and
482 482 remote file access from higher level code.
483 483 '''
484 484 def __init__(self, base, audit=True, expandpath=False, realpath=False):
485 485 if expandpath:
486 486 base = util.expandpath(base)
487 487 if realpath:
488 488 base = os.path.realpath(base)
489 489 self.base = base
490 490 self.mustaudit = audit
491 491 self.createmode = None
492 492 self._trustnlink = None
493 493
494 494 @property
495 495 def mustaudit(self):
496 496 return self._audit
497 497
498 498 @mustaudit.setter
499 499 def mustaudit(self, onoff):
500 500 self._audit = onoff
501 501 if onoff:
502 502 self.audit = pathutil.pathauditor(self.base)
503 503 else:
504 504 self.audit = util.always
505 505
506 506 @util.propertycache
507 507 def _cansymlink(self):
508 508 return util.checklink(self.base)
509 509
510 510 @util.propertycache
511 511 def _chmod(self):
512 512 return util.checkexec(self.base)
513 513
514 514 def _fixfilemode(self, name):
515 515 if self.createmode is None or not self._chmod:
516 516 return
517 517 os.chmod(name, self.createmode & 0o666)
518 518
519 519 def __call__(self, path, mode="r", text=False, atomictemp=False,
520 520 notindexed=False, backgroundclose=False, checkambig=False):
521 521 '''Open ``path`` file, which is relative to vfs root.
522 522
523 523 Newly created directories are marked as "not to be indexed by
524 524 the content indexing service", if ``notindexed`` is specified
525 525 for "write" mode access.
526 526
527 527 If ``backgroundclose`` is passed, the file may be closed asynchronously.
528 528 It can only be used if the ``self.backgroundclosing()`` context manager
529 529 is active. This should only be specified if the following criteria hold:
530 530
531 531 1. There is a potential for writing thousands of files. Unless you
532 532 are writing thousands of files, the performance benefits of
533 533 asynchronously closing files are not realized.
534 534 2. Files are opened exactly once for the ``backgroundclosing``
535 535 active duration and are therefore free of race conditions between
536 536 closing a file on a background thread and reopening it. (If the
537 537 file were opened multiple times, there could be unflushed data
538 538 because the original file handle hasn't been flushed/closed yet.)
539 539
540 540 ``checkambig`` argument is passed to atomictempfile (valid
541 541 only for writing), and is useful only if target file is
542 542 guarded by any lock (e.g. repo.lock or repo.wlock).
543 543 '''
544 544 if self._audit:
545 545 r = util.checkosfilename(path)
546 546 if r:
547 547 raise error.Abort("%s: %r" % (r, path))
548 548 self.audit(path)
549 549 f = self.join(path)
550 550
551 551 if not text and "b" not in mode:
552 552 mode += "b" # for that other OS
553 553
554 554 nlink = -1
555 555 if mode not in ('r', 'rb'):
556 556 dirname, basename = util.split(f)
557 557 # If basename is empty, then the path is malformed because it points
558 558 # to a directory. Let the posixfile() call below raise IOError.
559 559 if basename:
560 560 if atomictemp:
561 561 util.makedirs(dirname, self.createmode, notindexed)
562 562 return util.atomictempfile(f, mode, self.createmode,
563 563 checkambig=checkambig)
564 564 try:
565 565 if 'w' in mode:
566 566 util.unlink(f)
567 567 nlink = 0
568 568 else:
569 569 # nlinks() may behave differently for files on Windows
570 570 # shares if the file is open.
571 571 with util.posixfile(f):
572 572 nlink = util.nlinks(f)
573 573 if nlink < 1:
574 574 nlink = 2 # force mktempcopy (issue1922)
575 575 except (OSError, IOError) as e:
576 576 if e.errno != errno.ENOENT:
577 577 raise
578 578 nlink = 0
579 579 util.makedirs(dirname, self.createmode, notindexed)
580 580 if nlink > 0:
581 581 if self._trustnlink is None:
582 582 self._trustnlink = nlink > 1 or util.checknlink(f)
583 583 if nlink > 1 or not self._trustnlink:
584 584 util.rename(util.mktempcopy(f), f)
585 585 fp = util.posixfile(f, mode)
586 586 if nlink == 0:
587 587 self._fixfilemode(f)
588 588
589 589 if checkambig:
590 590 if mode in ('r', 'rb'):
591 591 raise error.Abort(_('implementation error: mode %s is not'
592 592 ' valid for checkambig=True') % mode)
593 593 fp = checkambigatclosing(fp)
594 594
595 595 if backgroundclose:
596 596 if not self._backgroundfilecloser:
597 597 raise error.Abort(_('backgroundclose can only be used when a '
598 598 'backgroundclosing context manager is active')
599 599 )
600 600
601 601 fp = delayclosedfile(fp, self._backgroundfilecloser)
602 602
603 603 return fp
604 604
605 605 def symlink(self, src, dst):
606 606 self.audit(dst)
607 607 linkname = self.join(dst)
608 608 try:
609 609 os.unlink(linkname)
610 610 except OSError:
611 611 pass
612 612
613 613 util.makedirs(os.path.dirname(linkname), self.createmode)
614 614
615 615 if self._cansymlink:
616 616 try:
617 617 os.symlink(src, linkname)
618 618 except OSError as err:
619 619 raise OSError(err.errno, _('could not symlink to %r: %s') %
620 620 (src, err.strerror), linkname)
621 621 else:
622 622 self.write(dst, src)
623 623
624 624 def join(self, path, *insidef):
625 625 if path:
626 626 return os.path.join(self.base, path, *insidef)
627 627 else:
628 628 return self.base
629 629
630 630 opener = vfs
631 631
632 632 class auditvfs(object):
633 633 def __init__(self, vfs):
634 634 self.vfs = vfs
635 635
636 636 @property
637 637 def mustaudit(self):
638 638 return self.vfs.mustaudit
639 639
640 640 @mustaudit.setter
641 641 def mustaudit(self, onoff):
642 642 self.vfs.mustaudit = onoff
643 643
644 644 @property
645 645 def options(self):
646 646 return self.vfs.options
647 647
648 648 @options.setter
649 649 def options(self, value):
650 650 self.vfs.options = value
651 651
652 652 class filtervfs(abstractvfs, auditvfs):
653 653 '''Wrapper vfs for filtering filenames with a function.'''
654 654
655 655 def __init__(self, vfs, filter):
656 656 auditvfs.__init__(self, vfs)
657 657 self._filter = filter
658 658
659 659 def __call__(self, path, *args, **kwargs):
660 660 return self.vfs(self._filter(path), *args, **kwargs)
661 661
662 662 def join(self, path, *insidef):
663 663 if path:
664 664 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
665 665 else:
666 666 return self.vfs.join(path)
667 667
668 668 filteropener = filtervfs
669 669
670 670 class readonlyvfs(abstractvfs, auditvfs):
671 671 '''Wrapper vfs preventing any writing.'''
672 672
673 673 def __init__(self, vfs):
674 674 auditvfs.__init__(self, vfs)
675 675
676 676 def __call__(self, path, mode='r', *args, **kw):
677 677 if mode not in ('r', 'rb'):
678 678 raise error.Abort(_('this vfs is read only'))
679 679 return self.vfs(path, mode, *args, **kw)
680 680
681 681 def join(self, path, *insidef):
682 682 return self.vfs.join(path, *insidef)
683 683
684 684 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
685 685 '''yield every hg repository under path, always recursively.
686 686 The recurse flag will only control recursion into repo working dirs'''
687 687 def errhandler(err):
688 688 if err.filename == path:
689 689 raise err
690 690 samestat = getattr(os.path, 'samestat', None)
691 691 if followsym and samestat is not None:
692 692 def adddir(dirlst, dirname):
693 693 match = False
694 694 dirstat = os.stat(dirname)
695 695 for lstdirstat in dirlst:
696 696 if samestat(dirstat, lstdirstat):
697 697 match = True
698 698 break
699 699 if not match:
700 700 dirlst.append(dirstat)
701 701 return not match
702 702 else:
703 703 followsym = False
704 704
705 705 if (seen_dirs is None) and followsym:
706 706 seen_dirs = []
707 707 adddir(seen_dirs, path)
708 708 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
709 709 dirs.sort()
710 710 if '.hg' in dirs:
711 711 yield root # found a repository
712 712 qroot = os.path.join(root, '.hg', 'patches')
713 713 if os.path.isdir(os.path.join(qroot, '.hg')):
714 714 yield qroot # we have a patch queue repo here
715 715 if recurse:
716 716 # avoid recursing inside the .hg directory
717 717 dirs.remove('.hg')
718 718 else:
719 719 dirs[:] = [] # don't descend further
720 720 elif followsym:
721 721 newdirs = []
722 722 for d in dirs:
723 723 fname = os.path.join(root, d)
724 724 if adddir(seen_dirs, fname):
725 725 if os.path.islink(fname):
726 726 for hgname in walkrepos(fname, True, seen_dirs):
727 727 yield hgname
728 728 else:
729 729 newdirs.append(d)
730 730 dirs[:] = newdirs
731 731
732 732 def osrcpath():
733 733 '''return default os-specific hgrc search path'''
734 734 path = []
735 735 defaultpath = os.path.join(util.datapath, 'default.d')
736 736 if os.path.isdir(defaultpath):
737 737 for f, kind in osutil.listdir(defaultpath):
738 738 if f.endswith('.rc'):
739 739 path.append(os.path.join(defaultpath, f))
740 740 path.extend(systemrcpath())
741 741 path.extend(userrcpath())
742 742 path = [os.path.normpath(f) for f in path]
743 743 return path
744 744
745 745 _rcpath = None
746 746
747 747 def rcpath():
748 748 '''return hgrc search path. if env var HGRCPATH is set, use it.
749 749 for each item in path, if directory, use files ending in .rc,
750 750 else use item.
751 751 make HGRCPATH empty to only look in .hg/hgrc of current repo.
752 752 if no HGRCPATH, use default os-specific path.'''
753 753 global _rcpath
754 754 if _rcpath is None:
755 755 if 'HGRCPATH' in encoding.environ:
756 756 _rcpath = []
757 757 for p in os.environ['HGRCPATH'].split(os.pathsep):
758 758 if not p:
759 759 continue
760 760 p = util.expandpath(p)
761 761 if os.path.isdir(p):
762 762 for f, kind in osutil.listdir(p):
763 763 if f.endswith('.rc'):
764 764 _rcpath.append(os.path.join(p, f))
765 765 else:
766 766 _rcpath.append(p)
767 767 else:
768 768 _rcpath = osrcpath()
769 769 return _rcpath
770 770
771 771 def intrev(rev):
772 772 """Return integer for a given revision that can be used in comparison or
773 773 arithmetic operation"""
774 774 if rev is None:
775 775 return wdirrev
776 776 return rev
777 777
778 778 def revsingle(repo, revspec, default='.'):
779 779 if not revspec and revspec != 0:
780 780 return repo[default]
781 781
782 782 l = revrange(repo, [revspec])
783 783 if not l:
784 784 raise error.Abort(_('empty revision set'))
785 785 return repo[l.last()]
786 786
787 787 def _pairspec(revspec):
788 788 tree = revset.parse(revspec)
789 789 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
790 790
791 791 def revpair(repo, revs):
792 792 if not revs:
793 793 return repo.dirstate.p1(), None
794 794
795 795 l = revrange(repo, revs)
796 796
797 797 if not l:
798 798 first = second = None
799 799 elif l.isascending():
800 800 first = l.min()
801 801 second = l.max()
802 802 elif l.isdescending():
803 803 first = l.max()
804 804 second = l.min()
805 805 else:
806 806 first = l.first()
807 807 second = l.last()
808 808
809 809 if first is None:
810 810 raise error.Abort(_('empty revision range'))
811 811 if (first == second and len(revs) >= 2
812 812 and not all(revrange(repo, [r]) for r in revs)):
813 813 raise error.Abort(_('empty revision on one side of range'))
814 814
815 815 # if top-level is range expression, the result must always be a pair
816 816 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
817 817 return repo.lookup(first), None
818 818
819 819 return repo.lookup(first), repo.lookup(second)
820 820
821 821 def revrange(repo, specs):
822 822 """Execute 1 to many revsets and return the union.
823 823
824 824 This is the preferred mechanism for executing revsets using user-specified
825 825 config options, such as revset aliases.
826 826
827 827 The revsets specified by ``specs`` will be executed via a chained ``OR``
828 828 expression. If ``specs`` is empty, an empty result is returned.
829 829
830 830 ``specs`` can contain integers, in which case they are assumed to be
831 831 revision numbers.
832 832
833 833 It is assumed the revsets are already formatted. If you have arguments
834 834 that need to be expanded in the revset, call ``revset.formatspec()``
835 835 and pass the result as an element of ``specs``.
836 836
837 837 Specifying a single revset is allowed.
838 838
839 839 Returns a ``revset.abstractsmartset`` which is a list-like interface over
840 840 integer revisions.
841 841 """
842 842 allspecs = []
843 843 for spec in specs:
844 844 if isinstance(spec, int):
845 845 spec = revset.formatspec('rev(%d)', spec)
846 846 allspecs.append(spec)
847 847 m = revset.matchany(repo.ui, allspecs, repo)
848 848 return m(repo)
849 849
850 850 def meaningfulparents(repo, ctx):
851 851 """Return list of meaningful (or all if debug) parentrevs for rev.
852 852
853 853 For merges (two non-nullrev revisions) both parents are meaningful.
854 854 Otherwise the first parent revision is considered meaningful if it
855 855 is not the preceding revision.
856 856 """
857 857 parents = ctx.parents()
858 858 if len(parents) > 1:
859 859 return parents
860 860 if repo.ui.debugflag:
861 861 return [parents[0], repo['null']]
862 862 if parents[0].rev() >= intrev(ctx.rev()) - 1:
863 863 return []
864 864 return parents
865 865
866 866 def expandpats(pats):
867 867 '''Expand bare globs when running on windows.
868 868 On posix we assume it has already been done by sh.'''
869 869 if not util.expandglobs:
870 870 return list(pats)
871 871 ret = []
872 872 for kindpat in pats:
873 873 kind, pat = matchmod._patsplit(kindpat, None)
874 874 if kind is None:
875 875 try:
876 876 globbed = glob.glob(pat)
877 877 except re.error:
878 878 globbed = [pat]
879 879 if globbed:
880 880 ret.extend(globbed)
881 881 continue
882 882 ret.append(kindpat)
883 883 return ret
884 884
885 885 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
886 886 badfn=None):
887 887 '''Return a matcher and the patterns that were used.
888 888 The matcher will warn about bad matches, unless an alternate badfn callback
889 889 is provided.'''
890 890 if pats == ("",):
891 891 pats = []
892 892 if opts is None:
893 893 opts = {}
894 894 if not globbed and default == 'relpath':
895 895 pats = expandpats(pats or [])
896 896
897 897 def bad(f, msg):
898 898 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
899 899
900 900 if badfn is None:
901 901 badfn = bad
902 902
903 903 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
904 904 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
905 905
906 906 if m.always():
907 907 pats = []
908 908 return m, pats
909 909
910 910 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
911 911 badfn=None):
912 912 '''Return a matcher that will warn about bad matches.'''
913 913 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
914 914
915 915 def matchall(repo):
916 916 '''Return a matcher that will efficiently match everything.'''
917 917 return matchmod.always(repo.root, repo.getcwd())
918 918
919 919 def matchfiles(repo, files, badfn=None):
920 920 '''Return a matcher that will efficiently match exactly these files.'''
921 921 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
922 922
923 923 def origpath(ui, repo, filepath):
924 924 '''customize where .orig files are created
925 925
926 926 Fetch user defined path from config file: [ui] origbackuppath = <path>
927 927 Fall back to default (filepath) if not specified
928 928 '''
929 929 origbackuppath = ui.config('ui', 'origbackuppath', None)
930 930 if origbackuppath is None:
931 931 return filepath + ".orig"
932 932
933 933 filepathfromroot = os.path.relpath(filepath, start=repo.root)
934 934 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
935 935
936 936 origbackupdir = repo.vfs.dirname(fullorigpath)
937 937 if not repo.vfs.exists(origbackupdir):
938 938 ui.note(_('creating directory: %s\n') % origbackupdir)
939 939 util.makedirs(origbackupdir)
940 940
941 941 return fullorigpath + ".orig"
942 942
943 943 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
944 944 if opts is None:
945 945 opts = {}
946 946 m = matcher
947 947 if dry_run is None:
948 948 dry_run = opts.get('dry_run')
949 949 if similarity is None:
950 950 similarity = float(opts.get('similarity') or 0)
951 951
952 952 ret = 0
953 953 join = lambda f: os.path.join(prefix, f)
954 954
955 955 wctx = repo[None]
956 956 for subpath in sorted(wctx.substate):
957 957 submatch = matchmod.subdirmatcher(subpath, m)
958 958 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
959 959 sub = wctx.sub(subpath)
960 960 try:
961 961 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
962 962 ret = 1
963 963 except error.LookupError:
964 964 repo.ui.status(_("skipping missing subrepository: %s\n")
965 965 % join(subpath))
966 966
967 967 rejected = []
968 968 def badfn(f, msg):
969 969 if f in m.files():
970 970 m.bad(f, msg)
971 971 rejected.append(f)
972 972
973 973 badmatch = matchmod.badmatch(m, badfn)
974 974 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
975 975 badmatch)
976 976
977 977 unknownset = set(unknown + forgotten)
978 978 toprint = unknownset.copy()
979 979 toprint.update(deleted)
980 980 for abs in sorted(toprint):
981 981 if repo.ui.verbose or not m.exact(abs):
982 982 if abs in unknownset:
983 983 status = _('adding %s\n') % m.uipath(abs)
984 984 else:
985 985 status = _('removing %s\n') % m.uipath(abs)
986 986 repo.ui.status(status)
987 987
988 988 renames = _findrenames(repo, m, added + unknown, removed + deleted,
989 989 similarity)
990 990
991 991 if not dry_run:
992 992 _markchanges(repo, unknown + forgotten, deleted, renames)
993 993
994 994 for f in rejected:
995 995 if f in m.files():
996 996 return 1
997 997 return ret
998 998
999 999 def marktouched(repo, files, similarity=0.0):
1000 1000 '''Assert that files have somehow been operated upon. files are relative to
1001 1001 the repo root.'''
1002 1002 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1003 1003 rejected = []
1004 1004
1005 1005 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1006 1006
1007 1007 if repo.ui.verbose:
1008 1008 unknownset = set(unknown + forgotten)
1009 1009 toprint = unknownset.copy()
1010 1010 toprint.update(deleted)
1011 1011 for abs in sorted(toprint):
1012 1012 if abs in unknownset:
1013 1013 status = _('adding %s\n') % abs
1014 1014 else:
1015 1015 status = _('removing %s\n') % abs
1016 1016 repo.ui.status(status)
1017 1017
1018 1018 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1019 1019 similarity)
1020 1020
1021 1021 _markchanges(repo, unknown + forgotten, deleted, renames)
1022 1022
1023 1023 for f in rejected:
1024 1024 if f in m.files():
1025 1025 return 1
1026 1026 return 0
1027 1027
1028 1028 def _interestingfiles(repo, matcher):
1029 1029 '''Walk dirstate with matcher, looking for files that addremove would care
1030 1030 about.
1031 1031
1032 1032 This is different from dirstate.status because it doesn't care about
1033 1033 whether files are modified or clean.'''
1034 1034 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1035 1035 audit_path = pathutil.pathauditor(repo.root)
1036 1036
1037 1037 ctx = repo[None]
1038 1038 dirstate = repo.dirstate
1039 1039 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1040 1040 full=False)
1041 1041 for abs, st in walkresults.iteritems():
1042 1042 dstate = dirstate[abs]
1043 1043 if dstate == '?' and audit_path.check(abs):
1044 1044 unknown.append(abs)
1045 1045 elif dstate != 'r' and not st:
1046 1046 deleted.append(abs)
1047 1047 elif dstate == 'r' and st:
1048 1048 forgotten.append(abs)
1049 1049 # for finding renames
1050 1050 elif dstate == 'r' and not st:
1051 1051 removed.append(abs)
1052 1052 elif dstate == 'a':
1053 1053 added.append(abs)
1054 1054
1055 1055 return added, unknown, deleted, removed, forgotten
1056 1056
1057 1057 def _findrenames(repo, matcher, added, removed, similarity):
1058 1058 '''Find renames from removed files to added ones.'''
1059 1059 renames = {}
1060 1060 if similarity > 0:
1061 1061 for old, new, score in similar.findrenames(repo, added, removed,
1062 1062 similarity):
1063 1063 if (repo.ui.verbose or not matcher.exact(old)
1064 1064 or not matcher.exact(new)):
1065 1065 repo.ui.status(_('recording removal of %s as rename to %s '
1066 1066 '(%d%% similar)\n') %
1067 1067 (matcher.rel(old), matcher.rel(new),
1068 1068 score * 100))
1069 1069 renames[new] = old
1070 1070 return renames
1071 1071
1072 1072 def _markchanges(repo, unknown, deleted, renames):
1073 1073 '''Marks the files in unknown as added, the files in deleted as removed,
1074 1074 and the files in renames as copied.'''
1075 1075 wctx = repo[None]
1076 1076 with repo.wlock():
1077 1077 wctx.forget(deleted)
1078 1078 wctx.add(unknown)
1079 1079 for new, old in renames.iteritems():
1080 1080 wctx.copy(old, new)
1081 1081
1082 1082 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1083 1083 """Update the dirstate to reflect the intent of copying src to dst. For
1084 1084 different reasons it might not end with dst being marked as copied from src.
1085 1085 """
1086 1086 origsrc = repo.dirstate.copied(src) or src
1087 1087 if dst == origsrc: # copying back a copy?
1088 1088 if repo.dirstate[dst] not in 'mn' and not dryrun:
1089 1089 repo.dirstate.normallookup(dst)
1090 1090 else:
1091 1091 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1092 1092 if not ui.quiet:
1093 1093 ui.warn(_("%s has not been committed yet, so no copy "
1094 1094 "data will be stored for %s.\n")
1095 1095 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1096 1096 if repo.dirstate[dst] in '?r' and not dryrun:
1097 1097 wctx.add([dst])
1098 1098 elif not dryrun:
1099 1099 wctx.copy(origsrc, dst)
1100 1100
1101 1101 def readrequires(opener, supported):
1102 1102 '''Reads and parses .hg/requires and checks if all entries found
1103 1103 are in the list of supported features.'''
1104 1104 requirements = set(opener.read("requires").splitlines())
1105 1105 missings = []
1106 1106 for r in requirements:
1107 1107 if r not in supported:
1108 1108 if not r or not r[0].isalnum():
1109 1109 raise error.RequirementError(_(".hg/requires file is corrupt"))
1110 1110 missings.append(r)
1111 1111 missings.sort()
1112 1112 if missings:
1113 1113 raise error.RequirementError(
1114 1114 _("repository requires features unknown to this Mercurial: %s")
1115 1115 % " ".join(missings),
1116 1116 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1117 1117 " for more information"))
1118 1118 return requirements
1119 1119
1120 1120 def writerequires(opener, requirements):
1121 1121 with opener('requires', 'w') as fp:
1122 1122 for r in sorted(requirements):
1123 1123 fp.write("%s\n" % r)
1124 1124
1125 1125 class filecachesubentry(object):
1126 1126 def __init__(self, path, stat):
1127 1127 self.path = path
1128 1128 self.cachestat = None
1129 1129 self._cacheable = None
1130 1130
1131 1131 if stat:
1132 1132 self.cachestat = filecachesubentry.stat(self.path)
1133 1133
1134 1134 if self.cachestat:
1135 1135 self._cacheable = self.cachestat.cacheable()
1136 1136 else:
1137 1137 # None means we don't know yet
1138 1138 self._cacheable = None
1139 1139
1140 1140 def refresh(self):
1141 1141 if self.cacheable():
1142 1142 self.cachestat = filecachesubentry.stat(self.path)
1143 1143
1144 1144 def cacheable(self):
1145 1145 if self._cacheable is not None:
1146 1146 return self._cacheable
1147 1147
1148 1148 # we don't know yet, assume it is for now
1149 1149 return True
1150 1150
1151 1151 def changed(self):
1152 1152 # no point in going further if we can't cache it
1153 1153 if not self.cacheable():
1154 1154 return True
1155 1155
1156 1156 newstat = filecachesubentry.stat(self.path)
1157 1157
1158 1158 # we may not know if it's cacheable yet, check again now
1159 1159 if newstat and self._cacheable is None:
1160 1160 self._cacheable = newstat.cacheable()
1161 1161
1162 1162 # check again
1163 1163 if not self._cacheable:
1164 1164 return True
1165 1165
1166 1166 if self.cachestat != newstat:
1167 1167 self.cachestat = newstat
1168 1168 return True
1169 1169 else:
1170 1170 return False
1171 1171
1172 1172 @staticmethod
1173 1173 def stat(path):
1174 1174 try:
1175 1175 return util.cachestat(path)
1176 1176 except OSError as e:
1177 1177 if e.errno != errno.ENOENT:
1178 1178 raise
1179 1179
1180 1180 class filecacheentry(object):
1181 1181 def __init__(self, paths, stat=True):
1182 1182 self._entries = []
1183 1183 for path in paths:
1184 1184 self._entries.append(filecachesubentry(path, stat))
1185 1185
1186 1186 def changed(self):
1187 1187 '''true if any entry has changed'''
1188 1188 for entry in self._entries:
1189 1189 if entry.changed():
1190 1190 return True
1191 1191 return False
1192 1192
1193 1193 def refresh(self):
1194 1194 for entry in self._entries:
1195 1195 entry.refresh()
1196 1196
1197 1197 class filecache(object):
1198 1198 '''A property-like decorator that tracks files under .hg/ for updates.
1199 1199
1200 1200 Records stat info when called in _filecache.
1201 1201
1202 1202 On subsequent calls, compares old stat info with new info, and recreates the
1203 1203 object when any of the files changes, updating the new stat info in
1204 1204 _filecache.
1205 1205
1206 1206 Mercurial either atomically renames or appends to files under .hg,
1207 1207 so to ensure the cache is reliable we need the filesystem to be able
1208 1208 to tell us if a file has been replaced. If it can't, we fall back to
1209 1209 recreating the object on every call (essentially the same behavior as
1210 1210 propertycache).
1211 1211
1212 1212 '''
1213 1213 def __init__(self, *paths):
1214 1214 self.paths = paths
1215 1215
1216 1216 def join(self, obj, fname):
1217 1217 """Used to compute the runtime path of a cached file.
1218 1218
1219 1219 Users should subclass filecache and provide their own version of this
1220 1220 function to call the appropriate join function on 'obj' (an instance
1221 1221 of the class whose member function was decorated).
1222 1222 """
1223 1223 return obj.join(fname)
1224 1224
1225 1225 def __call__(self, func):
1226 1226 self.func = func
1227 1227 self.name = func.__name__
1228 1228 return self
1229 1229
1230 1230 def __get__(self, obj, type=None):
1231 1231 # if accessed on the class, return the descriptor itself.
1232 1232 if obj is None:
1233 1233 return self
1234 1234 # do we need to check if the file changed?
1235 1235 if self.name in obj.__dict__:
1236 1236 assert self.name in obj._filecache, self.name
1237 1237 return obj.__dict__[self.name]
1238 1238
1239 1239 entry = obj._filecache.get(self.name)
1240 1240
1241 1241 if entry:
1242 1242 if entry.changed():
1243 1243 entry.obj = self.func(obj)
1244 1244 else:
1245 1245 paths = [self.join(obj, path) for path in self.paths]
1246 1246
1247 1247 # We stat -before- creating the object so our cache doesn't lie if
1248 1248 # a writer modified the file between the time we read and stat it
1249 1249 entry = filecacheentry(paths, True)
1250 1250 entry.obj = self.func(obj)
1251 1251
1252 1252 obj._filecache[self.name] = entry
1253 1253
1254 1254 obj.__dict__[self.name] = entry.obj
1255 1255 return entry.obj
1256 1256
1257 1257 def __set__(self, obj, value):
1258 1258 if self.name not in obj._filecache:
1259 1259 # we add an entry for the missing value because X in __dict__
1260 1260 # implies X in _filecache
1261 1261 paths = [self.join(obj, path) for path in self.paths]
1262 1262 ce = filecacheentry(paths, False)
1263 1263 obj._filecache[self.name] = ce
1264 1264 else:
1265 1265 ce = obj._filecache[self.name]
1266 1266
1267 1267 ce.obj = value # update cached copy
1268 1268 obj.__dict__[self.name] = value # update copy returned by obj.x
1269 1269
1270 1270 def __delete__(self, obj):
1271 1271 try:
1272 1272 del obj.__dict__[self.name]
1273 1273 except KeyError:
1274 1274 raise AttributeError(self.name)
1275 1275
1276 1276 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1277 1277 if lock is None:
1278 1278 raise error.LockInheritanceContractViolation(
1279 1279 'lock can only be inherited while held')
1280 1280 if environ is None:
1281 1281 environ = {}
1282 1282 with lock.inherit() as locker:
1283 1283 environ[envvar] = locker
1284 1284 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1285 1285
1286 1286 def wlocksub(repo, cmd, *args, **kwargs):
1287 1287 """run cmd as a subprocess that allows inheriting repo's wlock
1288 1288
1289 1289 This can only be called while the wlock is held. This takes all the
1290 1290 arguments that ui.system does, and returns the exit code of the
1291 1291 subprocess."""
1292 1292 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1293 1293 **kwargs)
1294 1294
1295 1295 def gdinitconfig(ui):
1296 1296 """helper function to know if a repo should be created as general delta
1297 1297 """
1298 1298 # experimental config: format.generaldelta
1299 1299 return (ui.configbool('format', 'generaldelta', False)
1300 1300 or ui.configbool('format', 'usegeneraldelta', True))
1301 1301
1302 1302 def gddeltaconfig(ui):
1303 1303 """helper function to know if incoming delta should be optimised
1304 1304 """
1305 1305 # experimental config: format.generaldelta
1306 1306 return ui.configbool('format', 'generaldelta', False)
1307 1307
1308 1308 class closewrapbase(object):
1309 1309 """Base class of wrapper, which hooks closing
1310 1310
1311 1311 Do not instantiate outside of the vfs layer.
1312 1312 """
1313 1313 def __init__(self, fh):
1314 1314 object.__setattr__(self, '_origfh', fh)
1315 1315
1316 1316 def __getattr__(self, attr):
1317 1317 return getattr(self._origfh, attr)
1318 1318
1319 1319 def __setattr__(self, attr, value):
1320 1320 return setattr(self._origfh, attr, value)
1321 1321
1322 1322 def __delattr__(self, attr):
1323 1323 return delattr(self._origfh, attr)
1324 1324
1325 1325 def __enter__(self):
1326 1326 return self._origfh.__enter__()
1327 1327
1328 1328 def __exit__(self, exc_type, exc_value, exc_tb):
1329 1329 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1330 1330
1331 1331 def close(self):
1332 1332 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1333 1333
1334 1334 class delayclosedfile(closewrapbase):
1335 1335 """Proxy for a file object whose close is delayed.
1336 1336
1337 1337 Do not instantiate outside of the vfs layer.
1338 1338 """
1339 1339 def __init__(self, fh, closer):
1340 1340 super(delayclosedfile, self).__init__(fh)
1341 1341 object.__setattr__(self, '_closer', closer)
1342 1342
1343 1343 def __exit__(self, exc_type, exc_value, exc_tb):
1344 1344 self._closer.close(self._origfh)
1345 1345
1346 1346 def close(self):
1347 1347 self._closer.close(self._origfh)
1348 1348
1349 1349 class backgroundfilecloser(object):
1350 1350 """Coordinates background closing of file handles on multiple threads."""
1351 1351 def __init__(self, ui, expectedcount=-1):
1352 1352 self._running = False
1353 1353 self._entered = False
1354 1354 self._threads = []
1355 1355 self._threadexception = None
1356 1356
1357 1357 # Only Windows/NTFS has slow file closing. So only enable by default
1358 1358 # on that platform. But allow it to be enabled elsewhere for testing.
1359 1359 defaultenabled = os.name == 'nt'
1360 1360 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1361 1361
1362 1362 if not enabled:
1363 1363 return
1364 1364
1365 1365 # There is overhead to starting and stopping the background threads.
1366 1366 # Don't do background processing unless the file count is large enough
1367 1367 # to justify it.
1368 1368 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1369 1369 2048)
1370 1370 # FUTURE dynamically start background threads after minfilecount closes.
1371 1371 # (We don't currently have any callers that don't know their file count)
1372 1372 if expectedcount > 0 and expectedcount < minfilecount:
1373 1373 return
1374 1374
1375 1375 # Windows defaults to a limit of 512 open files. A buffer of 128
1376 1376 # should give us enough headway.
1377 1377 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1378 1378 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1379 1379
1380 1380 ui.debug('starting %d threads for background file closing\n' %
1381 1381 threadcount)
1382 1382
1383 1383 self._queue = util.queue(maxsize=maxqueue)
1384 1384 self._running = True
1385 1385
1386 1386 for i in range(threadcount):
1387 1387 t = threading.Thread(target=self._worker, name='backgroundcloser')
1388 1388 self._threads.append(t)
1389 1389 t.start()
1390 1390
1391 1391 def __enter__(self):
1392 1392 self._entered = True
1393 1393 return self
1394 1394
1395 1395 def __exit__(self, exc_type, exc_value, exc_tb):
1396 1396 self._running = False
1397 1397
1398 1398 # Wait for threads to finish closing so open files don't linger for
1399 1399 # longer than the lifetime of the context manager.
1400 1400 for t in self._threads:
1401 1401 t.join()
1402 1402
1403 1403 def _worker(self):
1404 1404 """Main routine for worker thread."""
1405 1405 while True:
1406 1406 try:
1407 1407 fh = self._queue.get(block=True, timeout=0.100)
1408 1408 # Need to catch exceptions, or the thread will terminate and
1409 1409 # we could orphan file descriptors.
1410 1410 try:
1411 1411 fh.close()
1412 1412 except Exception as e:
1413 1413 # Stash so we can re-raise from the main thread later.
1414 1414 self._threadexception = e
1415 1415 except util.empty:
1416 1416 if not self._running:
1417 1417 break
1418 1418
1419 1419 def close(self, fh):
1420 1420 """Schedule a file for closing."""
1421 1421 if not self._entered:
1422 1422 raise error.Abort(_('can only call close() when context manager '
1423 1423 'active'))
1424 1424
1425 1425 # If a background thread encountered an exception, raise now so we fail
1426 1426 # fast. Otherwise we may potentially go on for minutes until the error
1427 1427 # is acted on.
1428 1428 if self._threadexception:
1429 1429 e = self._threadexception
1430 1430 self._threadexception = None
1431 1431 raise e
1432 1432
1433 1433 # If we're not actively running, close synchronously.
1434 1434 if not self._running:
1435 1435 fh.close()
1436 1436 return
1437 1437
1438 1438 self._queue.put(fh, block=True, timeout=None)
1439 1439
1440 1440 class checkambigatclosing(closewrapbase):
1441 1441 """Proxy for a file object, to avoid ambiguity of file stat
1442 1442
1443 1443 See also util.filestat for details about "ambiguity of file stat".
1444 1444
1445 1445 This proxy is useful only if the target file is guarded by any
1446 1446 lock (e.g. repo.lock or repo.wlock)
1447 1447
1448 1448 Do not instantiate outside of the vfs layer.
1449 1449 """
1450 1450 def __init__(self, fh):
1451 1451 super(checkambigatclosing, self).__init__(fh)
1452 1452 object.__setattr__(self, '_oldstat', util.filestat(fh.name))
1453 1453
1454 1454 def _checkambig(self):
1455 1455 oldstat = self._oldstat
1456 1456 if oldstat.stat:
1457 1457 newstat = util.filestat(self._origfh.name)
1458 1458 if newstat.isambig(oldstat):
1459 1459 # stat of changed file is ambiguous to original one
1460 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1461 os.utime(self._origfh.name, (advanced, advanced))
1460 newstat.avoidambig(self._origfh.name, oldstat)
1462 1461
1463 1462 def __exit__(self, exc_type, exc_value, exc_tb):
1464 1463 self._origfh.__exit__(exc_type, exc_value, exc_tb)
1465 1464 self._checkambig()
1466 1465
1467 1466 def close(self):
1468 1467 self._origfh.close()
1469 1468 self._checkambig()
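
For context on the code path above: passing checkambig=True to vfs.__call__ (valid only for writing, and useful only when the target file is guarded by a lock) wraps the returned handle in checkambigatclosing, so closing it triggers _checkambig(). A hypothetical usage sketch, assuming repo is an open repository object and 'cache/example' is an illustrative path:

    # Write a file under the lock; on close, _checkambig() compares the
    # new stat against the pre-write stat and, after this change, calls
    # newstat.avoidambig(), which tolerates EPERM from os.utime().
    with repo.lock():
        with repo.vfs('cache/example', 'wb', checkambig=True) as fp:
            fp.write(b'data')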