##// END OF EJS Templates
py3: use encoding.environ instead of os.environ...
Pulkit Goyal -
r30109:96a2278e default
parent child Browse files
Show More
@@ -1,1470 +1,1470
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import glob
13 13 import hashlib
14 14 import os
15 15 import re
16 16 import shutil
17 17 import stat
18 18 import tempfile
19 19 import threading
20 20
21 21 from .i18n import _
22 22 from .node import wdirrev
23 23 from . import (
24 24 encoding,
25 25 error,
26 26 match as matchmod,
27 27 osutil,
28 28 pathutil,
29 29 phases,
30 30 revset,
31 31 similar,
32 32 util,
33 33 )
34 34
# Select the platform-specific implementation module: Windows ('nt') vs POSIX.
if os.name == 'nt':
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

# Re-export the platform-specific hgrc path helpers at module level.
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
42 42
class status(tuple):
    '''Immutable 7-tuple holding one list of files per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant
    to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Pack the seven lists in a fixed order; the properties below
        # index into this tuple.
        items = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, items)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95 95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs sorted by subpath.  Subpaths present
    only in ctx2 are yielded last, as empty subrepos based on ctx1.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subpaths only known to ctx2 are handled separately below
    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120 120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Collect excluded nodes that are secret (and not extinct): they are
    # the likely reason nothing was exchanged, so mention them to the user.
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleanup.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141 141
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not acceptable as a new label name.

    Rejects reserved names, names containing forbidden characters, and
    names that parse as plain integers.
    '''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in ':\0\n\r':
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
155 155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for banned in ('\r', '\n'):
        if banned in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
160 160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    # newline/carriage-return are always fatal, regardless of config
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise error.Abort(msg)
            # not aborting: report the problem and continue
            ui.warn(_("warning: %s\n") % msg)
172 172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) pair of booleans derived from the
    ui.portablefilenames config value; on Windows abort is always True.
    Raises ConfigError for unrecognized values.
    '''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
185 185
class casecollisionauditor(object):
    '''Warn or abort when a new filename would case-fold-collide with a
    file already tracked in the dirstate.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # if True, a detected collision raises Abort instead of warning
        self._abort = abort
        # lower-case all tracked names in one pass over a \0-joined string
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # check filename ``f`` for a case-folding collision
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        # remember this name so repeated calls are accepted silently
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
209 209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.

    Returns None when nothing is filtered at or below ``maxrev``.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    # only revisions visible at or below maxrev affect the digest;
    # sort so the digest is independent of set iteration order
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        key = s.digest()
    return key
233 233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated

    Concrete subclasses must provide ``__call__`` (open a file relative to
    the vfs root) and ``join`` (map a relative path to an absolute one);
    everything here is implemented in terms of those two.
    """

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # cached alias: vfs.open(...) is the same as calling the vfs itself
        return self.__call__

    def read(self, path):
        '''return the entire binary content of ``path``'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''return the content of ``path`` as a list of lines'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''overwrite ``path`` with ``data`` (binary mode)'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''write the sequence of lines ``data`` to ``path``'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''append ``data`` to ``path`` (binary mode)'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        '''change the mode bits of ``path`` (relative to vfs root)'''
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        '''os.path.exists on the joined path; path=None tests the root'''
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        '''stat an already-open file object'''
        return util.fstat(fp)

    def isdir(self, path=None):
        '''os.path.isdir on the joined path'''
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        '''os.path.isfile on the joined path'''
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        '''os.path.islink on the joined path'''
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        '''os.path.lexists on the joined path (does not follow symlinks)'''
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        '''os.lstat on the joined path (does not follow symlinks)'''
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        '''os.listdir on the joined path'''
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        '''create a single directory under the vfs root'''
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        '''recursively create directories under the vfs root'''
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        '''create a lock file at ``path`` containing ``info``'''
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        '''os.mkdir on the joined path (parent must already exist)'''
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''create a temporary file under the vfs root

        Returns (fd, name) like tempfile.mkstemp, but ``name`` is kept
        relative to the vfs root (or to ``dir`` when given).'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        '''osutil.listdir on the joined path'''
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        '''read the content of the lock file at ``path``'''
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        '''os.readlink on the joined path'''
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failures coming from os.remove itself
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        '''set link (``l``) and executable (``x``) flags on ``path``'''
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        '''os.stat on the joined path'''
        return os.stat(self.join(path))

    def unlink(self, path=None):
        '''remove the file at the joined path'''
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        '''remove the file and prune now-empty parent directories'''
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        '''os.utime on the joined path'''
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always clear the marker, even if the caller raised
                vfs._backgroundfilecloser = None
478 478
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: directory all relative paths are resolved against
        # audit: when True, paths are vetted by a pathauditor before use
        # expandpath: expand user/env references in ``base`` first
        # realpath: resolve symlinks in ``base`` first
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self.mustaudit = audit
        self.createmode = None
        self._trustnlink = None

    @property
    def mustaudit(self):
        '''whether paths are checked by a pathauditor before being used'''
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: accept any path unconditionally
            self.audit = util.always

    @util.propertycache
    def _cansymlink(self):
        '''True if the filesystem under ``base`` supports symlinks'''
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        '''True if the filesystem under ``base`` honors the exec bit'''
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply the configured createmode (without exec bits) to a new file
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False, backgroundclose=False, checkambig=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictemplfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise error.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the hardlink count of the target: -1 unknown,
        # 0 means a fresh file will be created
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode,
                                               checkambig=checkambig)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2 # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks before modifying the file
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in ('r', 'rb'):
                raise error.Abort(_('implementation error: mode %s is not'
                                    ' valid for checkambig=True') % mode)
            fp = checkambigatclosing(fp)

        if backgroundclose:
            if not self._backgroundfilecloser:
                raise error.Abort(_('backgroundclose can only be used when a '
                                    'backgroundclosing context manager is active')
                                  )

            fp = delayclosedfile(fp, self._backgroundfilecloser)

        return fp

    def symlink(self, src, dst):
        '''create a symlink at ``dst`` pointing to ``src``; when symlinks
        are unsupported, fall back to writing ``src`` as file content'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            # replace any pre-existing link/file at the destination
            os.unlink(linkname)
        except OSError:
            pass

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        '''return ``path`` (plus optional components) joined below the vfs
        base; a falsy ``path`` yields the base itself'''
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
630 630
631 631 opener = vfs
632 632
class auditvfs(object):
    '''Base for wrapper vfs classes; forwards ``mustaudit`` and
    ``options`` to the wrapped vfs.'''

    def __init__(self, vfs):
        # the wrapped vfs all delegated attributes are read from/written to
        self.vfs = vfs

    @property
    def mustaudit(self):
        '''delegated: whether the wrapped vfs audits paths'''
        return self.vfs.mustaudit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    @property
    def options(self):
        '''delegated: the wrapped vfs options'''
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
652 652
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        # filter: callable mapping a path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # open through the wrapped vfs after rewriting the path
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        # the filter is applied to the joined relative path as a whole
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

# 'filteropener' is an alias for filtervfs.
filteropener = filtervfs
670 670
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only read modes are allowed through to the wrapped vfs
        if mode not in ('r', 'rb'):
            raise error.Abort(_('this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
684 684
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the top-level path abort the walk
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True if it was new
            # (used to avoid revisiting directories through symlink cycles)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect cycles, so don't follow symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the symlink target via a nested walk
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
732 732
def osrcpath():
    '''return default os-specific hgrc search path

    The path is: bundled default.d/*.rc files, then system-wide rc files,
    then per-user rc files; every entry is normalized.
    '''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path
745 745
# Module-level cache for rcpath(); None until first computed.
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        # Use encoding.environ for both the membership test and the value
        # lookup: mixing it with os.environ breaks on Python 3, where
        # encoding.environ carries byte-string keys while os.environ does not.
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            for p in encoding.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    # empty entries are skipped (so HGRCPATH='' yields [])
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
771 771
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation

    ``None`` (the working directory) maps to ``wdirrev``; anything else is
    returned unchanged.
    """
    return wdirrev if rev is None else rev
778 778
def revsingle(repo, revspec, default='.'):
    '''Return the single changectx selected by ``revspec``.

    A falsy ``revspec`` (except the integer 0, which is a real revision
    number) selects ``default``.  Aborts if the revset is empty; when it
    has several members, the last one is used.
    '''
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]
787 787
def _pairspec(revspec):
    '''True if ``revspec`` parses to a top-level range expression
    (``a:b``, ``:b``, ``a:`` or ``:``)'''
    tree = revset.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
791 791
def revpair(repo, revs):
    '''Resolve user-supplied revisions ``revs`` to a (first, second) node
    pair; ``second`` is None when a single revision was selected.'''
    if not revs:
        # no revisions given: working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints of the resulting set, exploiting its ordering
    # when known to avoid a full first()/last() scan
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
821 821
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revset.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            # bare revision numbers become explicit rev() expressions
            spec = revset.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    m = revset.matchany(repo.ui, allspecs, repo)
    return m(repo)
850 850
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # merge: both parents matter
        return parents
    if repo.ui.debugflag:
        # debug mode: always show both slots, padding with the null rev
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx.rev()) - 1:
        # parent immediately precedes the rev: not worth showing
        return []
    return parents
866 866
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        # only expand patterns with no explicit kind: prefix
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                # an unparsable pattern is kept verbatim
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        # no expansion happened: keep the original pattern
        ret.append(kindpat)
    return ret
885 885
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # expand bare globs (no-op on posix, see expandpats)
        pats = expandpats(pats or [])

    def bad(f, msg):
        # default bad-file callback: warn via the repo ui
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # an always-matcher means no effective patterns remain
        pats = []
    return m, pats
910 910
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats() but drops the expanded pattern list
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
915 915
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())
919 919
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
923 923
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backupdir = ui.config('ui', 'origbackuppath', None)
    if backupdir is None:
        # no configured location: back up next to the file itself
        return filepath + ".orig"

    # mirror the file's repo-relative path underneath the backup directory
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(backupdir, relpath)

    # make sure the destination directory exists before anyone writes to it
    origbackupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(origbackupdir):
        ui.note(_('creating directory: %s\n') % origbackupdir)
        util.makedirs(origbackupdir)

    return fullorigpath + ".orig"
943 943
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Schedule untracked files for addition and missing ones for removal.

    Walks the dirstate through ``matcher`` (via _interestingfiles), prints
    what is being added/removed, records likely renames as copies, and
    recurses into matching subrepositories.

    Returns 1 if a subrepo addremove reported failure or a file named
    explicitly by the matcher was rejected, otherwise 0.  When ``dry_run``
    is true, nothing is written to the dirstate.
    """
    if opts is None:
        opts = {}
    m = matcher
    # fall back to command-line option values when not given explicitly
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # process matching subrepositories first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # collect files the matcher complained about; only the ones named
    # explicitly make the whole operation fail (see the final loop below)
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    # report what will be added (unknown/forgotten) and removed (deleted)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
999 999
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # collect paths the matcher rejects; define the list before the
    # callback that closes over it
    rejected = []
    def badfn(f, msg):
        rejected.append(f)
    m = matchfiles(repo, files, badfn=badfn)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                repo.ui.status(_('adding %s\n') % abs)
            else:
                repo.ui.status(_('removing %s\n') % abs)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
1028 1028
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists of file names:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # 'st' is the walk's stat result (falsy when the file is absent on disk);
    # dirstate states: '?' untracked, 'r' removed, 'a' added
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and the path passes the auditor's checks
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but gone from the working directory
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1057 1057
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # stay quiet about pairs the user named explicitly, unless verbose
        if (repo.ui.verbose
            or not (matcher.exact(old) and matcher.exact(new))):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1072 1072
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
1082 1082
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # if src is itself a copy, chase back to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added, never committed: there is no revision
            # to record as the copy source
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1101 1101
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement names read from the file; raises
    error.RequirementError on a corrupt file or unsupported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for feature in requirements:
        if feature in supported:
            continue
        if not feature or not feature[0].isalnum():
            # an empty or non-alphanumeric-leading entry means the file
            # itself is damaged, not merely too new for us
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(feature)
    if missings:
        missings.sort()
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1120 1120
def writerequires(opener, requirements):
    """Write the requirement names, sorted one per line, to .hg/requires."""
    lines = ['%s\n' % req for req in sorted(requirements)]
    with opener('requires', 'w') as fp:
        fp.write(''.join(lines))
1125 1125
class filecachesubentry(object):
    # Tracks stat information for a single path, on behalf of filecacheentry.
    def __init__(self, path, stat):
        # 'stat' is a flag: when true, stat the path immediately
        self.path = path
        self.cachestat = None
        # True/False once known; None means "not determined yet"
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when the stat result can actually be trusted
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # Returns util.cachestat(path), or None when the path does not exist.
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1180 1180
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""
    def __init__(self, paths, stat=True):
        # 'stat' is forwarded to each subentry (stat the path right away?)
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1197 1197
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths (joined per-instance in join()) whose stat
        # information invalidates the cached value
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and its name,
        # then act as a data descriptor on the class
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache on the instance so the fast path above is hit next time
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # only drop the instance-level cache; _filecache keeps its entry
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1276 1276
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd via repo.ui.system, advertising the inherited lock in envvar.

    Raises error.LockInheritanceContractViolation when lock is None
    (i.e. not currently held).  Note: mutates the passed-in environ dict.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as lockname:
        environ[envvar] = lockname
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1286 1286
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1295 1295
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    generaldelta = ui.configbool('format', 'generaldelta', False)
    if generaldelta:
        return generaldelta
    # format.usegeneraldelta defaults to on
    return ui.configbool('format', 'usegeneraldelta', True)
1302 1302
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta', False)
    return enabled
1308 1308
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Delegates all attribute access to the wrapped file handle; subclasses
    must override __exit__ and close.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        # bypass our own __setattr__, which forwards to the wrapped handle
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, name):
        return getattr(self._origfh, name)

    def __setattr__(self, name, value):
        return setattr(self._origfh, name, value)

    def __delattr__(self, name):
        return delattr(self._origfh, name)

    def __enter__(self):
        return self._origfh.__enter__()

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
1334 1334
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Both close() and context-manager exit hand the wrapped handle to the
    'closer' (e.g. a backgroundfilecloser) instead of closing directly.

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # bypass closewrapbase.__setattr__, which forwards to the handle
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    def close(self):
        self._closer.close(self._origfh)
1349 1349
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # expectedcount: number of files the caller expects to close, or -1
        # when unknown; used to skip thread startup for small batches
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # bounded queue so producers block rather than exhaust file handles
        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        # close() refuses to run unless the context manager is active
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except util.empty:
                # queue drained; exit once shutdown has been requested
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1440 1440
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot the stat before any writes, to compare against at close
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        if oldstat.stat:
            newstat = util.filestat(self._origfh.name)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                # bump mtime by one second (31-bit truncated) to disambiguate
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(self._origfh.name, (advanced, advanced))

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now