addremove: remove a mutable default argument...
Pierre-Yves David
r26329:d9537ce6 default
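The patch replaces a mutable default argument (opts={}) in addremove() with the opts=None sentinel idiom. A minimal sketch of why this matters, using invented names rather than code from this changeset: a default dict is created once, at function definition time, and is then shared by every call that omits the argument, so mutations leak between callers.

    def buggy(key, seen={}):      # one dict shared by all calls
        seen[key] = True
        return seen

    def fixed(key, seen=None):    # fresh dict per call unless one is passed
        if seen is None:
            seen = {}
        seen[key] = True
        return seen

    buggy('a'); buggy('b')        # second call returns {'a': True, 'b': True}
    fixed('a'); fixed('b')        # second call returns {'b': True}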
@@ -1,1132 +1,1134 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import wdirrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
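# Illustrative sketch, not part of this file: the tuple-subclass-with-properties
# pattern used by the status class above, reduced to two fields. The names
# "pair", "first" and "second" are invented for the example.
class pair(tuple):
    __slots__ = ()                  # no per-instance dict; data lives in the tuple
    def __new__(cls, first, second):
        return tuple.__new__(cls, (first, second))
    @property
    def first(self):
        return self[0]
    @property
    def second(self):
        return self[1]
# pair('m', 'a').first == 'm', and the object still unpacks like a plain tuple.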
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83
84 84 missing = set()
85 85
86 86 for subpath in ctx2.substate:
87 87 if subpath not in ctx1.substate:
88 88 del subpaths[subpath]
89 89 missing.add(subpath)
90 90
91 91 for subpath, ctx in sorted(subpaths.iteritems()):
92 92 yield subpath, ctx.sub(subpath)
93 93
94 94 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
95 95 # status and diff will have an accurate result when it does
96 96 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
97 97 # against itself.
98 98 for subpath in missing:
99 99 yield subpath, ctx2.nullsub(subpath, ctx1)
100 100
101 101 def nochangesfound(ui, repo, excluded=None):
102 102 '''Report no changes for push/pull, excluded is None or a list of
103 103 nodes excluded from the push/pull.
104 104 '''
105 105 secretlist = []
106 106 if excluded:
107 107 for n in excluded:
108 108 if n not in repo:
109 109 # discovery should not have included the filtered revision,
110 110 # we have to explicitly exclude it until discovery is cleaned up.
111 111 continue
112 112 ctx = repo[n]
113 113 if ctx.phase() >= phases.secret and not ctx.extinct():
114 114 secretlist.append(n)
115 115
116 116 if secretlist:
117 117 ui.status(_("no changes found (ignored %d secret changesets)\n")
118 118 % len(secretlist))
119 119 else:
120 120 ui.status(_("no changes found\n"))
121 121
122 122 def checknewlabel(repo, lbl, kind):
123 123 # Do not use the "kind" parameter in ui output.
124 124 # It makes strings difficult to translate.
125 125 if lbl in ['tip', '.', 'null']:
126 126 raise util.Abort(_("the name '%s' is reserved") % lbl)
127 127 for c in (':', '\0', '\n', '\r'):
128 128 if c in lbl:
129 129 raise util.Abort(_("%r cannot be used in a name") % c)
130 130 try:
131 131 int(lbl)
132 132 raise util.Abort(_("cannot use an integer as a name"))
133 133 except ValueError:
134 134 pass
135 135
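# Illustrative sketch, not part of this file: the int() probe used by
# checknewlabel() above, isolated. A label that parses as an integer is
# rejected because it would be ambiguous with a revision number.
def _lookslikeint(lbl):
    try:
        int(lbl)
        return True
    except ValueError:
        return False
# _lookslikeint('123') -> True (rejected), _lookslikeint('v1.2') -> False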
136 136 def checkfilename(f):
137 137 '''Check that the filename f is an acceptable filename for a tracked file'''
138 138 if '\r' in f or '\n' in f:
139 139 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
140 140
141 141 def checkportable(ui, f):
142 142 '''Check if filename f is portable and warn or abort depending on config'''
143 143 checkfilename(f)
144 144 abort, warn = checkportabilityalert(ui)
145 145 if abort or warn:
146 146 msg = util.checkwinfilename(f)
147 147 if msg:
148 148 msg = "%s: %r" % (msg, f)
149 149 if abort:
150 150 raise util.Abort(msg)
151 151 ui.warn(_("warning: %s\n") % msg)
152 152
153 153 def checkportabilityalert(ui):
154 154 '''check if the user's config requests nothing, a warning, or abort for
155 155 non-portable filenames'''
156 156 val = ui.config('ui', 'portablefilenames', 'warn')
157 157 lval = val.lower()
158 158 bval = util.parsebool(val)
159 159 abort = os.name == 'nt' or lval == 'abort'
160 160 warn = bval or lval == 'warn'
161 161 if bval is None and not (warn or abort or lval == 'ignore'):
162 162 raise error.ConfigError(
163 163 _("ui.portablefilenames value is invalid ('%s')") % val)
164 164 return abort, warn
165 165
166 166 class casecollisionauditor(object):
167 167 def __init__(self, ui, abort, dirstate):
168 168 self._ui = ui
169 169 self._abort = abort
170 170 allfiles = '\0'.join(dirstate._map)
171 171 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
172 172 self._dirstate = dirstate
173 173 # The purpose of _newfiles is so that we don't complain about
174 174 # case collisions if someone were to call this object with the
175 175 # same filename twice.
176 176 self._newfiles = set()
177 177
178 178 def __call__(self, f):
179 179 if f in self._newfiles:
180 180 return
181 181 fl = encoding.lower(f)
182 182 if fl in self._loweredfiles and f not in self._dirstate:
183 183 msg = _('possible case-folding collision for %s') % f
184 184 if self._abort:
185 185 raise util.Abort(msg)
186 186 self._ui.warn(_("warning: %s\n") % msg)
187 187 self._loweredfiles.add(fl)
188 188 self._newfiles.add(f)
189 189
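# Illustrative sketch, not part of this file: the core of the case-folding
# check above, using plain str.lower() in place of encoding.lower(). The
# function name and arguments are invented for the example.
def collides(trackedfiles, newfile):
    lowered = set(f.lower() for f in trackedfiles)
    return newfile.lower() in lowered and newfile not in trackedfiles
# collides(['README'], 'readme') -> True; collides(['README'], 'README') -> False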
190 190 def filteredhash(repo, maxrev):
191 191 """build hash of filtered revisions in the current repoview.
192 192
193 193 Multiple caches perform up-to-date validation by checking that the
194 194 tiprev and tipnode stored in the cache file match the current repository.
195 195 However, this is not sufficient for validating repoviews because the set
196 196 of revisions in the view may change without the repository tiprev and
197 197 tipnode changing.
198 198
199 199 This function hashes all the revs filtered from the view and returns
200 200 that SHA-1 digest.
201 201 """
202 202 cl = repo.changelog
203 203 if not cl.filteredrevs:
204 204 return None
205 205 key = None
206 206 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
207 207 if revs:
208 208 s = util.sha1()
209 209 for rev in revs:
210 210 s.update('%s;' % rev)
211 211 key = s.digest()
212 212 return key
213 213
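# Illustrative sketch, not part of this file: a standalone analogue of
# filteredhash() using hashlib directly in place of util.sha1 (assumed here
# to wrap the same SHA-1 primitive). Python 2 string semantics, as in this file.
import hashlib
def hashfiltered(filteredrevs, maxrev):
    revs = sorted(r for r in filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()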
214 214 class abstractvfs(object):
215 215 """Abstract base class; cannot be instantiated"""
216 216
217 217 def __init__(self, *args, **kwargs):
218 218 '''Prevent instantiation; don't call this from subclasses.'''
219 219 raise NotImplementedError('attempted instantiating ' + str(type(self)))
220 220
221 221 def tryread(self, path):
222 222 '''gracefully return an empty string for missing files'''
223 223 try:
224 224 return self.read(path)
225 225 except IOError as inst:
226 226 if inst.errno != errno.ENOENT:
227 227 raise
228 228 return ""
229 229
230 230 def tryreadlines(self, path, mode='rb'):
231 231 '''gracefully return an empty array for missing files'''
232 232 try:
233 233 return self.readlines(path, mode=mode)
234 234 except IOError as inst:
235 235 if inst.errno != errno.ENOENT:
236 236 raise
237 237 return []
238 238
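# Illustrative sketch, not part of this file: the "missing file means empty"
# pattern used by tryread()/tryreadlines() above, written against the plain
# filesystem instead of a vfs.
import errno
def tryreadfile(path):
    try:
        fp = open(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return ''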
239 239 def open(self, path, mode="r", text=False, atomictemp=False,
240 240 notindexed=False):
241 241 '''Open ``path`` file, which is relative to vfs root.
242 242
243 243 Newly created directories are marked as "not to be indexed by
244 244 the content indexing service", if ``notindexed`` is specified
245 245 for "write" mode access.
246 246 '''
247 247 self.open = self.__call__
248 248 return self.__call__(path, mode, text, atomictemp, notindexed)
249 249
250 250 def read(self, path):
251 251 fp = self(path, 'rb')
252 252 try:
253 253 return fp.read()
254 254 finally:
255 255 fp.close()
256 256
257 257 def readlines(self, path, mode='rb'):
258 258 fp = self(path, mode=mode)
259 259 try:
260 260 return fp.readlines()
261 261 finally:
262 262 fp.close()
263 263
264 264 def write(self, path, data):
265 265 fp = self(path, 'wb')
266 266 try:
267 267 return fp.write(data)
268 268 finally:
269 269 fp.close()
270 270
271 271 def writelines(self, path, data, mode='wb', notindexed=False):
272 272 fp = self(path, mode=mode, notindexed=notindexed)
273 273 try:
274 274 return fp.writelines(data)
275 275 finally:
276 276 fp.close()
277 277
278 278 def append(self, path, data):
279 279 fp = self(path, 'ab')
280 280 try:
281 281 return fp.write(data)
282 282 finally:
283 283 fp.close()
284 284
285 285 def basename(self, path):
286 286 """return base element of a path (as os.path.basename would do)
287 287
288 288 This exists to allow handling of strange encoding if needed."""
289 289 return os.path.basename(path)
290 290
291 291 def chmod(self, path, mode):
292 292 return os.chmod(self.join(path), mode)
293 293
294 294 def dirname(self, path):
295 295 """return dirname element of a path (as os.path.dirname would do)
296 296
297 297 This exists to allow handling of strange encoding if needed."""
298 298 return os.path.dirname(path)
299 299
300 300 def exists(self, path=None):
301 301 return os.path.exists(self.join(path))
302 302
303 303 def fstat(self, fp):
304 304 return util.fstat(fp)
305 305
306 306 def isdir(self, path=None):
307 307 return os.path.isdir(self.join(path))
308 308
309 309 def isfile(self, path=None):
310 310 return os.path.isfile(self.join(path))
311 311
312 312 def islink(self, path=None):
313 313 return os.path.islink(self.join(path))
314 314
315 315 def reljoin(self, *paths):
316 316 """join various elements of a path together (as os.path.join would do)
317 317
318 318 The vfs base is not injected so that paths stay relative. This exists
319 319 to allow handling of strange encoding if needed."""
320 320 return os.path.join(*paths)
321 321
322 322 def split(self, path):
323 323 """split top-most element of a path (as os.path.split would do)
324 324
325 325 This exists to allow handling of strange encoding if needed."""
326 326 return os.path.split(path)
327 327
328 328 def lexists(self, path=None):
329 329 return os.path.lexists(self.join(path))
330 330
331 331 def lstat(self, path=None):
332 332 return os.lstat(self.join(path))
333 333
334 334 def listdir(self, path=None):
335 335 return os.listdir(self.join(path))
336 336
337 337 def makedir(self, path=None, notindexed=True):
338 338 return util.makedir(self.join(path), notindexed)
339 339
340 340 def makedirs(self, path=None, mode=None):
341 341 return util.makedirs(self.join(path), mode)
342 342
343 343 def makelock(self, info, path):
344 344 return util.makelock(info, self.join(path))
345 345
346 346 def mkdir(self, path=None):
347 347 return os.mkdir(self.join(path))
348 348
349 349 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
350 350 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
351 351 dir=self.join(dir), text=text)
352 352 dname, fname = util.split(name)
353 353 if dir:
354 354 return fd, os.path.join(dir, fname)
355 355 else:
356 356 return fd, fname
357 357
358 358 def readdir(self, path=None, stat=None, skip=None):
359 359 return osutil.listdir(self.join(path), stat, skip)
360 360
361 361 def readlock(self, path):
362 362 return util.readlock(self.join(path))
363 363
364 364 def rename(self, src, dst):
365 365 return util.rename(self.join(src), self.join(dst))
366 366
367 367 def readlink(self, path):
368 368 return os.readlink(self.join(path))
369 369
370 370 def removedirs(self, path=None):
371 371 """Remove a leaf directory and all empty intermediate ones
372 372 """
373 373 return util.removedirs(self.join(path))
374 374
375 375 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
376 376 """Remove a directory tree recursively
377 377
378 378 If ``forcibly``, this tries to remove READ-ONLY files, too.
379 379 """
380 380 if forcibly:
381 381 def onerror(function, path, excinfo):
382 382 if function is not os.remove:
383 383 raise
384 384 # read-only files cannot be unlinked under Windows
385 385 s = os.stat(path)
386 386 if (s.st_mode & stat.S_IWRITE) != 0:
387 387 raise
388 388 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
389 389 os.remove(path)
390 390 else:
391 391 onerror = None
392 392 return shutil.rmtree(self.join(path),
393 393 ignore_errors=ignore_errors, onerror=onerror)
394 394
395 395 def setflags(self, path, l, x):
396 396 return util.setflags(self.join(path), l, x)
397 397
398 398 def stat(self, path=None):
399 399 return os.stat(self.join(path))
400 400
401 401 def unlink(self, path=None):
402 402 return util.unlink(self.join(path))
403 403
404 404 def unlinkpath(self, path=None, ignoremissing=False):
405 405 return util.unlinkpath(self.join(path), ignoremissing)
406 406
407 407 def utime(self, path=None, t=None):
408 408 return os.utime(self.join(path), t)
409 409
410 410 def walk(self, path=None, onerror=None):
411 411 """Yield (dirpath, dirs, files) tuple for each directories under path
412 412
413 413 ``dirpath`` is relative to the root of this vfs. This
414 414 uses ``os.sep`` as the path separator, even if you specify a
415 415 POSIX-style ``path``.
416 416
417 417 "The root of this vfs" is represented as empty ``dirpath``.
418 418 """
419 419 root = os.path.normpath(self.join(None))
420 420 # when dirpath == root, dirpath[prefixlen:] becomes empty
421 421 # because len(dirpath) < prefixlen.
422 422 prefixlen = len(pathutil.normasprefix(root))
423 423 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
424 424 yield (dirpath[prefixlen:], dirs, files)
425 425
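# Illustrative sketch, not part of this file: the prefix stripping described
# in abstractvfs.walk() above, for a plain directory root. The helper name is
# invented, and os.path.join(..., '') stands in for pathutil.normasprefix().
import os
def walkrelative(root):
    prefixlen = len(os.path.join(os.path.normpath(root), ''))
    for dirpath, dirs, files in os.walk(root):
        # the root itself yields '' because len(dirpath) < prefixlen there
        yield dirpath[prefixlen:], dirs, files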
426 426 class vfs(abstractvfs):
427 427 '''Operate files relative to a base directory
428 428
429 429 This class is used to hide the details of COW semantics and
430 430 remote file access from higher level code.
431 431 '''
432 432 def __init__(self, base, audit=True, expandpath=False, realpath=False):
433 433 if expandpath:
434 434 base = util.expandpath(base)
435 435 if realpath:
436 436 base = os.path.realpath(base)
437 437 self.base = base
438 438 self._setmustaudit(audit)
439 439 self.createmode = None
440 440 self._trustnlink = None
441 441
442 442 def _getmustaudit(self):
443 443 return self._audit
444 444
445 445 def _setmustaudit(self, onoff):
446 446 self._audit = onoff
447 447 if onoff:
448 448 self.audit = pathutil.pathauditor(self.base)
449 449 else:
450 450 self.audit = util.always
451 451
452 452 mustaudit = property(_getmustaudit, _setmustaudit)
453 453
454 454 @util.propertycache
455 455 def _cansymlink(self):
456 456 return util.checklink(self.base)
457 457
458 458 @util.propertycache
459 459 def _chmod(self):
460 460 return util.checkexec(self.base)
461 461
462 462 def _fixfilemode(self, name):
463 463 if self.createmode is None or not self._chmod:
464 464 return
465 465 os.chmod(name, self.createmode & 0o666)
466 466
467 467 def __call__(self, path, mode="r", text=False, atomictemp=False,
468 468 notindexed=False):
469 469 '''Open ``path`` file, which is relative to vfs root.
470 470
471 471 Newly created directories are marked as "not to be indexed by
472 472 the content indexing service", if ``notindexed`` is specified
473 473 for "write" mode access.
474 474 '''
475 475 if self._audit:
476 476 r = util.checkosfilename(path)
477 477 if r:
478 478 raise util.Abort("%s: %r" % (r, path))
479 479 self.audit(path)
480 480 f = self.join(path)
481 481
482 482 if not text and "b" not in mode:
483 483 mode += "b" # for that other OS
484 484
485 485 nlink = -1
486 486 if mode not in ('r', 'rb'):
487 487 dirname, basename = util.split(f)
488 488 # If basename is empty, then the path is malformed because it points
489 489 # to a directory. Let the posixfile() call below raise IOError.
490 490 if basename:
491 491 if atomictemp:
492 492 util.ensuredirs(dirname, self.createmode, notindexed)
493 493 return util.atomictempfile(f, mode, self.createmode)
494 494 try:
495 495 if 'w' in mode:
496 496 util.unlink(f)
497 497 nlink = 0
498 498 else:
499 499 # nlinks() may behave differently for files on Windows
500 500 # shares if the file is open.
501 501 fd = util.posixfile(f)
502 502 nlink = util.nlinks(f)
503 503 if nlink < 1:
504 504 nlink = 2 # force mktempcopy (issue1922)
505 505 fd.close()
506 506 except (OSError, IOError) as e:
507 507 if e.errno != errno.ENOENT:
508 508 raise
509 509 nlink = 0
510 510 util.ensuredirs(dirname, self.createmode, notindexed)
511 511 if nlink > 0:
512 512 if self._trustnlink is None:
513 513 self._trustnlink = nlink > 1 or util.checknlink(f)
514 514 if nlink > 1 or not self._trustnlink:
515 515 util.rename(util.mktempcopy(f), f)
516 516 fp = util.posixfile(f, mode)
517 517 if nlink == 0:
518 518 self._fixfilemode(f)
519 519 return fp
520 520
521 521 def symlink(self, src, dst):
522 522 self.audit(dst)
523 523 linkname = self.join(dst)
524 524 try:
525 525 os.unlink(linkname)
526 526 except OSError:
527 527 pass
528 528
529 529 util.ensuredirs(os.path.dirname(linkname), self.createmode)
530 530
531 531 if self._cansymlink:
532 532 try:
533 533 os.symlink(src, linkname)
534 534 except OSError as err:
535 535 raise OSError(err.errno, _('could not symlink to %r: %s') %
536 536 (src, err.strerror), linkname)
537 537 else:
538 538 self.write(dst, src)
539 539
540 540 def join(self, path, *insidef):
541 541 if path:
542 542 return os.path.join(self.base, path, *insidef)
543 543 else:
544 544 return self.base
545 545
546 546 opener = vfs
547 547
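# Illustrative sketch, not part of this file: the write-to-temp-then-rename
# idea behind the atomictemp branch of vfs.__call__ above. This is a rough
# stand-in for util.atomictempfile, not its implementation.
import os, tempfile
def atomicwrite(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    try:
        os.write(fd, data)
    finally:
        os.close(fd)
    os.rename(tmp, path)    # readers see either the old or the new contents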
548 548 class auditvfs(object):
549 549 def __init__(self, vfs):
550 550 self.vfs = vfs
551 551
552 552 def _getmustaudit(self):
553 553 return self.vfs.mustaudit
554 554
555 555 def _setmustaudit(self, onoff):
556 556 self.vfs.mustaudit = onoff
557 557
558 558 mustaudit = property(_getmustaudit, _setmustaudit)
559 559
560 560 class filtervfs(abstractvfs, auditvfs):
561 561 '''Wrapper vfs for filtering filenames with a function.'''
562 562
563 563 def __init__(self, vfs, filter):
564 564 auditvfs.__init__(self, vfs)
565 565 self._filter = filter
566 566
567 567 def __call__(self, path, *args, **kwargs):
568 568 return self.vfs(self._filter(path), *args, **kwargs)
569 569
570 570 def join(self, path, *insidef):
571 571 if path:
572 572 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
573 573 else:
574 574 return self.vfs.join(path)
575 575
576 576 filteropener = filtervfs
577 577
578 578 class readonlyvfs(abstractvfs, auditvfs):
579 579 '''Wrapper vfs preventing any writing.'''
580 580
581 581 def __init__(self, vfs):
582 582 auditvfs.__init__(self, vfs)
583 583
584 584 def __call__(self, path, mode='r', *args, **kw):
585 585 if mode not in ('r', 'rb'):
586 586 raise util.Abort('this vfs is read only')
587 587 return self.vfs(path, mode, *args, **kw)
588 588
589 589 def join(self, path, *insidef):
590 590 return self.vfs.join(path, *insidef)
591 591
592 592 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
593 593 '''yield every hg repository under path, always recursively.
594 594 The recurse flag will only control recursion into repo working dirs'''
595 595 def errhandler(err):
596 596 if err.filename == path:
597 597 raise err
598 598 samestat = getattr(os.path, 'samestat', None)
599 599 if followsym and samestat is not None:
600 600 def adddir(dirlst, dirname):
601 601 match = False
602 602 dirstat = os.stat(dirname)
603 603 for lstdirstat in dirlst:
604 604 if samestat(dirstat, lstdirstat):
605 605 match = True
606 606 break
607 607 if not match:
608 608 dirlst.append(dirstat)
609 609 return not match
610 610 else:
611 611 followsym = False
612 612
613 613 if (seen_dirs is None) and followsym:
614 614 seen_dirs = []
615 615 adddir(seen_dirs, path)
616 616 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
617 617 dirs.sort()
618 618 if '.hg' in dirs:
619 619 yield root # found a repository
620 620 qroot = os.path.join(root, '.hg', 'patches')
621 621 if os.path.isdir(os.path.join(qroot, '.hg')):
622 622 yield qroot # we have a patch queue repo here
623 623 if recurse:
624 624 # avoid recursing inside the .hg directory
625 625 dirs.remove('.hg')
626 626 else:
627 627 dirs[:] = [] # don't descend further
628 628 elif followsym:
629 629 newdirs = []
630 630 for d in dirs:
631 631 fname = os.path.join(root, d)
632 632 if adddir(seen_dirs, fname):
633 633 if os.path.islink(fname):
634 634 for hgname in walkrepos(fname, True, seen_dirs):
635 635 yield hgname
636 636 else:
637 637 newdirs.append(d)
638 638 dirs[:] = newdirs
639 639
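# Illustrative sketch, not part of this file: walkrepos() above minus the
# symlink handling and the .hg/patches special case.
import os
def findrepos(path):
    for root, dirs, files in os.walk(path):
        dirs.sort()
        if '.hg' in dirs:
            yield root
            dirs[:] = []        # do not descend into a found repository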
640 640 def osrcpath():
641 641 '''return default os-specific hgrc search path'''
642 642 path = []
643 643 defaultpath = os.path.join(util.datapath, 'default.d')
644 644 if os.path.isdir(defaultpath):
645 645 for f, kind in osutil.listdir(defaultpath):
646 646 if f.endswith('.rc'):
647 647 path.append(os.path.join(defaultpath, f))
648 648 path.extend(systemrcpath())
649 649 path.extend(userrcpath())
650 650 path = [os.path.normpath(f) for f in path]
651 651 return path
652 652
653 653 _rcpath = None
654 654
655 655 def rcpath():
656 656 '''return hgrc search path. if env var HGRCPATH is set, use it.
657 657 for each item in path, if directory, use files ending in .rc,
658 658 else use item.
659 659 make HGRCPATH empty to only look in .hg/hgrc of current repo.
660 660 if no HGRCPATH, use default os-specific path.'''
661 661 global _rcpath
662 662 if _rcpath is None:
663 663 if 'HGRCPATH' in os.environ:
664 664 _rcpath = []
665 665 for p in os.environ['HGRCPATH'].split(os.pathsep):
666 666 if not p:
667 667 continue
668 668 p = util.expandpath(p)
669 669 if os.path.isdir(p):
670 670 for f, kind in osutil.listdir(p):
671 671 if f.endswith('.rc'):
672 672 _rcpath.append(os.path.join(p, f))
673 673 else:
674 674 _rcpath.append(p)
675 675 else:
676 676 _rcpath = osrcpath()
677 677 return _rcpath
678 678
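# Illustrative sketch, not part of this file: the HGRCPATH handling above
# reduced to standard-library calls. os.path.expanduser/expandvars stand in
# for util.expandpath, and os.listdir for osutil.listdir.
import os
def splitrcpath(value):
    out = []
    for p in value.split(os.pathsep):
        if not p:
            continue
        p = os.path.expandvars(os.path.expanduser(p))
        if os.path.isdir(p):
            out.extend(os.path.join(p, f) for f in sorted(os.listdir(p))
                       if f.endswith('.rc'))
        else:
            out.append(p)
    return out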
679 679 def intrev(rev):
680 680 """Return integer for a given revision that can be used in comparison or
681 681 arithmetic operation"""
682 682 if rev is None:
683 683 return wdirrev
684 684 return rev
685 685
686 686 def revsingle(repo, revspec, default='.'):
687 687 if not revspec and revspec != 0:
688 688 return repo[default]
689 689
690 690 l = revrange(repo, [revspec])
691 691 if not l:
692 692 raise util.Abort(_('empty revision set'))
693 693 return repo[l.last()]
694 694
695 695 def _pairspec(revspec):
696 696 tree = revset.parse(revspec)
697 697 tree = revset.optimize(tree, True)[1] # fix up "x^:y" -> "(x^):y"
698 698 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
699 699
700 700 def revpair(repo, revs):
701 701 if not revs:
702 702 return repo.dirstate.p1(), None
703 703
704 704 l = revrange(repo, revs)
705 705
706 706 if not l:
707 707 first = second = None
708 708 elif l.isascending():
709 709 first = l.min()
710 710 second = l.max()
711 711 elif l.isdescending():
712 712 first = l.max()
713 713 second = l.min()
714 714 else:
715 715 first = l.first()
716 716 second = l.last()
717 717
718 718 if first is None:
719 719 raise util.Abort(_('empty revision range'))
720 720
721 721 # if top-level is range expression, the result must always be a pair
722 722 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
723 723 return repo.lookup(first), None
724 724
725 725 return repo.lookup(first), repo.lookup(second)
726 726
727 727 def revrange(repo, revs):
728 728 """Yield revision as strings from a list of revision specifications."""
729 729 allspecs = []
730 730 for spec in revs:
731 731 if isinstance(spec, int):
732 732 spec = revset.formatspec('rev(%d)', spec)
733 733 allspecs.append(spec)
734 734 m = revset.matchany(repo.ui, allspecs, repo)
735 735 return m(repo)
736 736
737 737 def expandpats(pats):
738 738 '''Expand bare globs when running on windows.
739 739 On posix we assume it has already been done by sh.'''
740 740 if not util.expandglobs:
741 741 return list(pats)
742 742 ret = []
743 743 for kindpat in pats:
744 744 kind, pat = matchmod._patsplit(kindpat, None)
745 745 if kind is None:
746 746 try:
747 747 globbed = glob.glob(pat)
748 748 except re.error:
749 749 globbed = [pat]
750 750 if globbed:
751 751 ret.extend(globbed)
752 752 continue
753 753 ret.append(kindpat)
754 754 return ret
755 755
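# Illustrative sketch, not part of this file: the glob fallback inside
# expandpats() above, without the kind-prefix handling done by matchmod.
import glob
def expandbare(pats):
    out = []
    for pat in pats:
        matched = glob.glob(pat)
        out.extend(matched if matched else [pat])
    return out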
756 756 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
757 757 badfn=None):
758 758 '''Return a matcher and the patterns that were used.
759 759 The matcher will warn about bad matches, unless an alternate badfn callback
760 760 is provided.'''
761 761 if pats == ("",):
762 762 pats = []
763 763 if opts is None:
764 764 opts = {}
765 765 if not globbed and default == 'relpath':
766 766 pats = expandpats(pats or [])
767 767
768 768 def bad(f, msg):
769 769 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
770 770
771 771 if badfn is None:
772 772 badfn = bad
773 773
774 774 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
775 775 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
776 776
777 777 if m.always():
778 778 pats = []
779 779 return m, pats
780 780
781 781 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
782 782 badfn=None):
783 783 '''Return a matcher that will warn about bad matches.'''
784 784 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
785 785
786 786 def matchall(repo):
787 787 '''Return a matcher that will efficiently match everything.'''
788 788 return matchmod.always(repo.root, repo.getcwd())
789 789
790 790 def matchfiles(repo, files, badfn=None):
791 791 '''Return a matcher that will efficiently match exactly these files.'''
792 792 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
793 793
794 def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
794 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
795 if opts is None:
796 opts = {}
795 797 m = matcher
796 798 if dry_run is None:
797 799 dry_run = opts.get('dry_run')
798 800 if similarity is None:
799 801 similarity = float(opts.get('similarity') or 0)
800 802
801 803 ret = 0
802 804 join = lambda f: os.path.join(prefix, f)
803 805
804 806 def matchessubrepo(matcher, subpath):
805 807 if matcher.exact(subpath):
806 808 return True
807 809 for f in matcher.files():
808 810 if f.startswith(subpath):
809 811 return True
810 812 return False
811 813
812 814 wctx = repo[None]
813 815 for subpath in sorted(wctx.substate):
814 816 if opts.get('subrepos') or matchessubrepo(m, subpath):
815 817 sub = wctx.sub(subpath)
816 818 try:
817 819 submatch = matchmod.narrowmatcher(subpath, m)
818 820 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
819 821 ret = 1
820 822 except error.LookupError:
821 823 repo.ui.status(_("skipping missing subrepository: %s\n")
822 824 % join(subpath))
823 825
824 826 rejected = []
825 827 def badfn(f, msg):
826 828 if f in m.files():
827 829 m.bad(f, msg)
828 830 rejected.append(f)
829 831
830 832 badmatch = matchmod.badmatch(m, badfn)
831 833 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
832 834 badmatch)
833 835
834 836 unknownset = set(unknown + forgotten)
835 837 toprint = unknownset.copy()
836 838 toprint.update(deleted)
837 839 for abs in sorted(toprint):
838 840 if repo.ui.verbose or not m.exact(abs):
839 841 if abs in unknownset:
840 842 status = _('adding %s\n') % m.uipath(abs)
841 843 else:
842 844 status = _('removing %s\n') % m.uipath(abs)
843 845 repo.ui.status(status)
844 846
845 847 renames = _findrenames(repo, m, added + unknown, removed + deleted,
846 848 similarity)
847 849
848 850 if not dry_run:
849 851 _markchanges(repo, unknown + forgotten, deleted, renames)
850 852
851 853 for f in rejected:
852 854 if f in m.files():
853 855 return 1
854 856 return ret
855 857
856 858 def marktouched(repo, files, similarity=0.0):
857 859 '''Assert that files have somehow been operated upon. files are relative to
858 860 the repo root.'''
859 861 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
860 862 rejected = []
861 863
862 864 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
863 865
864 866 if repo.ui.verbose:
865 867 unknownset = set(unknown + forgotten)
866 868 toprint = unknownset.copy()
867 869 toprint.update(deleted)
868 870 for abs in sorted(toprint):
869 871 if abs in unknownset:
870 872 status = _('adding %s\n') % abs
871 873 else:
872 874 status = _('removing %s\n') % abs
873 875 repo.ui.status(status)
874 876
875 877 renames = _findrenames(repo, m, added + unknown, removed + deleted,
876 878 similarity)
877 879
878 880 _markchanges(repo, unknown + forgotten, deleted, renames)
879 881
880 882 for f in rejected:
881 883 if f in m.files():
882 884 return 1
883 885 return 0
884 886
885 887 def _interestingfiles(repo, matcher):
886 888 '''Walk dirstate with matcher, looking for files that addremove would care
887 889 about.
888 890
889 891 This is different from dirstate.status because it doesn't care about
890 892 whether files are modified or clean.'''
891 893 added, unknown, deleted, removed, forgotten = [], [], [], [], []
892 894 audit_path = pathutil.pathauditor(repo.root)
893 895
894 896 ctx = repo[None]
895 897 dirstate = repo.dirstate
896 898 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
897 899 full=False)
898 900 for abs, st in walkresults.iteritems():
899 901 dstate = dirstate[abs]
900 902 if dstate == '?' and audit_path.check(abs):
901 903 unknown.append(abs)
902 904 elif dstate != 'r' and not st:
903 905 deleted.append(abs)
904 906 elif dstate == 'r' and st:
905 907 forgotten.append(abs)
906 908 # for finding renames
907 909 elif dstate == 'r' and not st:
908 910 removed.append(abs)
909 911 elif dstate == 'a':
910 912 added.append(abs)
911 913
912 914 return added, unknown, deleted, removed, forgotten
913 915
914 916 def _findrenames(repo, matcher, added, removed, similarity):
915 917 '''Find renames from removed files to added ones.'''
916 918 renames = {}
917 919 if similarity > 0:
918 920 for old, new, score in similar.findrenames(repo, added, removed,
919 921 similarity):
920 922 if (repo.ui.verbose or not matcher.exact(old)
921 923 or not matcher.exact(new)):
922 924 repo.ui.status(_('recording removal of %s as rename to %s '
923 925 '(%d%% similar)\n') %
924 926 (matcher.rel(old), matcher.rel(new),
925 927 score * 100))
926 928 renames[new] = old
927 929 return renames
928 930
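# Illustrative sketch, not part of this file: a naive standalone analogue of
# the rename scoring delegated to similar.findrenames() above. readfn is an
# invented callback returning file contents; the real code scores candidates
# quite differently.
import difflib
def naiverenames(added, removed, readfn, threshold):
    renames = {}
    for new in added:
        for old in removed:
            score = difflib.SequenceMatcher(None, readfn(old),
                                            readfn(new)).ratio()
            if score >= threshold:
                renames[new] = old
                break
    return renames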
929 931 def _markchanges(repo, unknown, deleted, renames):
930 932 '''Marks the files in unknown as added, the files in deleted as removed,
931 933 and the files in renames as copied.'''
932 934 wctx = repo[None]
933 935 wlock = repo.wlock()
934 936 try:
935 937 wctx.forget(deleted)
936 938 wctx.add(unknown)
937 939 for new, old in renames.iteritems():
938 940 wctx.copy(old, new)
939 941 finally:
940 942 wlock.release()
941 943
942 944 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
943 945 """Update the dirstate to reflect the intent of copying src to dst. For
944 946 different reasons it might not end up with dst being marked as copied from src.
945 947 """
946 948 origsrc = repo.dirstate.copied(src) or src
947 949 if dst == origsrc: # copying back a copy?
948 950 if repo.dirstate[dst] not in 'mn' and not dryrun:
949 951 repo.dirstate.normallookup(dst)
950 952 else:
951 953 if repo.dirstate[origsrc] == 'a' and origsrc == src:
952 954 if not ui.quiet:
953 955 ui.warn(_("%s has not been committed yet, so no copy "
954 956 "data will be stored for %s.\n")
955 957 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
956 958 if repo.dirstate[dst] in '?r' and not dryrun:
957 959 wctx.add([dst])
958 960 elif not dryrun:
959 961 wctx.copy(origsrc, dst)
960 962
961 963 def readrequires(opener, supported):
962 964 '''Reads and parses .hg/requires and checks if all entries found
963 965 are in the list of supported features.'''
964 966 requirements = set(opener.read("requires").splitlines())
965 967 missings = []
966 968 for r in requirements:
967 969 if r not in supported:
968 970 if not r or not r[0].isalnum():
969 971 raise error.RequirementError(_(".hg/requires file is corrupt"))
970 972 missings.append(r)
971 973 missings.sort()
972 974 if missings:
973 975 raise error.RequirementError(
974 976 _("repository requires features unknown to this Mercurial: %s")
975 977 % " ".join(missings),
976 978 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
977 979 " for more information"))
978 980 return requirements
979 981
980 982 def writerequires(opener, requirements):
981 983 reqfile = opener("requires", "w")
982 984 for r in sorted(requirements):
983 985 reqfile.write("%s\n" % r)
984 986 reqfile.close()
985 987
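# Illustrative sketch, not part of this file: the requires file is one
# feature name per line; this is readrequires() above without the opener
# abstraction or the Mercurial-specific error types.
def readrequiresfile(path, supported):
    fp = open(path)
    try:
        requirements = set(fp.read().splitlines())
    finally:
        fp.close()
    missing = sorted(r for r in requirements if r not in supported)
    if missing:
        raise ValueError('unsupported repository features: %s'
                         % ' '.join(missing))
    return requirements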
986 988 class filecachesubentry(object):
987 989 def __init__(self, path, stat):
988 990 self.path = path
989 991 self.cachestat = None
990 992 self._cacheable = None
991 993
992 994 if stat:
993 995 self.cachestat = filecachesubentry.stat(self.path)
994 996
995 997 if self.cachestat:
996 998 self._cacheable = self.cachestat.cacheable()
997 999 else:
998 1000 # None means we don't know yet
999 1001 self._cacheable = None
1000 1002
1001 1003 def refresh(self):
1002 1004 if self.cacheable():
1003 1005 self.cachestat = filecachesubentry.stat(self.path)
1004 1006
1005 1007 def cacheable(self):
1006 1008 if self._cacheable is not None:
1007 1009 return self._cacheable
1008 1010
1009 1011 # we don't know yet, assume it is for now
1010 1012 return True
1011 1013
1012 1014 def changed(self):
1013 1015 # no point in going further if we can't cache it
1014 1016 if not self.cacheable():
1015 1017 return True
1016 1018
1017 1019 newstat = filecachesubentry.stat(self.path)
1018 1020
1019 1021 # we may not know if it's cacheable yet, check again now
1020 1022 if newstat and self._cacheable is None:
1021 1023 self._cacheable = newstat.cacheable()
1022 1024
1023 1025 # check again
1024 1026 if not self._cacheable:
1025 1027 return True
1026 1028
1027 1029 if self.cachestat != newstat:
1028 1030 self.cachestat = newstat
1029 1031 return True
1030 1032 else:
1031 1033 return False
1032 1034
1033 1035 @staticmethod
1034 1036 def stat(path):
1035 1037 try:
1036 1038 return util.cachestat(path)
1037 1039 except OSError as e:
1038 1040 if e.errno != errno.ENOENT:
1039 1041 raise
1040 1042
1041 1043 class filecacheentry(object):
1042 1044 def __init__(self, paths, stat=True):
1043 1045 self._entries = []
1044 1046 for path in paths:
1045 1047 self._entries.append(filecachesubentry(path, stat))
1046 1048
1047 1049 def changed(self):
1048 1050 '''true if any entry has changed'''
1049 1051 for entry in self._entries:
1050 1052 if entry.changed():
1051 1053 return True
1052 1054 return False
1053 1055
1054 1056 def refresh(self):
1055 1057 for entry in self._entries:
1056 1058 entry.refresh()
1057 1059
1058 1060 class filecache(object):
1059 1061 '''A property-like decorator that tracks files under .hg/ for updates.
1060 1062
1061 1063 Records stat info when called in _filecache.
1062 1064
1063 1065 On subsequent calls, compares old stat info with new info, and recreates the
1064 1066 object when any of the files changes, updating the new stat info in
1065 1067 _filecache.
1066 1068
1067 1069 Mercurial either atomically renames or appends files under .hg,
1068 1070 so to ensure the cache is reliable we need the filesystem to be able
1069 1071 to tell us if a file has been replaced. If it can't, we fall back to
1070 1072 recreating the object on every call (essentially the same behavior as
1071 1073 propertycache).
1072 1074
1073 1075 '''
1074 1076 def __init__(self, *paths):
1075 1077 self.paths = paths
1076 1078
1077 1079 def join(self, obj, fname):
1078 1080 """Used to compute the runtime path of a cached file.
1079 1081
1080 1082 Users should subclass filecache and provide their own version of this
1081 1083 function to call the appropriate join function on 'obj' (an instance
1082 1084 of the class whose member function was decorated).
1083 1085 """
1084 1086 return obj.join(fname)
1085 1087
1086 1088 def __call__(self, func):
1087 1089 self.func = func
1088 1090 self.name = func.__name__
1089 1091 return self
1090 1092
1091 1093 def __get__(self, obj, type=None):
1092 1094 # do we need to check if the file changed?
1093 1095 if self.name in obj.__dict__:
1094 1096 assert self.name in obj._filecache, self.name
1095 1097 return obj.__dict__[self.name]
1096 1098
1097 1099 entry = obj._filecache.get(self.name)
1098 1100
1099 1101 if entry:
1100 1102 if entry.changed():
1101 1103 entry.obj = self.func(obj)
1102 1104 else:
1103 1105 paths = [self.join(obj, path) for path in self.paths]
1104 1106
1105 1107 # We stat -before- creating the object so our cache doesn't lie if
1106 1108 # a writer modified the file between the time we read and stat it
1107 1109 entry = filecacheentry(paths, True)
1108 1110 entry.obj = self.func(obj)
1109 1111
1110 1112 obj._filecache[self.name] = entry
1111 1113
1112 1114 obj.__dict__[self.name] = entry.obj
1113 1115 return entry.obj
1114 1116
1115 1117 def __set__(self, obj, value):
1116 1118 if self.name not in obj._filecache:
1117 1119 # we add an entry for the missing value because X in __dict__
1118 1120 # implies X in _filecache
1119 1121 paths = [self.join(obj, path) for path in self.paths]
1120 1122 ce = filecacheentry(paths, False)
1121 1123 obj._filecache[self.name] = ce
1122 1124 else:
1123 1125 ce = obj._filecache[self.name]
1124 1126
1125 1127 ce.obj = value # update cached copy
1126 1128 obj.__dict__[self.name] = value # update copy returned by obj.x
1127 1129
1128 1130 def __delete__(self, obj):
1129 1131 try:
1130 1132 del obj.__dict__[self.name]
1131 1133 except KeyError:
1132 1134 raise AttributeError(self.name)
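# Illustrative sketch, not part of this file: a hypothetical consumer of the
# filecache decorator defined above. The fakerepo class and its on-disk
# layout are invented; real users are repository classes that provide their
# own join() and a _filecache dict.
import os
class fakerepo(object):
    def __init__(self, root):
        self._root = root
        self._filecache = {}            # required by filecache.__get__/__set__
    def join(self, fname):
        return os.path.join(self._root, fname)
    @filecache('requires')
    def requires(self):
        fp = open(self.join('requires'))
        try:
            return fp.read().splitlines()
        finally:
            fp.close()
# repo = fakerepo('.hg'); repo.requires is recomputed only when the stat
# information of .hg/requires changes on disk.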