revpair: drop useless conditional...
Pierre-Yves David
r20819:202291a2 default
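This commit removes a guard in revpair() that could never fire: the function returns (working directory parent, None) up front when no revisions are given, so by the time the resolved set l is found to be empty, revs is guaranteed non-empty and the only reachable outcome is the abort. Dropping the conditional does not change behaviour. As a reading aid, here is the function condensed from the new version in the diff below (only the changed hunk differs from the old version):

    def revpair(repo, revs):
        if not revs:
            return repo.dirstate.p1(), None

        l = revrange(repo, revs)

        if len(l) == 0:
            # revs is non-empty at this point, so an empty result is always an error
            raise util.Abort(_('empty revision range'))

        if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
            return repo.lookup(l[0]), None

        return repo.lookup(l[0]), repo.lookup(l[-1])

Callers typically unpack the result as node1, node2 = revpair(repo, revs), with node2 being None when only a single revision was requested (these variable names are illustrative, not taken from this file).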
@@ -1,934 +1,932 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 def itersubrepos(ctx1, ctx2):
24 24 """find subrepos in ctx1 or ctx2"""
25 25 # Create a (subpath, ctx) mapping where we prefer subpaths from
26 26 # ctx1. The subpaths from ctx2 are important when the .hgsub file
27 27 # has been modified (in ctx2) but not yet committed (in ctx1).
28 28 subpaths = dict.fromkeys(ctx2.substate, ctx2)
29 29 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
30 30 for subpath, ctx in sorted(subpaths.iteritems()):
31 31 yield subpath, ctx.sub(subpath)
32 32
33 33 def nochangesfound(ui, repo, excluded=None):
34 34 '''Report no changes for push/pull, excluded is None or a list of
35 35 nodes excluded from the push/pull.
36 36 '''
37 37 secretlist = []
38 38 if excluded:
39 39 for n in excluded:
40 40 if n not in repo:
41 41 # discovery should not have included the filtered revision,
42 42 # we have to explicitly exclude it until discovery is cleanup.
43 43 continue
44 44 ctx = repo[n]
45 45 if ctx.phase() >= phases.secret and not ctx.extinct():
46 46 secretlist.append(n)
47 47
48 48 if secretlist:
49 49 ui.status(_("no changes found (ignored %d secret changesets)\n")
50 50 % len(secretlist))
51 51 else:
52 52 ui.status(_("no changes found\n"))
53 53
54 54 def checknewlabel(repo, lbl, kind):
55 55 # Do not use the "kind" parameter in ui output.
56 56 # It makes strings difficult to translate.
57 57 if lbl in ['tip', '.', 'null']:
58 58 raise util.Abort(_("the name '%s' is reserved") % lbl)
59 59 for c in (':', '\0', '\n', '\r'):
60 60 if c in lbl:
61 61 raise util.Abort(_("%r cannot be used in a name") % c)
62 62 try:
63 63 int(lbl)
64 64 raise util.Abort(_("cannot use an integer as a name"))
65 65 except ValueError:
66 66 pass
67 67
68 68 def checkfilename(f):
69 69 '''Check that the filename f is an acceptable filename for a tracked file'''
70 70 if '\r' in f or '\n' in f:
71 71 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
72 72
73 73 def checkportable(ui, f):
74 74 '''Check if filename f is portable and warn or abort depending on config'''
75 75 checkfilename(f)
76 76 abort, warn = checkportabilityalert(ui)
77 77 if abort or warn:
78 78 msg = util.checkwinfilename(f)
79 79 if msg:
80 80 msg = "%s: %r" % (msg, f)
81 81 if abort:
82 82 raise util.Abort(msg)
83 83 ui.warn(_("warning: %s\n") % msg)
84 84
85 85 def checkportabilityalert(ui):
86 86 '''check if the user's config requests nothing, a warning, or abort for
87 87 non-portable filenames'''
88 88 val = ui.config('ui', 'portablefilenames', 'warn')
89 89 lval = val.lower()
90 90 bval = util.parsebool(val)
91 91 abort = os.name == 'nt' or lval == 'abort'
92 92 warn = bval or lval == 'warn'
93 93 if bval is None and not (warn or abort or lval == 'ignore'):
94 94 raise error.ConfigError(
95 95 _("ui.portablefilenames value is invalid ('%s')") % val)
96 96 return abort, warn
97 97
98 98 class casecollisionauditor(object):
99 99 def __init__(self, ui, abort, dirstate):
100 100 self._ui = ui
101 101 self._abort = abort
102 102 allfiles = '\0'.join(dirstate._map)
103 103 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
104 104 self._dirstate = dirstate
105 105 # The purpose of _newfiles is so that we don't complain about
106 106 # case collisions if someone were to call this object with the
107 107 # same filename twice.
108 108 self._newfiles = set()
109 109
110 110 def __call__(self, f):
111 111 if f in self._newfiles:
112 112 return
113 113 fl = encoding.lower(f)
114 114 if fl in self._loweredfiles and f not in self._dirstate:
115 115 msg = _('possible case-folding collision for %s') % f
116 116 if self._abort:
117 117 raise util.Abort(msg)
118 118 self._ui.warn(_("warning: %s\n") % msg)
119 119 self._loweredfiles.add(fl)
120 120 self._newfiles.add(f)
121 121
122 122 class abstractvfs(object):
123 123 """Abstract base class; cannot be instantiated"""
124 124
125 125 def __init__(self, *args, **kwargs):
126 126 '''Prevent instantiation; don't call this from subclasses.'''
127 127 raise NotImplementedError('attempted instantiating ' + str(type(self)))
128 128
129 129 def tryread(self, path):
130 130 '''gracefully return an empty string for missing files'''
131 131 try:
132 132 return self.read(path)
133 133 except IOError, inst:
134 134 if inst.errno != errno.ENOENT:
135 135 raise
136 136 return ""
137 137
138 138 def open(self, path, mode="r", text=False, atomictemp=False):
139 139 self.open = self.__call__
140 140 return self.__call__(path, mode, text, atomictemp)
141 141
142 142 def read(self, path):
143 143 fp = self(path, 'rb')
144 144 try:
145 145 return fp.read()
146 146 finally:
147 147 fp.close()
148 148
149 149 def write(self, path, data):
150 150 fp = self(path, 'wb')
151 151 try:
152 152 return fp.write(data)
153 153 finally:
154 154 fp.close()
155 155
156 156 def append(self, path, data):
157 157 fp = self(path, 'ab')
158 158 try:
159 159 return fp.write(data)
160 160 finally:
161 161 fp.close()
162 162
163 163 def chmod(self, path, mode):
164 164 return os.chmod(self.join(path), mode)
165 165
166 166 def exists(self, path=None):
167 167 return os.path.exists(self.join(path))
168 168
169 169 def fstat(self, fp):
170 170 return util.fstat(fp)
171 171
172 172 def isdir(self, path=None):
173 173 return os.path.isdir(self.join(path))
174 174
175 175 def isfile(self, path=None):
176 176 return os.path.isfile(self.join(path))
177 177
178 178 def islink(self, path=None):
179 179 return os.path.islink(self.join(path))
180 180
181 181 def lstat(self, path=None):
182 182 return os.lstat(self.join(path))
183 183
184 184 def makedir(self, path=None, notindexed=True):
185 185 return util.makedir(self.join(path), notindexed)
186 186
187 187 def makedirs(self, path=None, mode=None):
188 188 return util.makedirs(self.join(path), mode)
189 189
190 190 def makelock(self, info, path):
191 191 return util.makelock(info, self.join(path))
192 192
193 193 def mkdir(self, path=None):
194 194 return os.mkdir(self.join(path))
195 195
196 196 def readdir(self, path=None, stat=None, skip=None):
197 197 return osutil.listdir(self.join(path), stat, skip)
198 198
199 199 def readlock(self, path):
200 200 return util.readlock(self.join(path))
201 201
202 202 def rename(self, src, dst):
203 203 return util.rename(self.join(src), self.join(dst))
204 204
205 205 def readlink(self, path):
206 206 return os.readlink(self.join(path))
207 207
208 208 def setflags(self, path, l, x):
209 209 return util.setflags(self.join(path), l, x)
210 210
211 211 def stat(self, path=None):
212 212 return os.stat(self.join(path))
213 213
214 214 def unlink(self, path=None):
215 215 return util.unlink(self.join(path))
216 216
217 217 def utime(self, path=None, t=None):
218 218 return os.utime(self.join(path), t)
219 219
220 220 class vfs(abstractvfs):
221 221 '''Operate files relative to a base directory
222 222
223 223 This class is used to hide the details of COW semantics and
224 224 remote file access from higher level code.
225 225 '''
226 226 def __init__(self, base, audit=True, expandpath=False, realpath=False):
227 227 if expandpath:
228 228 base = util.expandpath(base)
229 229 if realpath:
230 230 base = os.path.realpath(base)
231 231 self.base = base
232 232 self._setmustaudit(audit)
233 233 self.createmode = None
234 234 self._trustnlink = None
235 235
236 236 def _getmustaudit(self):
237 237 return self._audit
238 238
239 239 def _setmustaudit(self, onoff):
240 240 self._audit = onoff
241 241 if onoff:
242 242 self.audit = pathutil.pathauditor(self.base)
243 243 else:
244 244 self.audit = util.always
245 245
246 246 mustaudit = property(_getmustaudit, _setmustaudit)
247 247
248 248 @util.propertycache
249 249 def _cansymlink(self):
250 250 return util.checklink(self.base)
251 251
252 252 @util.propertycache
253 253 def _chmod(self):
254 254 return util.checkexec(self.base)
255 255
256 256 def _fixfilemode(self, name):
257 257 if self.createmode is None or not self._chmod:
258 258 return
259 259 os.chmod(name, self.createmode & 0666)
260 260
261 261 def __call__(self, path, mode="r", text=False, atomictemp=False):
262 262 if self._audit:
263 263 r = util.checkosfilename(path)
264 264 if r:
265 265 raise util.Abort("%s: %r" % (r, path))
266 266 self.audit(path)
267 267 f = self.join(path)
268 268
269 269 if not text and "b" not in mode:
270 270 mode += "b" # for that other OS
271 271
272 272 nlink = -1
273 273 if mode not in ('r', 'rb'):
274 274 dirname, basename = util.split(f)
275 275 # If basename is empty, then the path is malformed because it points
276 276 # to a directory. Let the posixfile() call below raise IOError.
277 277 if basename:
278 278 if atomictemp:
279 279 util.ensuredirs(dirname, self.createmode)
280 280 return util.atomictempfile(f, mode, self.createmode)
281 281 try:
282 282 if 'w' in mode:
283 283 util.unlink(f)
284 284 nlink = 0
285 285 else:
286 286 # nlinks() may behave differently for files on Windows
287 287 # shares if the file is open.
288 288 fd = util.posixfile(f)
289 289 nlink = util.nlinks(f)
290 290 if nlink < 1:
291 291 nlink = 2 # force mktempcopy (issue1922)
292 292 fd.close()
293 293 except (OSError, IOError), e:
294 294 if e.errno != errno.ENOENT:
295 295 raise
296 296 nlink = 0
297 297 util.ensuredirs(dirname, self.createmode)
298 298 if nlink > 0:
299 299 if self._trustnlink is None:
300 300 self._trustnlink = nlink > 1 or util.checknlink(f)
301 301 if nlink > 1 or not self._trustnlink:
302 302 util.rename(util.mktempcopy(f), f)
303 303 fp = util.posixfile(f, mode)
304 304 if nlink == 0:
305 305 self._fixfilemode(f)
306 306 return fp
307 307
308 308 def symlink(self, src, dst):
309 309 self.audit(dst)
310 310 linkname = self.join(dst)
311 311 try:
312 312 os.unlink(linkname)
313 313 except OSError:
314 314 pass
315 315
316 316 util.ensuredirs(os.path.dirname(linkname), self.createmode)
317 317
318 318 if self._cansymlink:
319 319 try:
320 320 os.symlink(src, linkname)
321 321 except OSError, err:
322 322 raise OSError(err.errno, _('could not symlink to %r: %s') %
323 323 (src, err.strerror), linkname)
324 324 else:
325 325 self.write(dst, src)
326 326
327 327 def join(self, path):
328 328 if path:
329 329 return os.path.join(self.base, path)
330 330 else:
331 331 return self.base
332 332
333 333 opener = vfs
334 334
335 335 class auditvfs(object):
336 336 def __init__(self, vfs):
337 337 self.vfs = vfs
338 338
339 339 def _getmustaudit(self):
340 340 return self.vfs.mustaudit
341 341
342 342 def _setmustaudit(self, onoff):
343 343 self.vfs.mustaudit = onoff
344 344
345 345 mustaudit = property(_getmustaudit, _setmustaudit)
346 346
347 347 class filtervfs(abstractvfs, auditvfs):
348 348 '''Wrapper vfs for filtering filenames with a function.'''
349 349
350 350 def __init__(self, vfs, filter):
351 351 auditvfs.__init__(self, vfs)
352 352 self._filter = filter
353 353
354 354 def __call__(self, path, *args, **kwargs):
355 355 return self.vfs(self._filter(path), *args, **kwargs)
356 356
357 357 def join(self, path):
358 358 if path:
359 359 return self.vfs.join(self._filter(path))
360 360 else:
361 361 return self.vfs.join(path)
362 362
363 363 filteropener = filtervfs
364 364
365 365 class readonlyvfs(abstractvfs, auditvfs):
366 366 '''Wrapper vfs preventing any writing.'''
367 367
368 368 def __init__(self, vfs):
369 369 auditvfs.__init__(self, vfs)
370 370
371 371 def __call__(self, path, mode='r', *args, **kw):
372 372 if mode not in ('r', 'rb'):
373 373 raise util.Abort('this vfs is read only')
374 374 return self.vfs(path, mode, *args, **kw)
375 375
376 376
377 377 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
378 378 '''yield every hg repository under path, always recursively.
379 379 The recurse flag will only control recursion into repo working dirs'''
380 380 def errhandler(err):
381 381 if err.filename == path:
382 382 raise err
383 383 samestat = getattr(os.path, 'samestat', None)
384 384 if followsym and samestat is not None:
385 385 def adddir(dirlst, dirname):
386 386 match = False
387 387 dirstat = os.stat(dirname)
388 388 for lstdirstat in dirlst:
389 389 if samestat(dirstat, lstdirstat):
390 390 match = True
391 391 break
392 392 if not match:
393 393 dirlst.append(dirstat)
394 394 return not match
395 395 else:
396 396 followsym = False
397 397
398 398 if (seen_dirs is None) and followsym:
399 399 seen_dirs = []
400 400 adddir(seen_dirs, path)
401 401 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
402 402 dirs.sort()
403 403 if '.hg' in dirs:
404 404 yield root # found a repository
405 405 qroot = os.path.join(root, '.hg', 'patches')
406 406 if os.path.isdir(os.path.join(qroot, '.hg')):
407 407 yield qroot # we have a patch queue repo here
408 408 if recurse:
409 409 # avoid recursing inside the .hg directory
410 410 dirs.remove('.hg')
411 411 else:
412 412 dirs[:] = [] # don't descend further
413 413 elif followsym:
414 414 newdirs = []
415 415 for d in dirs:
416 416 fname = os.path.join(root, d)
417 417 if adddir(seen_dirs, fname):
418 418 if os.path.islink(fname):
419 419 for hgname in walkrepos(fname, True, seen_dirs):
420 420 yield hgname
421 421 else:
422 422 newdirs.append(d)
423 423 dirs[:] = newdirs
424 424
425 425 def osrcpath():
426 426 '''return default os-specific hgrc search path'''
427 427 path = systemrcpath()
428 428 path.extend(userrcpath())
429 429 path = [os.path.normpath(f) for f in path]
430 430 return path
431 431
432 432 _rcpath = None
433 433
434 434 def rcpath():
435 435 '''return hgrc search path. if env var HGRCPATH is set, use it.
436 436 for each item in path, if directory, use files ending in .rc,
437 437 else use item.
438 438 make HGRCPATH empty to only look in .hg/hgrc of current repo.
439 439 if no HGRCPATH, use default os-specific path.'''
440 440 global _rcpath
441 441 if _rcpath is None:
442 442 if 'HGRCPATH' in os.environ:
443 443 _rcpath = []
444 444 for p in os.environ['HGRCPATH'].split(os.pathsep):
445 445 if not p:
446 446 continue
447 447 p = util.expandpath(p)
448 448 if os.path.isdir(p):
449 449 for f, kind in osutil.listdir(p):
450 450 if f.endswith('.rc'):
451 451 _rcpath.append(os.path.join(p, f))
452 452 else:
453 453 _rcpath.append(p)
454 454 else:
455 455 _rcpath = osrcpath()
456 456 return _rcpath
457 457
458 458 def revsingle(repo, revspec, default='.'):
459 459 if not revspec and revspec != 0:
460 460 return repo[default]
461 461
462 462 l = revrange(repo, [revspec])
463 463 if len(l) < 1:
464 464 raise util.Abort(_('empty revision set'))
465 465 return repo[l[-1]]
466 466
467 467 def revpair(repo, revs):
468 468 if not revs:
469 469 return repo.dirstate.p1(), None
470 470
471 471 l = revrange(repo, revs)
472 472
473 473 if len(l) == 0:
474     -  if revs:
475     -  raise util.Abort(_('empty revision range'))
476     -  return repo.dirstate.p1(), None
    474 +  raise util.Abort(_('empty revision range'))
477 475
478 476 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
479 477 return repo.lookup(l[0]), None
480 478
481 479 return repo.lookup(l[0]), repo.lookup(l[-1])
482 480
483 481 _revrangesep = ':'
484 482
485 483 def revrange(repo, revs):
486 484 """Yield revision as strings from a list of revision specifications."""
487 485
488 486 def revfix(repo, val, defval):
489 487 if not val and val != 0 and defval is not None:
490 488 return defval
491 489 return repo[val].rev()
492 490
493 491 seen, l = set(), revset.baseset([])
494 492 for spec in revs:
495 493 if l and not seen:
496 494 seen = set(l)
497 495 # attempt to parse old-style ranges first to deal with
498 496 # things like old-tag which contain query metacharacters
499 497 try:
500 498 if isinstance(spec, int):
501 499 seen.add(spec)
502 500 l = l + revset.baseset([spec])
503 501 continue
504 502
505 503 if _revrangesep in spec:
506 504 start, end = spec.split(_revrangesep, 1)
507 505 start = revfix(repo, start, 0)
508 506 end = revfix(repo, end, len(repo) - 1)
509 507 if end == nullrev and start < 0:
510 508 start = nullrev
511 509 rangeiter = repo.changelog.revs(start, end)
512 510 if not seen and not l:
513 511 # by far the most common case: revs = ["-1:0"]
514 512 l = revset.baseset(rangeiter)
515 513 # defer syncing seen until next iteration
516 514 continue
517 515 newrevs = set(rangeiter)
518 516 if seen:
519 517 newrevs.difference_update(seen)
520 518 seen.update(newrevs)
521 519 else:
522 520 seen = newrevs
523 521 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
524 522 continue
525 523 elif spec and spec in repo: # single unquoted rev
526 524 rev = revfix(repo, spec, None)
527 525 if rev in seen:
528 526 continue
529 527 seen.add(rev)
530 528 l = l + revset.baseset([rev])
531 529 continue
532 530 except error.RepoLookupError:
533 531 pass
534 532
535 533 # fall through to new-style queries if old-style fails
536 534 m = revset.match(repo.ui, spec, repo)
537 535 if seen or l:
538 536 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
539 537 l = l + revset.baseset(dl)
540 538 seen.update(dl)
541 539 else:
542 540 l = m(repo, revset.spanset(repo))
543 541
544 542 return l
545 543
546 544 def expandpats(pats):
547 545 if not util.expandglobs:
548 546 return list(pats)
549 547 ret = []
550 548 for p in pats:
551 549 kind, name = matchmod._patsplit(p, None)
552 550 if kind is None:
553 551 try:
554 552 globbed = glob.glob(name)
555 553 except re.error:
556 554 globbed = [name]
557 555 if globbed:
558 556 ret.extend(globbed)
559 557 continue
560 558 ret.append(p)
561 559 return ret
562 560
563 561 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
564 562 if pats == ("",):
565 563 pats = []
566 564 if not globbed and default == 'relpath':
567 565 pats = expandpats(pats or [])
568 566
569 567 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
570 568 default)
571 569 def badfn(f, msg):
572 570 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
573 571 m.bad = badfn
574 572 return m, pats
575 573
576 574 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
577 575 return matchandpats(ctx, pats, opts, globbed, default)[0]
578 576
579 577 def matchall(repo):
580 578 return matchmod.always(repo.root, repo.getcwd())
581 579
582 580 def matchfiles(repo, files):
583 581 return matchmod.exact(repo.root, repo.getcwd(), files)
584 582
585 583 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
586 584 if dry_run is None:
587 585 dry_run = opts.get('dry_run')
588 586 if similarity is None:
589 587 similarity = float(opts.get('similarity') or 0)
590 588 # we'd use status here, except handling of symlinks and ignore is tricky
591 589 m = match(repo[None], pats, opts)
592 590 rejected = []
593 591 m.bad = lambda x, y: rejected.append(x)
594 592
595 593 added, unknown, deleted, removed = _interestingfiles(repo, m)
596 594
597 595 unknownset = set(unknown)
598 596 toprint = unknownset.copy()
599 597 toprint.update(deleted)
600 598 for abs in sorted(toprint):
601 599 if repo.ui.verbose or not m.exact(abs):
602 600 rel = m.rel(abs)
603 601 if abs in unknownset:
604 602 status = _('adding %s\n') % ((pats and rel) or abs)
605 603 else:
606 604 status = _('removing %s\n') % ((pats and rel) or abs)
607 605 repo.ui.status(status)
608 606
609 607 renames = _findrenames(repo, m, added + unknown, removed + deleted,
610 608 similarity)
611 609
612 610 if not dry_run:
613 611 _markchanges(repo, unknown, deleted, renames)
614 612
615 613 for f in rejected:
616 614 if f in m.files():
617 615 return 1
618 616 return 0
619 617
620 618 def marktouched(repo, files, similarity=0.0):
621 619 '''Assert that files have somehow been operated upon. files are relative to
622 620 the repo root.'''
623 621 m = matchfiles(repo, files)
624 622 rejected = []
625 623 m.bad = lambda x, y: rejected.append(x)
626 624
627 625 added, unknown, deleted, removed = _interestingfiles(repo, m)
628 626
629 627 if repo.ui.verbose:
630 628 unknownset = set(unknown)
631 629 toprint = unknownset.copy()
632 630 toprint.update(deleted)
633 631 for abs in sorted(toprint):
634 632 if abs in unknownset:
635 633 status = _('adding %s\n') % abs
636 634 else:
637 635 status = _('removing %s\n') % abs
638 636 repo.ui.status(status)
639 637
640 638 renames = _findrenames(repo, m, added + unknown, removed + deleted,
641 639 similarity)
642 640
643 641 _markchanges(repo, unknown, deleted, renames)
644 642
645 643 for f in rejected:
646 644 if f in m.files():
647 645 return 1
648 646 return 0
649 647
650 648 def _interestingfiles(repo, matcher):
651 649 '''Walk dirstate with matcher, looking for files that addremove would care
652 650 about.
653 651
654 652 This is different from dirstate.status because it doesn't care about
655 653 whether files are modified or clean.'''
656 654 added, unknown, deleted, removed = [], [], [], []
657 655 audit_path = pathutil.pathauditor(repo.root)
658 656
659 657 ctx = repo[None]
660 658 dirstate = repo.dirstate
661 659 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
662 660 full=False)
663 661 for abs, st in walkresults.iteritems():
664 662 dstate = dirstate[abs]
665 663 if dstate == '?' and audit_path.check(abs):
666 664 unknown.append(abs)
667 665 elif dstate != 'r' and not st:
668 666 deleted.append(abs)
669 667 # for finding renames
670 668 elif dstate == 'r':
671 669 removed.append(abs)
672 670 elif dstate == 'a':
673 671 added.append(abs)
674 672
675 673 return added, unknown, deleted, removed
676 674
677 675 def _findrenames(repo, matcher, added, removed, similarity):
678 676 '''Find renames from removed files to added ones.'''
679 677 renames = {}
680 678 if similarity > 0:
681 679 for old, new, score in similar.findrenames(repo, added, removed,
682 680 similarity):
683 681 if (repo.ui.verbose or not matcher.exact(old)
684 682 or not matcher.exact(new)):
685 683 repo.ui.status(_('recording removal of %s as rename to %s '
686 684 '(%d%% similar)\n') %
687 685 (matcher.rel(old), matcher.rel(new),
688 686 score * 100))
689 687 renames[new] = old
690 688 return renames
691 689
692 690 def _markchanges(repo, unknown, deleted, renames):
693 691 '''Marks the files in unknown as added, the files in deleted as removed,
694 692 and the files in renames as copied.'''
695 693 wctx = repo[None]
696 694 wlock = repo.wlock()
697 695 try:
698 696 wctx.forget(deleted)
699 697 wctx.add(unknown)
700 698 for new, old in renames.iteritems():
701 699 wctx.copy(old, new)
702 700 finally:
703 701 wlock.release()
704 702
705 703 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
706 704 """Update the dirstate to reflect the intent of copying src to dst. For
707 705 different reasons it might not end with dst being marked as copied from src.
708 706 """
709 707 origsrc = repo.dirstate.copied(src) or src
710 708 if dst == origsrc: # copying back a copy?
711 709 if repo.dirstate[dst] not in 'mn' and not dryrun:
712 710 repo.dirstate.normallookup(dst)
713 711 else:
714 712 if repo.dirstate[origsrc] == 'a' and origsrc == src:
715 713 if not ui.quiet:
716 714 ui.warn(_("%s has not been committed yet, so no copy "
717 715 "data will be stored for %s.\n")
718 716 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
719 717 if repo.dirstate[dst] in '?r' and not dryrun:
720 718 wctx.add([dst])
721 719 elif not dryrun:
722 720 wctx.copy(origsrc, dst)
723 721
724 722 def readrequires(opener, supported):
725 723 '''Reads and parses .hg/requires and checks if all entries found
726 724 are in the list of supported features.'''
727 725 requirements = set(opener.read("requires").splitlines())
728 726 missings = []
729 727 for r in requirements:
730 728 if r not in supported:
731 729 if not r or not r[0].isalnum():
732 730 raise error.RequirementError(_(".hg/requires file is corrupt"))
733 731 missings.append(r)
734 732 missings.sort()
735 733 if missings:
736 734 raise error.RequirementError(
737 735 _("unknown repository format: requires features '%s' (upgrade "
738 736 "Mercurial)") % "', '".join(missings),
739 737 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
740 738 " for details"))
741 739 return requirements
742 740
743 741 class filecachesubentry(object):
744 742 def __init__(self, path, stat):
745 743 self.path = path
746 744 self.cachestat = None
747 745 self._cacheable = None
748 746
749 747 if stat:
750 748 self.cachestat = filecachesubentry.stat(self.path)
751 749
752 750 if self.cachestat:
753 751 self._cacheable = self.cachestat.cacheable()
754 752 else:
755 753 # None means we don't know yet
756 754 self._cacheable = None
757 755
758 756 def refresh(self):
759 757 if self.cacheable():
760 758 self.cachestat = filecachesubentry.stat(self.path)
761 759
762 760 def cacheable(self):
763 761 if self._cacheable is not None:
764 762 return self._cacheable
765 763
766 764 # we don't know yet, assume it is for now
767 765 return True
768 766
769 767 def changed(self):
770 768 # no point in going further if we can't cache it
771 769 if not self.cacheable():
772 770 return True
773 771
774 772 newstat = filecachesubentry.stat(self.path)
775 773
776 774 # we may not know if it's cacheable yet, check again now
777 775 if newstat and self._cacheable is None:
778 776 self._cacheable = newstat.cacheable()
779 777
780 778 # check again
781 779 if not self._cacheable:
782 780 return True
783 781
784 782 if self.cachestat != newstat:
785 783 self.cachestat = newstat
786 784 return True
787 785 else:
788 786 return False
789 787
790 788 @staticmethod
791 789 def stat(path):
792 790 try:
793 791 return util.cachestat(path)
794 792 except OSError, e:
795 793 if e.errno != errno.ENOENT:
796 794 raise
797 795
798 796 class filecacheentry(object):
799 797 def __init__(self, paths, stat=True):
800 798 self._entries = []
801 799 for path in paths:
802 800 self._entries.append(filecachesubentry(path, stat))
803 801
804 802 def changed(self):
805 803 '''true if any entry has changed'''
806 804 for entry in self._entries:
807 805 if entry.changed():
808 806 return True
809 807 return False
810 808
811 809 def refresh(self):
812 810 for entry in self._entries:
813 811 entry.refresh()
814 812
815 813 class filecache(object):
816 814 '''A property like decorator that tracks files under .hg/ for updates.
817 815
818 816 Records stat info when called in _filecache.
819 817
820 818 On subsequent calls, compares old stat info with new info, and recreates the
821 819 object when any of the files changes, updating the new stat info in
822 820 _filecache.
823 821
824 822 Mercurial either atomic renames or appends for files under .hg,
825 823 so to ensure the cache is reliable we need the filesystem to be able
826 824 to tell us if a file has been replaced. If it can't, we fallback to
827 825 recreating the object on every call (essentially the same behaviour as
828 826 propertycache).
829 827
830 828 '''
831 829 def __init__(self, *paths):
832 830 self.paths = paths
833 831
834 832 def join(self, obj, fname):
835 833 """Used to compute the runtime path of a cached file.
836 834
837 835 Users should subclass filecache and provide their own version of this
838 836 function to call the appropriate join function on 'obj' (an instance
839 837 of the class that its member function was decorated).
840 838 """
841 839 return obj.join(fname)
842 840
843 841 def __call__(self, func):
844 842 self.func = func
845 843 self.name = func.__name__
846 844 return self
847 845
848 846 def __get__(self, obj, type=None):
849 847 # do we need to check if the file changed?
850 848 if self.name in obj.__dict__:
851 849 assert self.name in obj._filecache, self.name
852 850 return obj.__dict__[self.name]
853 851
854 852 entry = obj._filecache.get(self.name)
855 853
856 854 if entry:
857 855 if entry.changed():
858 856 entry.obj = self.func(obj)
859 857 else:
860 858 paths = [self.join(obj, path) for path in self.paths]
861 859
862 860 # We stat -before- creating the object so our cache doesn't lie if
863 861 # a writer modified between the time we read and stat
864 862 entry = filecacheentry(paths, True)
865 863 entry.obj = self.func(obj)
866 864
867 865 obj._filecache[self.name] = entry
868 866
869 867 obj.__dict__[self.name] = entry.obj
870 868 return entry.obj
871 869
872 870 def __set__(self, obj, value):
873 871 if self.name not in obj._filecache:
874 872 # we add an entry for the missing value because X in __dict__
875 873 # implies X in _filecache
876 874 paths = [self.join(obj, path) for path in self.paths]
877 875 ce = filecacheentry(paths, False)
878 876 obj._filecache[self.name] = ce
879 877 else:
880 878 ce = obj._filecache[self.name]
881 879
882 880 ce.obj = value # update cached copy
883 881 obj.__dict__[self.name] = value # update copy returned by obj.x
884 882
885 883 def __delete__(self, obj):
886 884 try:
887 885 del obj.__dict__[self.name]
888 886 except KeyError:
889 887 raise AttributeError(self.name)
890 888
891 889 class dirs(object):
892 890 '''a multiset of directory names from a dirstate or manifest'''
893 891
894 892 def __init__(self, map, skip=None):
895 893 self._dirs = {}
896 894 addpath = self.addpath
897 895 if util.safehasattr(map, 'iteritems') and skip is not None:
898 896 for f, s in map.iteritems():
899 897 if s[0] != skip:
900 898 addpath(f)
901 899 else:
902 900 for f in map:
903 901 addpath(f)
904 902
905 903 def addpath(self, path):
906 904 dirs = self._dirs
907 905 for base in finddirs(path):
908 906 if base in dirs:
909 907 dirs[base] += 1
910 908 return
911 909 dirs[base] = 1
912 910
913 911 def delpath(self, path):
914 912 dirs = self._dirs
915 913 for base in finddirs(path):
916 914 if dirs[base] > 1:
917 915 dirs[base] -= 1
918 916 return
919 917 del dirs[base]
920 918
921 919 def __iter__(self):
922 920 return self._dirs.iterkeys()
923 921
924 922 def __contains__(self, d):
925 923 return d in self._dirs
926 924
927 925 if util.safehasattr(parsers, 'dirs'):
928 926 dirs = parsers.dirs
929 927
930 928 def finddirs(path):
931 929 pos = path.rfind('/')
932 930 while pos != -1:
933 931 yield path[:pos]
934 932 pos = path.rfind('/', 0, pos)