##// END OF EJS Templates
scmutil: changed revrange code not to use append...
Lucas Moscovicz -
r20559:165b117f default
parent child Browse files
Show More
@@ -1,932 +1,932
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
def itersubrepos(ctx1, ctx2):
    """Yield (subpath, ctx) pairs for every subrepo in ctx1 or ctx2."""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)
32 32
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            if n not in repo:
                # discovery should not have included the filtered revision,
                # we have to explicitly exclude it until discovery is cleaned
                # up.
                continue
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
53 53
def checknewlabel(repo, lbl, kind):
    """Abort if lbl is not a valid new label (branch/bookmark/tag) name."""
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
        raise util.Abort(_("cannot use an integer as a name"))
    except ValueError:
        # non-numeric names are fine
        pass
67 67
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
72 72
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
84 84
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames

    Returns an (abort, warn) boolean pair. Raises ConfigError when
    ui.portablefilenames holds an unrecognized value.'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # on Windows non-portable names can't be created at all, so force abort
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
97 97
class casecollisionauditor(object):
    """Warn or abort when a new filename case-folds onto a tracked one."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked filename once up front; joining with
        # NUL and splitting again lowercases the whole dirstate in one call
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        """Check filename f; abort or warn on a case-folding collision."""
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
121 121
122 122 class abstractvfs(object):
123 123 """Abstract base class; cannot be instantiated"""
124 124
125 125 def __init__(self, *args, **kwargs):
126 126 '''Prevent instantiation; don't call this from subclasses.'''
127 127 raise NotImplementedError('attempted instantiating ' + str(type(self)))
128 128
129 129 def tryread(self, path):
130 130 '''gracefully return an empty string for missing files'''
131 131 try:
132 132 return self.read(path)
133 133 except IOError, inst:
134 134 if inst.errno != errno.ENOENT:
135 135 raise
136 136 return ""
137 137
138 138 def open(self, path, mode="r", text=False, atomictemp=False):
139 139 self.open = self.__call__
140 140 return self.__call__(path, mode, text, atomictemp)
141 141
142 142 def read(self, path):
143 143 fp = self(path, 'rb')
144 144 try:
145 145 return fp.read()
146 146 finally:
147 147 fp.close()
148 148
149 149 def write(self, path, data):
150 150 fp = self(path, 'wb')
151 151 try:
152 152 return fp.write(data)
153 153 finally:
154 154 fp.close()
155 155
156 156 def append(self, path, data):
157 157 fp = self(path, 'ab')
158 158 try:
159 159 return fp.write(data)
160 160 finally:
161 161 fp.close()
162 162
163 163 def chmod(self, path, mode):
164 164 return os.chmod(self.join(path), mode)
165 165
166 166 def exists(self, path=None):
167 167 return os.path.exists(self.join(path))
168 168
169 169 def fstat(self, fp):
170 170 return util.fstat(fp)
171 171
172 172 def isdir(self, path=None):
173 173 return os.path.isdir(self.join(path))
174 174
175 175 def isfile(self, path=None):
176 176 return os.path.isfile(self.join(path))
177 177
178 178 def islink(self, path=None):
179 179 return os.path.islink(self.join(path))
180 180
181 181 def lstat(self, path=None):
182 182 return os.lstat(self.join(path))
183 183
184 184 def makedir(self, path=None, notindexed=True):
185 185 return util.makedir(self.join(path), notindexed)
186 186
187 187 def makedirs(self, path=None, mode=None):
188 188 return util.makedirs(self.join(path), mode)
189 189
190 190 def makelock(self, info, path):
191 191 return util.makelock(info, self.join(path))
192 192
193 193 def mkdir(self, path=None):
194 194 return os.mkdir(self.join(path))
195 195
196 196 def readdir(self, path=None, stat=None, skip=None):
197 197 return osutil.listdir(self.join(path), stat, skip)
198 198
199 199 def readlock(self, path):
200 200 return util.readlock(self.join(path))
201 201
202 202 def rename(self, src, dst):
203 203 return util.rename(self.join(src), self.join(dst))
204 204
205 205 def readlink(self, path):
206 206 return os.readlink(self.join(path))
207 207
208 208 def setflags(self, path, l, x):
209 209 return util.setflags(self.join(path), l, x)
210 210
211 211 def stat(self, path=None):
212 212 return os.stat(self.join(path))
213 213
214 214 def unlink(self, path=None):
215 215 return util.unlink(self.join(path))
216 216
217 217 def utime(self, path=None, t=None):
218 218 return os.utime(self.join(path), t)
219 219
220 220 class vfs(abstractvfs):
221 221 '''Operate files relative to a base directory
222 222
223 223 This class is used to hide the details of COW semantics and
224 224 remote file access from higher level code.
225 225 '''
226 226 def __init__(self, base, audit=True, expandpath=False, realpath=False):
227 227 if expandpath:
228 228 base = util.expandpath(base)
229 229 if realpath:
230 230 base = os.path.realpath(base)
231 231 self.base = base
232 232 self._setmustaudit(audit)
233 233 self.createmode = None
234 234 self._trustnlink = None
235 235
236 236 def _getmustaudit(self):
237 237 return self._audit
238 238
239 239 def _setmustaudit(self, onoff):
240 240 self._audit = onoff
241 241 if onoff:
242 242 self.audit = pathutil.pathauditor(self.base)
243 243 else:
244 244 self.audit = util.always
245 245
246 246 mustaudit = property(_getmustaudit, _setmustaudit)
247 247
248 248 @util.propertycache
249 249 def _cansymlink(self):
250 250 return util.checklink(self.base)
251 251
252 252 @util.propertycache
253 253 def _chmod(self):
254 254 return util.checkexec(self.base)
255 255
256 256 def _fixfilemode(self, name):
257 257 if self.createmode is None or not self._chmod:
258 258 return
259 259 os.chmod(name, self.createmode & 0666)
260 260
261 261 def __call__(self, path, mode="r", text=False, atomictemp=False):
262 262 if self._audit:
263 263 r = util.checkosfilename(path)
264 264 if r:
265 265 raise util.Abort("%s: %r" % (r, path))
266 266 self.audit(path)
267 267 f = self.join(path)
268 268
269 269 if not text and "b" not in mode:
270 270 mode += "b" # for that other OS
271 271
272 272 nlink = -1
273 273 if mode not in ('r', 'rb'):
274 274 dirname, basename = util.split(f)
275 275 # If basename is empty, then the path is malformed because it points
276 276 # to a directory. Let the posixfile() call below raise IOError.
277 277 if basename:
278 278 if atomictemp:
279 279 util.ensuredirs(dirname, self.createmode)
280 280 return util.atomictempfile(f, mode, self.createmode)
281 281 try:
282 282 if 'w' in mode:
283 283 util.unlink(f)
284 284 nlink = 0
285 285 else:
286 286 # nlinks() may behave differently for files on Windows
287 287 # shares if the file is open.
288 288 fd = util.posixfile(f)
289 289 nlink = util.nlinks(f)
290 290 if nlink < 1:
291 291 nlink = 2 # force mktempcopy (issue1922)
292 292 fd.close()
293 293 except (OSError, IOError), e:
294 294 if e.errno != errno.ENOENT:
295 295 raise
296 296 nlink = 0
297 297 util.ensuredirs(dirname, self.createmode)
298 298 if nlink > 0:
299 299 if self._trustnlink is None:
300 300 self._trustnlink = nlink > 1 or util.checknlink(f)
301 301 if nlink > 1 or not self._trustnlink:
302 302 util.rename(util.mktempcopy(f), f)
303 303 fp = util.posixfile(f, mode)
304 304 if nlink == 0:
305 305 self._fixfilemode(f)
306 306 return fp
307 307
308 308 def symlink(self, src, dst):
309 309 self.audit(dst)
310 310 linkname = self.join(dst)
311 311 try:
312 312 os.unlink(linkname)
313 313 except OSError:
314 314 pass
315 315
316 316 util.ensuredirs(os.path.dirname(linkname), self.createmode)
317 317
318 318 if self._cansymlink:
319 319 try:
320 320 os.symlink(src, linkname)
321 321 except OSError, err:
322 322 raise OSError(err.errno, _('could not symlink to %r: %s') %
323 323 (src, err.strerror), linkname)
324 324 else:
325 325 self.write(dst, src)
326 326
327 327 def join(self, path):
328 328 if path:
329 329 return os.path.join(self.base, path)
330 330 else:
331 331 return self.base
332 332
333 333 opener = vfs
334 334
class auditvfs(object):
    """Mixin that forwards the mustaudit property to a wrapped vfs."""

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
346 346
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if path:
            return self.vfs.join(self._filter(path))
        else:
            return self.vfs.join(path)

filteropener = filtervfs
364 364
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes are allowed through
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
375 375
376 376
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # track visited directories by stat identity to break symlink cycles
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # can't detect cycles without samestat; don't follow symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
424 424
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = systemrcpath()
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path
431 431
# cached result of rcpath(); computed lazily on first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
457 457
def revsingle(repo, revspec, default='.'):
    """Resolve revspec to a single changectx; fall back to default when
    revspec is empty (but 0 is a valid revision)."""
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if len(l) < 1:
        raise util.Abort(_('empty revision set'))
    # a multi-rev spec resolves to its last member
    return repo[l[-1]]
466 466
def revpair(repo, revs):
    """Resolve a list of revision specs to a (node, node-or-None) pair.

    Returns (first, last) nodes of the resolved set; the second element
    is None when only a single revision was specified."""
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if len(l) == 0:
        if revs:
            raise util.Abort(_('empty revision range'))
        return repo.dirstate.p1(), None

    # a lone spec without a range separator means "that revision only"
    if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(l[0]), None

    return repo.lookup(l[0]), repo.lookup(l[-1])
482 482
# separator for old-style "start:end" revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec part (but not 0) falls back to the provided default
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])
    for spec in revs:
        if l and not seen:
            # sync seen with results accumulated by a previous iteration
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l = l + [spec]
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start <= 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + sorted(newrevs, reverse=start > end)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + [rev]
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        if seen or l:
            dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
            l = l + dl
            seen.update(dl)
        else:
            l = m(repo, revset.spanset(repo))

    return l
545 545
def expandpats(pats):
    '''Expand bare glob patterns for platforms (Windows) whose shell does
    not do it; patterns with an explicit kind prefix are left alone.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for p in pats:
        kind, name = matchmod._patsplit(p, None)
        if kind is None:
            try:
                globbed = glob.glob(name)
            except re.error:
                globbed = [name]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(p)
    return ret
562 562
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a (matcher, expanded pats) pair for ctx honoring
    --include/--exclude options.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m, pats
575 575
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return only the matcher part of matchandpats().'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]
578 578
def matchall(repo):
    '''Return a matcher that matches every file in repo.'''
    return matchmod.always(repo.root, repo.getcwd())
581 581
def matchfiles(repo, files):
    '''Return a matcher that matches exactly the given files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)
584 584
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, detecting renames by
    similarity. Returns 1 if any explicitly-requested file was rejected,
    0 otherwise.'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    unknownset = set(unknown)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
619 619
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown, deleted, renames)

    # return 1 if any explicitly-given file could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
649 649
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns an (added, unknown, deleted, removed) tuple of filename lists.'''
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed
676 676
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping; empty when similarity is 0 or less.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames
691 691
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()
704 704
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
723 723
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missings))
    return requirements
740 740
741 741 class filecachesubentry(object):
742 742 def __init__(self, path, stat):
743 743 self.path = path
744 744 self.cachestat = None
745 745 self._cacheable = None
746 746
747 747 if stat:
748 748 self.cachestat = filecachesubentry.stat(self.path)
749 749
750 750 if self.cachestat:
751 751 self._cacheable = self.cachestat.cacheable()
752 752 else:
753 753 # None means we don't know yet
754 754 self._cacheable = None
755 755
756 756 def refresh(self):
757 757 if self.cacheable():
758 758 self.cachestat = filecachesubentry.stat(self.path)
759 759
760 760 def cacheable(self):
761 761 if self._cacheable is not None:
762 762 return self._cacheable
763 763
764 764 # we don't know yet, assume it is for now
765 765 return True
766 766
767 767 def changed(self):
768 768 # no point in going further if we can't cache it
769 769 if not self.cacheable():
770 770 return True
771 771
772 772 newstat = filecachesubentry.stat(self.path)
773 773
774 774 # we may not know if it's cacheable yet, check again now
775 775 if newstat and self._cacheable is None:
776 776 self._cacheable = newstat.cacheable()
777 777
778 778 # check again
779 779 if not self._cacheable:
780 780 return True
781 781
782 782 if self.cachestat != newstat:
783 783 self.cachestat = newstat
784 784 return True
785 785 else:
786 786 return False
787 787
788 788 @staticmethod
789 789 def stat(path):
790 790 try:
791 791 return util.cachestat(path)
792 792 except OSError, e:
793 793 if e.errno != errno.ENOENT:
794 794 raise
795 795
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a set of paths."""

    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
812 812
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
888 888
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip entries whose state matches 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # all remaining ancestors are already counted
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

if util.safehasattr(parsers, 'dirs'):
    # prefer the C implementation when available
    dirs = parsers.dirs
927 927
def finddirs(path):
    '''Yield the ancestor directories of path, deepest first.'''
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
General Comments 0
You need to be logged in to leave comments. Login now