addremove: restore the relative path printing when files are named...
Matt Harbison
r23481:94091ab9 default
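The change below replaces the (m.anypats() and rel) or abs expression in scmutil.addremove() with m.uipath(abs), so that paths print relative to the current directory again when files are named explicitly. A minimal sketch of the before/after selection logic, assuming only a matcher object m with the rel(), anypats() and uipath() methods that appear in the diff:

# Illustrative sketch, not part of the commit: how addremove() picks the
# path it prints, before and after this change.

def displaypath_before(m, abs):
    # old logic: a relative path only when the match used patterns, so
    # explicitly named files were printed with repository-relative paths
    rel = m.rel(abs)
    return (m.anypats() and rel) or abs

def displaypath_after(m, abs):
    # new logic: defer to the matcher, which returns the form suited to
    # UI output, restoring relative printing for named files
    return m.uipath(abs)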
@@ -1,1067 +1,1066 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
87 87 '''Report no changes for push/pull, excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleaned up.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 175 class abstractvfs(object):
176 176 """Abstract base class; cannot be instantiated"""
177 177
178 178 def __init__(self, *args, **kwargs):
179 179 '''Prevent instantiation; don't call this from subclasses.'''
180 180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 181
182 182 def tryread(self, path):
183 183 '''gracefully return an empty string for missing files'''
184 184 try:
185 185 return self.read(path)
186 186 except IOError, inst:
187 187 if inst.errno != errno.ENOENT:
188 188 raise
189 189 return ""
190 190
191 191 def tryreadlines(self, path, mode='rb'):
192 192 '''gracefully return an empty array for missing files'''
193 193 try:
194 194 return self.readlines(path, mode=mode)
195 195 except IOError, inst:
196 196 if inst.errno != errno.ENOENT:
197 197 raise
198 198 return []
199 199
200 200 def open(self, path, mode="r", text=False, atomictemp=False,
201 201 notindexed=False):
202 202 '''Open ``path`` file, which is relative to vfs root.
203 203
204 204 Newly created directories are marked as "not to be indexed by
205 205 the content indexing service", if ``notindexed`` is specified
206 206 for "write" mode access.
207 207 '''
208 208 self.open = self.__call__
209 209 return self.__call__(path, mode, text, atomictemp, notindexed)
210 210
211 211 def read(self, path):
212 212 fp = self(path, 'rb')
213 213 try:
214 214 return fp.read()
215 215 finally:
216 216 fp.close()
217 217
218 218 def readlines(self, path, mode='rb'):
219 219 fp = self(path, mode=mode)
220 220 try:
221 221 return fp.readlines()
222 222 finally:
223 223 fp.close()
224 224
225 225 def write(self, path, data):
226 226 fp = self(path, 'wb')
227 227 try:
228 228 return fp.write(data)
229 229 finally:
230 230 fp.close()
231 231
232 232 def writelines(self, path, data, mode='wb', notindexed=False):
233 233 fp = self(path, mode=mode, notindexed=notindexed)
234 234 try:
235 235 return fp.writelines(data)
236 236 finally:
237 237 fp.close()
238 238
239 239 def append(self, path, data):
240 240 fp = self(path, 'ab')
241 241 try:
242 242 return fp.write(data)
243 243 finally:
244 244 fp.close()
245 245
246 246 def chmod(self, path, mode):
247 247 return os.chmod(self.join(path), mode)
248 248
249 249 def exists(self, path=None):
250 250 return os.path.exists(self.join(path))
251 251
252 252 def fstat(self, fp):
253 253 return util.fstat(fp)
254 254
255 255 def isdir(self, path=None):
256 256 return os.path.isdir(self.join(path))
257 257
258 258 def isfile(self, path=None):
259 259 return os.path.isfile(self.join(path))
260 260
261 261 def islink(self, path=None):
262 262 return os.path.islink(self.join(path))
263 263
264 264 def lexists(self, path=None):
265 265 return os.path.lexists(self.join(path))
266 266
267 267 def lstat(self, path=None):
268 268 return os.lstat(self.join(path))
269 269
270 270 def listdir(self, path=None):
271 271 return os.listdir(self.join(path))
272 272
273 273 def makedir(self, path=None, notindexed=True):
274 274 return util.makedir(self.join(path), notindexed)
275 275
276 276 def makedirs(self, path=None, mode=None):
277 277 return util.makedirs(self.join(path), mode)
278 278
279 279 def makelock(self, info, path):
280 280 return util.makelock(info, self.join(path))
281 281
282 282 def mkdir(self, path=None):
283 283 return os.mkdir(self.join(path))
284 284
285 285 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
286 286 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
287 287 dir=self.join(dir), text=text)
288 288 dname, fname = util.split(name)
289 289 if dir:
290 290 return fd, os.path.join(dir, fname)
291 291 else:
292 292 return fd, fname
293 293
294 294 def readdir(self, path=None, stat=None, skip=None):
295 295 return osutil.listdir(self.join(path), stat, skip)
296 296
297 297 def readlock(self, path):
298 298 return util.readlock(self.join(path))
299 299
300 300 def rename(self, src, dst):
301 301 return util.rename(self.join(src), self.join(dst))
302 302
303 303 def readlink(self, path):
304 304 return os.readlink(self.join(path))
305 305
306 306 def setflags(self, path, l, x):
307 307 return util.setflags(self.join(path), l, x)
308 308
309 309 def stat(self, path=None):
310 310 return os.stat(self.join(path))
311 311
312 312 def unlink(self, path=None):
313 313 return util.unlink(self.join(path))
314 314
315 315 def unlinkpath(self, path=None, ignoremissing=False):
316 316 return util.unlinkpath(self.join(path), ignoremissing)
317 317
318 318 def utime(self, path=None, t=None):
319 319 return os.utime(self.join(path), t)
320 320
321 321 class vfs(abstractvfs):
322 322 '''Operate files relative to a base directory
323 323
324 324 This class is used to hide the details of COW semantics and
325 325 remote file access from higher level code.
326 326 '''
327 327 def __init__(self, base, audit=True, expandpath=False, realpath=False):
328 328 if expandpath:
329 329 base = util.expandpath(base)
330 330 if realpath:
331 331 base = os.path.realpath(base)
332 332 self.base = base
333 333 self._setmustaudit(audit)
334 334 self.createmode = None
335 335 self._trustnlink = None
336 336
337 337 def _getmustaudit(self):
338 338 return self._audit
339 339
340 340 def _setmustaudit(self, onoff):
341 341 self._audit = onoff
342 342 if onoff:
343 343 self.audit = pathutil.pathauditor(self.base)
344 344 else:
345 345 self.audit = util.always
346 346
347 347 mustaudit = property(_getmustaudit, _setmustaudit)
348 348
349 349 @util.propertycache
350 350 def _cansymlink(self):
351 351 return util.checklink(self.base)
352 352
353 353 @util.propertycache
354 354 def _chmod(self):
355 355 return util.checkexec(self.base)
356 356
357 357 def _fixfilemode(self, name):
358 358 if self.createmode is None or not self._chmod:
359 359 return
360 360 os.chmod(name, self.createmode & 0666)
361 361
362 362 def __call__(self, path, mode="r", text=False, atomictemp=False,
363 363 notindexed=False):
364 364 '''Open ``path`` file, which is relative to vfs root.
365 365
366 366 Newly created directories are marked as "not to be indexed by
367 367 the content indexing service", if ``notindexed`` is specified
368 368 for "write" mode access.
369 369 '''
370 370 if self._audit:
371 371 r = util.checkosfilename(path)
372 372 if r:
373 373 raise util.Abort("%s: %r" % (r, path))
374 374 self.audit(path)
375 375 f = self.join(path)
376 376
377 377 if not text and "b" not in mode:
378 378 mode += "b" # for that other OS
379 379
380 380 nlink = -1
381 381 if mode not in ('r', 'rb'):
382 382 dirname, basename = util.split(f)
383 383 # If basename is empty, then the path is malformed because it points
384 384 # to a directory. Let the posixfile() call below raise IOError.
385 385 if basename:
386 386 if atomictemp:
387 387 util.ensuredirs(dirname, self.createmode, notindexed)
388 388 return util.atomictempfile(f, mode, self.createmode)
389 389 try:
390 390 if 'w' in mode:
391 391 util.unlink(f)
392 392 nlink = 0
393 393 else:
394 394 # nlinks() may behave differently for files on Windows
395 395 # shares if the file is open.
396 396 fd = util.posixfile(f)
397 397 nlink = util.nlinks(f)
398 398 if nlink < 1:
399 399 nlink = 2 # force mktempcopy (issue1922)
400 400 fd.close()
401 401 except (OSError, IOError), e:
402 402 if e.errno != errno.ENOENT:
403 403 raise
404 404 nlink = 0
405 405 util.ensuredirs(dirname, self.createmode, notindexed)
406 406 if nlink > 0:
407 407 if self._trustnlink is None:
408 408 self._trustnlink = nlink > 1 or util.checknlink(f)
409 409 if nlink > 1 or not self._trustnlink:
410 410 util.rename(util.mktempcopy(f), f)
411 411 fp = util.posixfile(f, mode)
412 412 if nlink == 0:
413 413 self._fixfilemode(f)
414 414 return fp
415 415
416 416 def symlink(self, src, dst):
417 417 self.audit(dst)
418 418 linkname = self.join(dst)
419 419 try:
420 420 os.unlink(linkname)
421 421 except OSError:
422 422 pass
423 423
424 424 util.ensuredirs(os.path.dirname(linkname), self.createmode)
425 425
426 426 if self._cansymlink:
427 427 try:
428 428 os.symlink(src, linkname)
429 429 except OSError, err:
430 430 raise OSError(err.errno, _('could not symlink to %r: %s') %
431 431 (src, err.strerror), linkname)
432 432 else:
433 433 self.write(dst, src)
434 434
435 435 def join(self, path):
436 436 if path:
437 437 return os.path.join(self.base, path)
438 438 else:
439 439 return self.base
440 440
441 441 opener = vfs
442 442
443 443 class auditvfs(object):
444 444 def __init__(self, vfs):
445 445 self.vfs = vfs
446 446
447 447 def _getmustaudit(self):
448 448 return self.vfs.mustaudit
449 449
450 450 def _setmustaudit(self, onoff):
451 451 self.vfs.mustaudit = onoff
452 452
453 453 mustaudit = property(_getmustaudit, _setmustaudit)
454 454
455 455 class filtervfs(abstractvfs, auditvfs):
456 456 '''Wrapper vfs for filtering filenames with a function.'''
457 457
458 458 def __init__(self, vfs, filter):
459 459 auditvfs.__init__(self, vfs)
460 460 self._filter = filter
461 461
462 462 def __call__(self, path, *args, **kwargs):
463 463 return self.vfs(self._filter(path), *args, **kwargs)
464 464
465 465 def join(self, path):
466 466 if path:
467 467 return self.vfs.join(self._filter(path))
468 468 else:
469 469 return self.vfs.join(path)
470 470
471 471 filteropener = filtervfs
472 472
473 473 class readonlyvfs(abstractvfs, auditvfs):
474 474 '''Wrapper vfs preventing any writing.'''
475 475
476 476 def __init__(self, vfs):
477 477 auditvfs.__init__(self, vfs)
478 478
479 479 def __call__(self, path, mode='r', *args, **kw):
480 480 if mode not in ('r', 'rb'):
481 481 raise util.Abort('this vfs is read only')
482 482 return self.vfs(path, mode, *args, **kw)
483 483
484 484
485 485 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
486 486 '''yield every hg repository under path, always recursively.
487 487 The recurse flag will only control recursion into repo working dirs'''
488 488 def errhandler(err):
489 489 if err.filename == path:
490 490 raise err
491 491 samestat = getattr(os.path, 'samestat', None)
492 492 if followsym and samestat is not None:
493 493 def adddir(dirlst, dirname):
494 494 match = False
495 495 dirstat = os.stat(dirname)
496 496 for lstdirstat in dirlst:
497 497 if samestat(dirstat, lstdirstat):
498 498 match = True
499 499 break
500 500 if not match:
501 501 dirlst.append(dirstat)
502 502 return not match
503 503 else:
504 504 followsym = False
505 505
506 506 if (seen_dirs is None) and followsym:
507 507 seen_dirs = []
508 508 adddir(seen_dirs, path)
509 509 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
510 510 dirs.sort()
511 511 if '.hg' in dirs:
512 512 yield root # found a repository
513 513 qroot = os.path.join(root, '.hg', 'patches')
514 514 if os.path.isdir(os.path.join(qroot, '.hg')):
515 515 yield qroot # we have a patch queue repo here
516 516 if recurse:
517 517 # avoid recursing inside the .hg directory
518 518 dirs.remove('.hg')
519 519 else:
520 520 dirs[:] = [] # don't descend further
521 521 elif followsym:
522 522 newdirs = []
523 523 for d in dirs:
524 524 fname = os.path.join(root, d)
525 525 if adddir(seen_dirs, fname):
526 526 if os.path.islink(fname):
527 527 for hgname in walkrepos(fname, True, seen_dirs):
528 528 yield hgname
529 529 else:
530 530 newdirs.append(d)
531 531 dirs[:] = newdirs
532 532
533 533 def osrcpath():
534 534 '''return default os-specific hgrc search path'''
535 535 path = []
536 536 defaultpath = os.path.join(util.datapath, 'default.d')
537 537 if os.path.isdir(defaultpath):
538 538 for f, kind in osutil.listdir(defaultpath):
539 539 if f.endswith('.rc'):
540 540 path.append(os.path.join(defaultpath, f))
541 541 path.extend(systemrcpath())
542 542 path.extend(userrcpath())
543 543 path = [os.path.normpath(f) for f in path]
544 544 return path
545 545
546 546 _rcpath = None
547 547
548 548 def rcpath():
549 549 '''return hgrc search path. if env var HGRCPATH is set, use it.
550 550 for each item in path, if directory, use files ending in .rc,
551 551 else use item.
552 552 make HGRCPATH empty to only look in .hg/hgrc of current repo.
553 553 if no HGRCPATH, use default os-specific path.'''
554 554 global _rcpath
555 555 if _rcpath is None:
556 556 if 'HGRCPATH' in os.environ:
557 557 _rcpath = []
558 558 for p in os.environ['HGRCPATH'].split(os.pathsep):
559 559 if not p:
560 560 continue
561 561 p = util.expandpath(p)
562 562 if os.path.isdir(p):
563 563 for f, kind in osutil.listdir(p):
564 564 if f.endswith('.rc'):
565 565 _rcpath.append(os.path.join(p, f))
566 566 else:
567 567 _rcpath.append(p)
568 568 else:
569 569 _rcpath = osrcpath()
570 570 return _rcpath
571 571
572 572 def revsingle(repo, revspec, default='.'):
573 573 if not revspec and revspec != 0:
574 574 return repo[default]
575 575
576 576 l = revrange(repo, [revspec])
577 577 if not l:
578 578 raise util.Abort(_('empty revision set'))
579 579 return repo[l.last()]
580 580
581 581 def revpair(repo, revs):
582 582 if not revs:
583 583 return repo.dirstate.p1(), None
584 584
585 585 l = revrange(repo, revs)
586 586
587 587 if not l:
588 588 first = second = None
589 589 elif l.isascending():
590 590 first = l.min()
591 591 second = l.max()
592 592 elif l.isdescending():
593 593 first = l.max()
594 594 second = l.min()
595 595 else:
596 596 first = l.first()
597 597 second = l.last()
598 598
599 599 if first is None:
600 600 raise util.Abort(_('empty revision range'))
601 601
602 602 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
603 603 return repo.lookup(first), None
604 604
605 605 return repo.lookup(first), repo.lookup(second)
606 606
607 607 _revrangesep = ':'
608 608
609 609 def revrange(repo, revs):
610 610 """Yield revision as strings from a list of revision specifications."""
611 611
612 612 def revfix(repo, val, defval):
613 613 if not val and val != 0 and defval is not None:
614 614 return defval
615 615 return repo[val].rev()
616 616
617 617 seen, l = set(), revset.baseset([])
618 618 for spec in revs:
619 619 if l and not seen:
620 620 seen = set(l)
621 621 # attempt to parse old-style ranges first to deal with
622 622 # things like old-tag which contain query metacharacters
623 623 try:
624 624 if isinstance(spec, int):
625 625 seen.add(spec)
626 626 l = l + revset.baseset([spec])
627 627 continue
628 628
629 629 if _revrangesep in spec:
630 630 start, end = spec.split(_revrangesep, 1)
631 631 start = revfix(repo, start, 0)
632 632 end = revfix(repo, end, len(repo) - 1)
633 633 if end == nullrev and start < 0:
634 634 start = nullrev
635 635 rangeiter = repo.changelog.revs(start, end)
636 636 if not seen and not l:
637 637 # by far the most common case: revs = ["-1:0"]
638 638 l = revset.baseset(rangeiter)
639 639 # defer syncing seen until next iteration
640 640 continue
641 641 newrevs = set(rangeiter)
642 642 if seen:
643 643 newrevs.difference_update(seen)
644 644 seen.update(newrevs)
645 645 else:
646 646 seen = newrevs
647 647 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
648 648 continue
649 649 elif spec and spec in repo: # single unquoted rev
650 650 rev = revfix(repo, spec, None)
651 651 if rev in seen:
652 652 continue
653 653 seen.add(rev)
654 654 l = l + revset.baseset([rev])
655 655 continue
656 656 except error.RepoLookupError:
657 657 pass
658 658
659 659 # fall through to new-style queries if old-style fails
660 660 m = revset.match(repo.ui, spec, repo)
661 661 if seen or l:
662 662 dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
663 663 l = l + revset.baseset(dl)
664 664 seen.update(dl)
665 665 else:
666 666 l = m(repo, revset.spanset(repo))
667 667
668 668 return l
669 669
670 670 def expandpats(pats):
671 671 '''Expand bare globs when running on windows.
672 672 On posix we assume it has already been done by sh.'''
673 673 if not util.expandglobs:
674 674 return list(pats)
675 675 ret = []
676 676 for kindpat in pats:
677 677 kind, pat = matchmod._patsplit(kindpat, None)
678 678 if kind is None:
679 679 try:
680 680 globbed = glob.glob(pat)
681 681 except re.error:
682 682 globbed = [pat]
683 683 if globbed:
684 684 ret.extend(globbed)
685 685 continue
686 686 ret.append(kindpat)
687 687 return ret
688 688
689 689 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
690 690 '''Return a matcher and the patterns that were used.
691 691 The matcher will warn about bad matches.'''
692 692 if pats == ("",):
693 693 pats = []
694 694 if not globbed and default == 'relpath':
695 695 pats = expandpats(pats or [])
696 696
697 697 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
698 698 default)
699 699 def badfn(f, msg):
700 700 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
701 701 m.bad = badfn
702 702 return m, pats
703 703
704 704 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
705 705 '''Return a matcher that will warn about bad matches.'''
706 706 return matchandpats(ctx, pats, opts, globbed, default)[0]
707 707
708 708 def matchall(repo):
709 709 '''Return a matcher that will efficiently match everything.'''
710 710 return matchmod.always(repo.root, repo.getcwd())
711 711
712 712 def matchfiles(repo, files):
713 713 '''Return a matcher that will efficiently match exactly these files.'''
714 714 return matchmod.exact(repo.root, repo.getcwd(), files)
715 715
716 716 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
717 717 if dry_run is None:
718 718 dry_run = opts.get('dry_run')
719 719 if similarity is None:
720 720 similarity = float(opts.get('similarity') or 0)
721 721 # we'd use status here, except handling of symlinks and ignore is tricky
722 722 m = match(repo[None], pats, opts)
723 723 rejected = []
724 724 m.bad = lambda x, y: rejected.append(x)
725 725
726 726 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
727 727
728 728 unknownset = set(unknown + forgotten)
729 729 toprint = unknownset.copy()
730 730 toprint.update(deleted)
731 731 for abs in sorted(toprint):
732 732 if repo.ui.verbose or not m.exact(abs):
733 rel = m.rel(abs)
734 733 if abs in unknownset:
735 status = _('adding %s\n') % ((m.anypats() and rel) or abs)
734 status = _('adding %s\n') % m.uipath(abs)
736 735 else:
737 status = _('removing %s\n') % ((m.anypats() and rel) or abs)
736 status = _('removing %s\n') % m.uipath(abs)
738 737 repo.ui.status(status)
739 738
740 739 renames = _findrenames(repo, m, added + unknown, removed + deleted,
741 740 similarity)
742 741
743 742 if not dry_run:
744 743 _markchanges(repo, unknown + forgotten, deleted, renames)
745 744
746 745 for f in rejected:
747 746 if f in m.files():
748 747 return 1
749 748 return 0
750 749
751 750 def marktouched(repo, files, similarity=0.0):
752 751 '''Assert that files have somehow been operated upon. Files are relative to
753 752 the repo root.'''
754 753 m = matchfiles(repo, files)
755 754 rejected = []
756 755 m.bad = lambda x, y: rejected.append(x)
757 756
758 757 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
759 758
760 759 if repo.ui.verbose:
761 760 unknownset = set(unknown + forgotten)
762 761 toprint = unknownset.copy()
763 762 toprint.update(deleted)
764 763 for abs in sorted(toprint):
765 764 if abs in unknownset:
766 765 status = _('adding %s\n') % abs
767 766 else:
768 767 status = _('removing %s\n') % abs
769 768 repo.ui.status(status)
770 769
771 770 renames = _findrenames(repo, m, added + unknown, removed + deleted,
772 771 similarity)
773 772
774 773 _markchanges(repo, unknown + forgotten, deleted, renames)
775 774
776 775 for f in rejected:
777 776 if f in m.files():
778 777 return 1
779 778 return 0
780 779
781 780 def _interestingfiles(repo, matcher):
782 781 '''Walk dirstate with matcher, looking for files that addremove would care
783 782 about.
784 783
785 784 This is different from dirstate.status because it doesn't care about
786 785 whether files are modified or clean.'''
787 786 added, unknown, deleted, removed, forgotten = [], [], [], [], []
788 787 audit_path = pathutil.pathauditor(repo.root)
789 788
790 789 ctx = repo[None]
791 790 dirstate = repo.dirstate
792 791 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
793 792 full=False)
794 793 for abs, st in walkresults.iteritems():
795 794 dstate = dirstate[abs]
796 795 if dstate == '?' and audit_path.check(abs):
797 796 unknown.append(abs)
798 797 elif dstate != 'r' and not st:
799 798 deleted.append(abs)
800 799 elif dstate == 'r' and st:
801 800 forgotten.append(abs)
802 801 # for finding renames
803 802 elif dstate == 'r' and not st:
804 803 removed.append(abs)
805 804 elif dstate == 'a':
806 805 added.append(abs)
807 806
808 807 return added, unknown, deleted, removed, forgotten
809 808
810 809 def _findrenames(repo, matcher, added, removed, similarity):
811 810 '''Find renames from removed files to added ones.'''
812 811 renames = {}
813 812 if similarity > 0:
814 813 for old, new, score in similar.findrenames(repo, added, removed,
815 814 similarity):
816 815 if (repo.ui.verbose or not matcher.exact(old)
817 816 or not matcher.exact(new)):
818 817 repo.ui.status(_('recording removal of %s as rename to %s '
819 818 '(%d%% similar)\n') %
820 819 (matcher.rel(old), matcher.rel(new),
821 820 score * 100))
822 821 renames[new] = old
823 822 return renames
824 823
825 824 def _markchanges(repo, unknown, deleted, renames):
826 825 '''Marks the files in unknown as added, the files in deleted as removed,
827 826 and the files in renames as copied.'''
828 827 wctx = repo[None]
829 828 wlock = repo.wlock()
830 829 try:
831 830 wctx.forget(deleted)
832 831 wctx.add(unknown)
833 832 for new, old in renames.iteritems():
834 833 wctx.copy(old, new)
835 834 finally:
836 835 wlock.release()
837 836
838 837 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
839 838 """Update the dirstate to reflect the intent of copying src to dst. For
840 839 different reasons it might not end with dst being marked as copied from src.
841 840 """
842 841 origsrc = repo.dirstate.copied(src) or src
843 842 if dst == origsrc: # copying back a copy?
844 843 if repo.dirstate[dst] not in 'mn' and not dryrun:
845 844 repo.dirstate.normallookup(dst)
846 845 else:
847 846 if repo.dirstate[origsrc] == 'a' and origsrc == src:
848 847 if not ui.quiet:
849 848 ui.warn(_("%s has not been committed yet, so no copy "
850 849 "data will be stored for %s.\n")
851 850 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
852 851 if repo.dirstate[dst] in '?r' and not dryrun:
853 852 wctx.add([dst])
854 853 elif not dryrun:
855 854 wctx.copy(origsrc, dst)
856 855
857 856 def readrequires(opener, supported):
858 857 '''Reads and parses .hg/requires and checks if all entries found
859 858 are in the list of supported features.'''
860 859 requirements = set(opener.read("requires").splitlines())
861 860 missings = []
862 861 for r in requirements:
863 862 if r not in supported:
864 863 if not r or not r[0].isalnum():
865 864 raise error.RequirementError(_(".hg/requires file is corrupt"))
866 865 missings.append(r)
867 866 missings.sort()
868 867 if missings:
869 868 raise error.RequirementError(
870 869 _("repository requires features unknown to this Mercurial: %s")
871 870 % " ".join(missings),
872 871 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
873 872 " for more information"))
874 873 return requirements
875 874
876 875 class filecachesubentry(object):
877 876 def __init__(self, path, stat):
878 877 self.path = path
879 878 self.cachestat = None
880 879 self._cacheable = None
881 880
882 881 if stat:
883 882 self.cachestat = filecachesubentry.stat(self.path)
884 883
885 884 if self.cachestat:
886 885 self._cacheable = self.cachestat.cacheable()
887 886 else:
888 887 # None means we don't know yet
889 888 self._cacheable = None
890 889
891 890 def refresh(self):
892 891 if self.cacheable():
893 892 self.cachestat = filecachesubentry.stat(self.path)
894 893
895 894 def cacheable(self):
896 895 if self._cacheable is not None:
897 896 return self._cacheable
898 897
899 898 # we don't know yet, assume it is for now
900 899 return True
901 900
902 901 def changed(self):
903 902 # no point in going further if we can't cache it
904 903 if not self.cacheable():
905 904 return True
906 905
907 906 newstat = filecachesubentry.stat(self.path)
908 907
909 908 # we may not know if it's cacheable yet, check again now
910 909 if newstat and self._cacheable is None:
911 910 self._cacheable = newstat.cacheable()
912 911
913 912 # check again
914 913 if not self._cacheable:
915 914 return True
916 915
917 916 if self.cachestat != newstat:
918 917 self.cachestat = newstat
919 918 return True
920 919 else:
921 920 return False
922 921
923 922 @staticmethod
924 923 def stat(path):
925 924 try:
926 925 return util.cachestat(path)
927 926 except OSError, e:
928 927 if e.errno != errno.ENOENT:
929 928 raise
930 929
931 930 class filecacheentry(object):
932 931 def __init__(self, paths, stat=True):
933 932 self._entries = []
934 933 for path in paths:
935 934 self._entries.append(filecachesubentry(path, stat))
936 935
937 936 def changed(self):
938 937 '''true if any entry has changed'''
939 938 for entry in self._entries:
940 939 if entry.changed():
941 940 return True
942 941 return False
943 942
944 943 def refresh(self):
945 944 for entry in self._entries:
946 945 entry.refresh()
947 946
948 947 class filecache(object):
949 948 '''A property like decorator that tracks files under .hg/ for updates.
950 949
951 950 Records stat info when called in _filecache.
952 951
953 952 On subsequent calls, compares old stat info with new info, and recreates the
954 953 object when any of the files changes, updating the new stat info in
955 954 _filecache.
956 955
957 956 Mercurial either atomically renames or appends to files under .hg,
958 957 so to ensure the cache is reliable we need the filesystem to be able
959 958 to tell us if a file has been replaced. If it can't, we fall back to
960 959 recreating the object on every call (essentially the same behaviour as
961 960 propertycache).
962 961
963 962 '''
964 963 def __init__(self, *paths):
965 964 self.paths = paths
966 965
967 966 def join(self, obj, fname):
968 967 """Used to compute the runtime path of a cached file.
969 968
970 969 Users should subclass filecache and provide their own version of this
971 970 function to call the appropriate join function on 'obj' (an instance
972 971 of the class whose member function was decorated).
973 972 """
974 973 return obj.join(fname)
975 974
976 975 def __call__(self, func):
977 976 self.func = func
978 977 self.name = func.__name__
979 978 return self
980 979
981 980 def __get__(self, obj, type=None):
982 981 # do we need to check if the file changed?
983 982 if self.name in obj.__dict__:
984 983 assert self.name in obj._filecache, self.name
985 984 return obj.__dict__[self.name]
986 985
987 986 entry = obj._filecache.get(self.name)
988 987
989 988 if entry:
990 989 if entry.changed():
991 990 entry.obj = self.func(obj)
992 991 else:
993 992 paths = [self.join(obj, path) for path in self.paths]
994 993
995 994 # We stat -before- creating the object so our cache doesn't lie if
996 995 # a writer modified between the time we read and stat
997 996 entry = filecacheentry(paths, True)
998 997 entry.obj = self.func(obj)
999 998
1000 999 obj._filecache[self.name] = entry
1001 1000
1002 1001 obj.__dict__[self.name] = entry.obj
1003 1002 return entry.obj
1004 1003
1005 1004 def __set__(self, obj, value):
1006 1005 if self.name not in obj._filecache:
1007 1006 # we add an entry for the missing value because X in __dict__
1008 1007 # implies X in _filecache
1009 1008 paths = [self.join(obj, path) for path in self.paths]
1010 1009 ce = filecacheentry(paths, False)
1011 1010 obj._filecache[self.name] = ce
1012 1011 else:
1013 1012 ce = obj._filecache[self.name]
1014 1013
1015 1014 ce.obj = value # update cached copy
1016 1015 obj.__dict__[self.name] = value # update copy returned by obj.x
1017 1016
1018 1017 def __delete__(self, obj):
1019 1018 try:
1020 1019 del obj.__dict__[self.name]
1021 1020 except KeyError:
1022 1021 raise AttributeError(self.name)
1023 1022
1024 1023 class dirs(object):
1025 1024 '''a multiset of directory names from a dirstate or manifest'''
1026 1025
1027 1026 def __init__(self, map, skip=None):
1028 1027 self._dirs = {}
1029 1028 addpath = self.addpath
1030 1029 if util.safehasattr(map, 'iteritems') and skip is not None:
1031 1030 for f, s in map.iteritems():
1032 1031 if s[0] != skip:
1033 1032 addpath(f)
1034 1033 else:
1035 1034 for f in map:
1036 1035 addpath(f)
1037 1036
1038 1037 def addpath(self, path):
1039 1038 dirs = self._dirs
1040 1039 for base in finddirs(path):
1041 1040 if base in dirs:
1042 1041 dirs[base] += 1
1043 1042 return
1044 1043 dirs[base] = 1
1045 1044
1046 1045 def delpath(self, path):
1047 1046 dirs = self._dirs
1048 1047 for base in finddirs(path):
1049 1048 if dirs[base] > 1:
1050 1049 dirs[base] -= 1
1051 1050 return
1052 1051 del dirs[base]
1053 1052
1054 1053 def __iter__(self):
1055 1054 return self._dirs.iterkeys()
1056 1055
1057 1056 def __contains__(self, d):
1058 1057 return d in self._dirs
1059 1058
1060 1059 if util.safehasattr(parsers, 'dirs'):
1061 1060 dirs = parsers.dirs
1062 1061
1063 1062 def finddirs(path):
1064 1063 pos = path.rfind('/')
1065 1064 while pos != -1:
1066 1065 yield path[:pos]
1067 1066 pos = path.rfind('/', 0, pos)
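For context on the filecache decorator documented above, a hedged usage sketch follows; the cachedthing class and its bookmarks property are hypothetical, but the _filecache dict and join() method are exactly the hooks that the decorator's docstring and its __get__/__set__ code rely on:

# Hypothetical consumer of the filecache decorator defined above; nothing
# here comes from the commit itself.

class cachedthing(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}   # filecache.__get__/__set__ store entries here

    def join(self, fname):
        # the default filecache.join() calls this to locate the backing file
        return self.vfs.join(fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # recomputed only when the 'bookmarks' file is replaced or appended
        # to on disk; otherwise the cached object is returned
        return self.vfs.tryread('bookmarks')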
@@ -1,102 +1,102 b''
1 1 $ hg init rep; cd rep
2 2
3 3 $ touch empty-file
4 4 $ $PYTHON -c 'for x in range(10000): print x' > large-file
5 5
6 6 $ hg addremove
7 7 adding empty-file
8 8 adding large-file
9 9
10 10 $ hg commit -m A
11 11
12 12 $ rm large-file empty-file
13 13 $ $PYTHON -c 'for x in range(10,10000): print x' > another-file
14 14
15 15 $ hg addremove -s50
16 16 adding another-file
17 17 removing empty-file
18 18 removing large-file
19 19 recording removal of large-file as rename to another-file (99% similar)
20 20
21 21 $ hg commit -m B
22 22
23 23 comparing two empty files caused ZeroDivisionError in the past
24 24
25 25 $ hg update -C 0
26 26 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
27 27 $ rm empty-file
28 28 $ touch another-empty-file
29 29 $ hg addremove -s50
30 30 adding another-empty-file
31 31 removing empty-file
32 32
33 33 $ cd ..
34 34
35 35 $ hg init rep2; cd rep2
36 36
37 37 $ $PYTHON -c 'for x in range(10000): print x' > large-file
38 38 $ $PYTHON -c 'for x in range(50): print x' > tiny-file
39 39
40 40 $ hg addremove
41 41 adding large-file
42 42 adding tiny-file
43 43
44 44 $ hg commit -m A
45 45
46 46 $ $PYTHON -c 'for x in range(70): print x' > small-file
47 47 $ rm tiny-file
48 48 $ rm large-file
49 49
50 50 $ hg addremove -s50
51 51 removing large-file
52 52 adding small-file
53 53 removing tiny-file
54 54 recording removal of tiny-file as rename to small-file (82% similar)
55 55
56 56 $ hg commit -m B
57 57
58 58 should all fail
59 59
60 60 $ hg addremove -s foo
61 61 abort: similarity must be a number
62 62 [255]
63 63 $ hg addremove -s -1
64 64 abort: similarity must be between 0 and 100
65 65 [255]
66 66 $ hg addremove -s 1e6
67 67 abort: similarity must be between 0 and 100
68 68 [255]
69 69
70 70 $ cd ..
71 71
72 72 Issue1527: repeated addremove causes util.Abort
73 73
74 74 $ hg init rep3; cd rep3
75 75 $ mkdir d
76 76 $ echo a > d/a
77 77 $ hg add d/a
78 78 $ hg commit -m 1
79 79
80 80 $ mv d/a d/b
81 81 $ hg addremove -s80
82 82 removing d/a
83 83 adding d/b
84 84 recording removal of d/a as rename to d/b (100% similar) (glob)
85 85 $ hg debugstate
86 86 r 0 0 1970-01-01 00:00:00 d/a
87 87 a 0 -1 unset d/b
88 88 copy: d/a -> d/b
89 89 $ mv d/b c
90 90
91 91 no copies found here (since the target isn't in d)
92 92
93 93 $ hg addremove -s80 d
94 removing d/b
94 removing d/b (glob)
95 95
96 96 copies here
97 97
98 98 $ hg addremove -s80
99 99 adding c
100 100 recording removal of d/a as rename to c (100% similar) (glob)
101 101
102 102 $ cd ..
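The aborts exercised by 'hg addremove -s foo', '-s -1' and '-s 1e6' above come from validation of the similarity option before scmutil.addremove() runs; that check is not part of this diff, so the sketch below is an assumption: the messages are taken from the test output, and the division by 100 matches the score * 100 printing in _findrenames():

# Assumed shape of the -s/--similarity validation implied by the test
# output above; relies on the util and _ imports from the top of scmutil.py.

def parsesimilarity(opts):
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    # _findrenames() treats similarity as a 0..1 fraction (it prints
    # score * 100), so scale the percentage down before passing it on.
    return sim / 100.0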