scmutil: add a function to mark that files have been operated on...
Siddharth Agarwal
r19154:0c7cf411 default
@@ -1,977 +1,1007 @@
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 11 import match as matchmod
12 12 import os, errno, re, stat, glob
13 13
14 14 if os.name == 'nt':
15 15 import scmwindows as scmplatform
16 16 else:
17 17 import scmposix as scmplatform
18 18
19 19 systemrcpath = scmplatform.systemrcpath
20 20 userrcpath = scmplatform.userrcpath
21 21
22 22 def nochangesfound(ui, repo, excluded=None):
23 23 '''Report no changes for push/pull. excluded is None or a list of
24 24 nodes excluded from the push/pull.
25 25 '''
26 26 secretlist = []
27 27 if excluded:
28 28 for n in excluded:
29 29 if n not in repo:
30 30 # discovery should not have included the filtered revision,
31 31 # we have to explicitly exclude it until discovery is cleaned up.
32 32 continue
33 33 ctx = repo[n]
34 34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 35 secretlist.append(n)
36 36
37 37 if secretlist:
38 38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 39 % len(secretlist))
40 40 else:
41 41 ui.status(_("no changes found\n"))
42 42
43 43 def checknewlabel(repo, lbl, kind):
44 44 # Do not use the "kind" parameter in ui output.
45 45 # It makes strings difficult to translate.
46 46 if lbl in ['tip', '.', 'null']:
47 47 raise util.Abort(_("the name '%s' is reserved") % lbl)
48 48 for c in (':', '\0', '\n', '\r'):
49 49 if c in lbl:
50 50 raise util.Abort(_("%r cannot be used in a name") % c)
51 51 try:
52 52 int(lbl)
53 53 raise util.Abort(_("cannot use an integer as a name"))
54 54 except ValueError:
55 55 pass
56 56
57 57 def checkfilename(f):
58 58 '''Check that the filename f is an acceptable filename for a tracked file'''
59 59 if '\r' in f or '\n' in f:
60 60 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
61 61
62 62 def checkportable(ui, f):
63 63 '''Check if filename f is portable and warn or abort depending on config'''
64 64 checkfilename(f)
65 65 abort, warn = checkportabilityalert(ui)
66 66 if abort or warn:
67 67 msg = util.checkwinfilename(f)
68 68 if msg:
69 69 msg = "%s: %r" % (msg, f)
70 70 if abort:
71 71 raise util.Abort(msg)
72 72 ui.warn(_("warning: %s\n") % msg)
73 73
74 74 def checkportabilityalert(ui):
75 75 '''check if the user's config requests nothing, a warning, or abort for
76 76 non-portable filenames'''
77 77 val = ui.config('ui', 'portablefilenames', 'warn')
78 78 lval = val.lower()
79 79 bval = util.parsebool(val)
80 80 abort = os.name == 'nt' or lval == 'abort'
81 81 warn = bval or lval == 'warn'
82 82 if bval is None and not (warn or abort or lval == 'ignore'):
83 83 raise error.ConfigError(
84 84 _("ui.portablefilenames value is invalid ('%s')") % val)
85 85 return abort, warn
86 86
87 87 class casecollisionauditor(object):
88 88 def __init__(self, ui, abort, dirstate):
89 89 self._ui = ui
90 90 self._abort = abort
91 91 allfiles = '\0'.join(dirstate._map)
92 92 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
93 93 self._dirstate = dirstate
94 94 # The purpose of _newfiles is so that we don't complain about
95 95 # case collisions if someone were to call this object with the
96 96 # same filename twice.
97 97 self._newfiles = set()
98 98
99 99 def __call__(self, f):
100 100 fl = encoding.lower(f)
101 101 if (fl in self._loweredfiles and f not in self._dirstate and
102 102 f not in self._newfiles):
103 103 msg = _('possible case-folding collision for %s') % f
104 104 if self._abort:
105 105 raise util.Abort(msg)
106 106 self._ui.warn(_("warning: %s\n") % msg)
107 107 self._loweredfiles.add(fl)
108 108 self._newfiles.add(f)
109 109
110 110 class pathauditor(object):
111 111 '''ensure that a filesystem path contains no banned components.
112 112 the following properties of a path are checked:
113 113
114 114 - ends with a directory separator
115 115 - under top-level .hg
116 116 - starts at the root of a windows drive
117 117 - contains ".."
118 118 - traverses a symlink (e.g. a/symlink_here/b)
119 119 - inside a nested repository (a callback can be used to approve
120 120 some nested repositories, e.g., subrepositories)
121 121 '''
122 122
123 123 def __init__(self, root, callback=None):
124 124 self.audited = set()
125 125 self.auditeddir = set()
126 126 self.root = root
127 127 self.callback = callback
128 128 if os.path.lexists(root) and not util.checkcase(root):
129 129 self.normcase = util.normcase
130 130 else:
131 131 self.normcase = lambda x: x
132 132
133 133 def __call__(self, path):
134 134 '''Check the relative path.
135 135 path may contain a pattern (e.g. foodir/**.txt)'''
136 136
137 137 path = util.localpath(path)
138 138 normpath = self.normcase(path)
139 139 if normpath in self.audited:
140 140 return
141 141 # AIX ignores "/" at end of path, others raise EISDIR.
142 142 if util.endswithsep(path):
143 143 raise util.Abort(_("path ends in directory separator: %s") % path)
144 144 parts = util.splitpath(path)
145 145 if (os.path.splitdrive(path)[0]
146 146 or parts[0].lower() in ('.hg', '.hg.', '')
147 147 or os.pardir in parts):
148 148 raise util.Abort(_("path contains illegal component: %s") % path)
149 149 if '.hg' in path.lower():
150 150 lparts = [p.lower() for p in parts]
151 151 for p in '.hg', '.hg.':
152 152 if p in lparts[1:]:
153 153 pos = lparts.index(p)
154 154 base = os.path.join(*parts[:pos])
155 155 raise util.Abort(_("path '%s' is inside nested repo %r")
156 156 % (path, base))
157 157
158 158 normparts = util.splitpath(normpath)
159 159 assert len(parts) == len(normparts)
160 160
161 161 parts.pop()
162 162 normparts.pop()
163 163 prefixes = []
164 164 while parts:
165 165 prefix = os.sep.join(parts)
166 166 normprefix = os.sep.join(normparts)
167 167 if normprefix in self.auditeddir:
168 168 break
169 169 curpath = os.path.join(self.root, prefix)
170 170 try:
171 171 st = os.lstat(curpath)
172 172 except OSError, err:
173 173 # EINVAL can be raised for invalid path syntax under win32.
174 174 # Such errors must be ignored so that patterns can still be checked.
175 175 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
176 176 raise
177 177 else:
178 178 if stat.S_ISLNK(st.st_mode):
179 179 raise util.Abort(
180 180 _('path %r traverses symbolic link %r')
181 181 % (path, prefix))
182 182 elif (stat.S_ISDIR(st.st_mode) and
183 183 os.path.isdir(os.path.join(curpath, '.hg'))):
184 184 if not self.callback or not self.callback(curpath):
185 185 raise util.Abort(_("path '%s' is inside nested "
186 186 "repo %r")
187 187 % (path, prefix))
188 188 prefixes.append(normprefix)
189 189 parts.pop()
190 190 normparts.pop()
191 191
192 192 self.audited.add(normpath)
193 193 # only add prefixes to the cache after checking everything: we don't
194 194 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
195 195 self.auditeddir.update(prefixes)
196 196
197 197 def check(self, path):
198 198 try:
199 199 self(path)
200 200 return True
201 201 except (OSError, util.Abort):
202 202 return False
203 203
204 204 class abstractvfs(object):
205 205 """Abstract base class; cannot be instantiated"""
206 206
207 207 def __init__(self, *args, **kwargs):
208 208 '''Prevent instantiation; don't call this from subclasses.'''
209 209 raise NotImplementedError('attempted instantiating ' + str(type(self)))
210 210
211 211 def tryread(self, path):
212 212 '''gracefully return an empty string for missing files'''
213 213 try:
214 214 return self.read(path)
215 215 except IOError, inst:
216 216 if inst.errno != errno.ENOENT:
217 217 raise
218 218 return ""
219 219
220 220 def read(self, path):
221 221 fp = self(path, 'rb')
222 222 try:
223 223 return fp.read()
224 224 finally:
225 225 fp.close()
226 226
227 227 def write(self, path, data):
228 228 fp = self(path, 'wb')
229 229 try:
230 230 return fp.write(data)
231 231 finally:
232 232 fp.close()
233 233
234 234 def append(self, path, data):
235 235 fp = self(path, 'ab')
236 236 try:
237 237 return fp.write(data)
238 238 finally:
239 239 fp.close()
240 240
241 241 def exists(self, path=None):
242 242 return os.path.exists(self.join(path))
243 243
244 244 def isdir(self, path=None):
245 245 return os.path.isdir(self.join(path))
246 246
247 247 def islink(self, path=None):
248 248 return os.path.islink(self.join(path))
249 249
250 250 def makedir(self, path=None, notindexed=True):
251 251 return util.makedir(self.join(path), notindexed)
252 252
253 253 def makedirs(self, path=None, mode=None):
254 254 return util.makedirs(self.join(path), mode)
255 255
256 256 def mkdir(self, path=None):
257 257 return os.mkdir(self.join(path))
258 258
259 259 def readdir(self, path=None, stat=None, skip=None):
260 260 return osutil.listdir(self.join(path), stat, skip)
261 261
262 262 def rename(self, src, dst):
263 263 return util.rename(self.join(src), self.join(dst))
264 264
265 265 def readlink(self, path):
266 266 return os.readlink(self.join(path))
267 267
268 268 def setflags(self, path, l, x):
269 269 return util.setflags(self.join(path), l, x)
270 270
271 271 def stat(self, path=None):
272 272 return os.stat(self.join(path))
273 273
274 274 class vfs(abstractvfs):
275 275 '''Operate files relative to a base directory
276 276
277 277 This class is used to hide the details of COW semantics and
278 278 remote file access from higher level code.
279 279 '''
280 280 def __init__(self, base, audit=True, expandpath=False, realpath=False):
281 281 if expandpath:
282 282 base = util.expandpath(base)
283 283 if realpath:
284 284 base = os.path.realpath(base)
285 285 self.base = base
286 286 self._setmustaudit(audit)
287 287 self.createmode = None
288 288 self._trustnlink = None
289 289
290 290 def _getmustaudit(self):
291 291 return self._audit
292 292
293 293 def _setmustaudit(self, onoff):
294 294 self._audit = onoff
295 295 if onoff:
296 296 self.audit = pathauditor(self.base)
297 297 else:
298 298 self.audit = util.always
299 299
300 300 mustaudit = property(_getmustaudit, _setmustaudit)
301 301
302 302 @util.propertycache
303 303 def _cansymlink(self):
304 304 return util.checklink(self.base)
305 305
306 306 @util.propertycache
307 307 def _chmod(self):
308 308 return util.checkexec(self.base)
309 309
310 310 def _fixfilemode(self, name):
311 311 if self.createmode is None or not self._chmod:
312 312 return
313 313 os.chmod(name, self.createmode & 0666)
314 314
315 315 def __call__(self, path, mode="r", text=False, atomictemp=False):
316 316 if self._audit:
317 317 r = util.checkosfilename(path)
318 318 if r:
319 319 raise util.Abort("%s: %r" % (r, path))
320 320 self.audit(path)
321 321 f = self.join(path)
322 322
323 323 if not text and "b" not in mode:
324 324 mode += "b" # for that other OS
325 325
326 326 nlink = -1
327 327 if mode not in ('r', 'rb'):
328 328 dirname, basename = util.split(f)
329 329 # If basename is empty, then the path is malformed because it points
330 330 # to a directory. Let the posixfile() call below raise IOError.
331 331 if basename:
332 332 if atomictemp:
333 333 util.ensuredirs(dirname, self.createmode)
334 334 return util.atomictempfile(f, mode, self.createmode)
335 335 try:
336 336 if 'w' in mode:
337 337 util.unlink(f)
338 338 nlink = 0
339 339 else:
340 340 # nlinks() may behave differently for files on Windows
341 341 # shares if the file is open.
342 342 fd = util.posixfile(f)
343 343 nlink = util.nlinks(f)
344 344 if nlink < 1:
345 345 nlink = 2 # force mktempcopy (issue1922)
346 346 fd.close()
347 347 except (OSError, IOError), e:
348 348 if e.errno != errno.ENOENT:
349 349 raise
350 350 nlink = 0
351 351 util.ensuredirs(dirname, self.createmode)
352 352 if nlink > 0:
353 353 if self._trustnlink is None:
354 354 self._trustnlink = nlink > 1 or util.checknlink(f)
355 355 if nlink > 1 or not self._trustnlink:
356 356 util.rename(util.mktempcopy(f), f)
357 357 fp = util.posixfile(f, mode)
358 358 if nlink == 0:
359 359 self._fixfilemode(f)
360 360 return fp
361 361
362 362 def symlink(self, src, dst):
363 363 self.audit(dst)
364 364 linkname = self.join(dst)
365 365 try:
366 366 os.unlink(linkname)
367 367 except OSError:
368 368 pass
369 369
370 370 util.ensuredirs(os.path.dirname(linkname), self.createmode)
371 371
372 372 if self._cansymlink:
373 373 try:
374 374 os.symlink(src, linkname)
375 375 except OSError, err:
376 376 raise OSError(err.errno, _('could not symlink to %r: %s') %
377 377 (src, err.strerror), linkname)
378 378 else:
379 379 self.write(dst, src)
380 380
381 381 def join(self, path):
382 382 if path:
383 383 return os.path.join(self.base, path)
384 384 else:
385 385 return self.base
386 386
387 387 opener = vfs
388 388
389 389 class auditvfs(object):
390 390 def __init__(self, vfs):
391 391 self.vfs = vfs
392 392
393 393 def _getmustaudit(self):
394 394 return self.vfs.mustaudit
395 395
396 396 def _setmustaudit(self, onoff):
397 397 self.vfs.mustaudit = onoff
398 398
399 399 mustaudit = property(_getmustaudit, _setmustaudit)
400 400
401 401 class filtervfs(abstractvfs, auditvfs):
402 402 '''Wrapper vfs for filtering filenames with a function.'''
403 403
404 404 def __init__(self, vfs, filter):
405 405 auditvfs.__init__(self, vfs)
406 406 self._filter = filter
407 407
408 408 def __call__(self, path, *args, **kwargs):
409 409 return self.vfs(self._filter(path), *args, **kwargs)
410 410
411 411 def join(self, path):
412 412 if path:
413 413 return self.vfs.join(self._filter(path))
414 414 else:
415 415 return self.vfs.join(path)
416 416
417 417 filteropener = filtervfs
418 418
419 419 class readonlyvfs(abstractvfs, auditvfs):
420 420 '''Wrapper vfs preventing any writing.'''
421 421
422 422 def __init__(self, vfs):
423 423 auditvfs.__init__(self, vfs)
424 424
425 425 def __call__(self, path, mode='r', *args, **kw):
426 426 if mode not in ('r', 'rb'):
427 427 raise util.Abort('this vfs is read only')
428 428 return self.vfs(path, mode, *args, **kw)
429 429
430 430
431 431 def canonpath(root, cwd, myname, auditor=None):
432 432 '''return the canonical path of myname, given cwd and root'''
433 433 if util.endswithsep(root):
434 434 rootsep = root
435 435 else:
436 436 rootsep = root + os.sep
437 437 name = myname
438 438 if not os.path.isabs(name):
439 439 name = os.path.join(root, cwd, name)
440 440 name = os.path.normpath(name)
441 441 if auditor is None:
442 442 auditor = pathauditor(root)
443 443 if name != rootsep and name.startswith(rootsep):
444 444 name = name[len(rootsep):]
445 445 auditor(name)
446 446 return util.pconvert(name)
447 447 elif name == root:
448 448 return ''
449 449 else:
450 450 # Determine whether `name' is in the hierarchy at or beneath `root',
451 451 # by iterating name=dirname(name) until that causes no change (can't
452 452 # check name == '/', because that doesn't work on windows). The list
453 453 # `rel' holds the reversed list of components making up the relative
454 454 # file name we want.
455 455 rel = []
456 456 while True:
457 457 try:
458 458 s = util.samefile(name, root)
459 459 except OSError:
460 460 s = False
461 461 if s:
462 462 if not rel:
463 463 # name was actually the same as root (maybe a symlink)
464 464 return ''
465 465 rel.reverse()
466 466 name = os.path.join(*rel)
467 467 auditor(name)
468 468 return util.pconvert(name)
469 469 dirname, basename = util.split(name)
470 470 rel.append(basename)
471 471 if dirname == name:
472 472 break
473 473 name = dirname
474 474
475 475 raise util.Abort(_("%s not under root '%s'") % (myname, root))
476 476
477 477 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
478 478 '''yield every hg repository under path, always recursively.
479 479 The recurse flag will only control recursion into repo working dirs'''
480 480 def errhandler(err):
481 481 if err.filename == path:
482 482 raise err
483 483 samestat = getattr(os.path, 'samestat', None)
484 484 if followsym and samestat is not None:
485 485 def adddir(dirlst, dirname):
486 486 match = False
487 487 dirstat = os.stat(dirname)
488 488 for lstdirstat in dirlst:
489 489 if samestat(dirstat, lstdirstat):
490 490 match = True
491 491 break
492 492 if not match:
493 493 dirlst.append(dirstat)
494 494 return not match
495 495 else:
496 496 followsym = False
497 497
498 498 if (seen_dirs is None) and followsym:
499 499 seen_dirs = []
500 500 adddir(seen_dirs, path)
501 501 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
502 502 dirs.sort()
503 503 if '.hg' in dirs:
504 504 yield root # found a repository
505 505 qroot = os.path.join(root, '.hg', 'patches')
506 506 if os.path.isdir(os.path.join(qroot, '.hg')):
507 507 yield qroot # we have a patch queue repo here
508 508 if recurse:
509 509 # avoid recursing inside the .hg directory
510 510 dirs.remove('.hg')
511 511 else:
512 512 dirs[:] = [] # don't descend further
513 513 elif followsym:
514 514 newdirs = []
515 515 for d in dirs:
516 516 fname = os.path.join(root, d)
517 517 if adddir(seen_dirs, fname):
518 518 if os.path.islink(fname):
519 519 for hgname in walkrepos(fname, True, seen_dirs):
520 520 yield hgname
521 521 else:
522 522 newdirs.append(d)
523 523 dirs[:] = newdirs
524 524
525 525 def osrcpath():
526 526 '''return default os-specific hgrc search path'''
527 527 path = systemrcpath()
528 528 path.extend(userrcpath())
529 529 path = [os.path.normpath(f) for f in path]
530 530 return path
531 531
532 532 _rcpath = None
533 533
534 534 def rcpath():
535 535 '''return hgrc search path. if env var HGRCPATH is set, use it.
536 536 for each item in path, if directory, use files ending in .rc,
537 537 else use item.
538 538 make HGRCPATH empty to only look in .hg/hgrc of current repo.
539 539 if no HGRCPATH, use default os-specific path.'''
540 540 global _rcpath
541 541 if _rcpath is None:
542 542 if 'HGRCPATH' in os.environ:
543 543 _rcpath = []
544 544 for p in os.environ['HGRCPATH'].split(os.pathsep):
545 545 if not p:
546 546 continue
547 547 p = util.expandpath(p)
548 548 if os.path.isdir(p):
549 549 for f, kind in osutil.listdir(p):
550 550 if f.endswith('.rc'):
551 551 _rcpath.append(os.path.join(p, f))
552 552 else:
553 553 _rcpath.append(p)
554 554 else:
555 555 _rcpath = osrcpath()
556 556 return _rcpath
557 557
558 558 def revsingle(repo, revspec, default='.'):
559 559 if not revspec:
560 560 return repo[default]
561 561
562 562 l = revrange(repo, [revspec])
563 563 if len(l) < 1:
564 564 raise util.Abort(_('empty revision set'))
565 565 return repo[l[-1]]
566 566
567 567 def revpair(repo, revs):
568 568 if not revs:
569 569 return repo.dirstate.p1(), None
570 570
571 571 l = revrange(repo, revs)
572 572
573 573 if len(l) == 0:
574 574 if revs:
575 575 raise util.Abort(_('empty revision range'))
576 576 return repo.dirstate.p1(), None
577 577
578 578 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
579 579 return repo.lookup(l[0]), None
580 580
581 581 return repo.lookup(l[0]), repo.lookup(l[-1])
582 582
583 583 _revrangesep = ':'
584 584
585 585 def revrange(repo, revs):
586 586 """Yield revision as strings from a list of revision specifications."""
587 587
588 588 def revfix(repo, val, defval):
589 589 if not val and val != 0 and defval is not None:
590 590 return defval
591 591 return repo[val].rev()
592 592
593 593 seen, l = set(), []
594 594 for spec in revs:
595 595 if l and not seen:
596 596 seen = set(l)
597 597 # attempt to parse old-style ranges first to deal with
598 598 # things like old-tag which contain query metacharacters
599 599 try:
600 600 if isinstance(spec, int):
601 601 seen.add(spec)
602 602 l.append(spec)
603 603 continue
604 604
605 605 if _revrangesep in spec:
606 606 start, end = spec.split(_revrangesep, 1)
607 607 start = revfix(repo, start, 0)
608 608 end = revfix(repo, end, len(repo) - 1)
609 609 if end == nullrev and start <= 0:
610 610 start = nullrev
611 611 rangeiter = repo.changelog.revs(start, end)
612 612 if not seen and not l:
613 613 # by far the most common case: revs = ["-1:0"]
614 614 l = list(rangeiter)
615 615 # defer syncing seen until next iteration
616 616 continue
617 617 newrevs = set(rangeiter)
618 618 if seen:
619 619 newrevs.difference_update(seen)
620 620 seen.update(newrevs)
621 621 else:
622 622 seen = newrevs
623 623 l.extend(sorted(newrevs, reverse=start > end))
624 624 continue
625 625 elif spec and spec in repo: # single unquoted rev
626 626 rev = revfix(repo, spec, None)
627 627 if rev in seen:
628 628 continue
629 629 seen.add(rev)
630 630 l.append(rev)
631 631 continue
632 632 except error.RepoLookupError:
633 633 pass
634 634
635 635 # fall through to new-style queries if old-style fails
636 636 m = revset.match(repo.ui, spec)
637 637 dl = [r for r in m(repo, list(repo)) if r not in seen]
638 638 l.extend(dl)
639 639 seen.update(dl)
640 640
641 641 return l
642 642
643 643 def expandpats(pats):
644 644 if not util.expandglobs:
645 645 return list(pats)
646 646 ret = []
647 647 for p in pats:
648 648 kind, name = matchmod._patsplit(p, None)
649 649 if kind is None:
650 650 try:
651 651 globbed = glob.glob(name)
652 652 except re.error:
653 653 globbed = [name]
654 654 if globbed:
655 655 ret.extend(globbed)
656 656 continue
657 657 ret.append(p)
658 658 return ret
659 659
660 660 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
661 661 if pats == ("",):
662 662 pats = []
663 663 if not globbed and default == 'relpath':
664 664 pats = expandpats(pats or [])
665 665
666 666 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
667 667 default)
668 668 def badfn(f, msg):
669 669 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
670 670 m.bad = badfn
671 671 return m, pats
672 672
673 673 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
674 674 return matchandpats(ctx, pats, opts, globbed, default)[0]
675 675
676 676 def matchall(repo):
677 677 return matchmod.always(repo.root, repo.getcwd())
678 678
679 679 def matchfiles(repo, files):
680 680 return matchmod.exact(repo.root, repo.getcwd(), files)
681 681
682 682 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
683 683 if dry_run is None:
684 684 dry_run = opts.get('dry_run')
685 685 if similarity is None:
686 686 similarity = float(opts.get('similarity') or 0)
687 687 # we'd use status here, except handling of symlinks and ignore is tricky
688 688 m = match(repo[None], pats, opts)
689 689 rejected = []
690 690 m.bad = lambda x, y: rejected.append(x)
691 691
692 692 added, unknown, deleted, removed = _interestingfiles(repo, m)
693 693
694 694 unknownset = set(unknown)
695 695 toprint = unknownset.copy()
696 696 toprint.update(deleted)
697 697 for abs in sorted(toprint):
698 698 if repo.ui.verbose or not m.exact(abs):
699 699 rel = m.rel(abs)
700 700 if abs in unknownset:
701 701 status = _('adding %s\n') % ((pats and rel) or abs)
702 702 else:
703 703 status = _('removing %s\n') % ((pats and rel) or abs)
704 704 repo.ui.status(status)
705 705
706 706 renames = _findrenames(repo, m, added + unknown, removed + deleted,
707 707 similarity)
708 708
709 709 if not dry_run:
710 710 _markchanges(repo, unknown, deleted, renames)
711 711
712 712 for f in rejected:
713 713 if f in m.files():
714 714 return 1
715 715 return 0
716 716
717 def marktouched(repo, files, similarity=0.0):
718 '''Assert that files have somehow been operated upon. files are relative to
719 the repo root.'''
720 m = matchfiles(repo, files)
721 rejected = []
722 m.bad = lambda x, y: rejected.append(x)
723
724 added, unknown, deleted, removed = _interestingfiles(repo, m)
725
726 if repo.ui.verbose:
727 unknownset = set(unknown)
728 toprint = unknownset.copy()
729 toprint.update(deleted)
730 for abs in sorted(toprint):
731 if abs in unknownset:
732 status = _('adding %s\n') % abs
733 else:
734 status = _('removing %s\n') % abs
735 repo.ui.status(status)
736
737 renames = _findrenames(repo, m, added + unknown, removed + deleted,
738 similarity)
739
740 _markchanges(repo, unknown, deleted, renames)
741
742 for f in rejected:
743 if f in m.files():
744 return 1
745 return 0
746
717 747 def _interestingfiles(repo, matcher):
718 748 '''Walk dirstate with matcher, looking for files that addremove would care
719 749 about.
720 750
721 751 This is different from dirstate.status because it doesn't care about
722 752 whether files are modified or clean.'''
723 753 added, unknown, deleted, removed = [], [], [], []
724 754 audit_path = pathauditor(repo.root)
725 755
726 756 ctx = repo[None]
727 757 dirstate = repo.dirstate
728 758 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False)
729 759 for abs, st in walkresults.iteritems():
730 760 dstate = dirstate[abs]
731 761 if dstate == '?' and audit_path.check(abs):
732 762 unknown.append(abs)
733 763 elif dstate != 'r' and not st:
734 764 deleted.append(abs)
735 765 # for finding renames
736 766 elif dstate == 'r':
737 767 removed.append(abs)
738 768 elif dstate == 'a':
739 769 added.append(abs)
740 770
741 771 return added, unknown, deleted, removed
742 772
743 773 def _findrenames(repo, matcher, added, removed, similarity):
744 774 '''Find renames from removed files to added ones.'''
745 775 renames = {}
746 776 if similarity > 0:
747 777 for old, new, score in similar.findrenames(repo, added, removed,
748 778 similarity):
749 779 if (repo.ui.verbose or not matcher.exact(old)
750 780 or not matcher.exact(new)):
751 781 repo.ui.status(_('recording removal of %s as rename to %s '
752 782 '(%d%% similar)\n') %
753 783 (matcher.rel(old), matcher.rel(new),
754 784 score * 100))
755 785 renames[new] = old
756 786 return renames
757 787
758 788 def _markchanges(repo, unknown, deleted, renames):
759 789 '''Marks the files in unknown as added, the files in deleted as removed,
760 790 and the files in renames as copied.'''
761 791 wctx = repo[None]
762 792 wlock = repo.wlock()
763 793 try:
764 794 wctx.forget(deleted)
765 795 wctx.add(unknown)
766 796 for new, old in renames.iteritems():
767 797 wctx.copy(old, new)
768 798 finally:
769 799 wlock.release()
770 800
771 801 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
772 802 """Update the dirstate to reflect the intent of copying src to dst. For
773 803 different reasons it might not end with dst being marked as copied from src.
774 804 """
775 805 origsrc = repo.dirstate.copied(src) or src
776 806 if dst == origsrc: # copying back a copy?
777 807 if repo.dirstate[dst] not in 'mn' and not dryrun:
778 808 repo.dirstate.normallookup(dst)
779 809 else:
780 810 if repo.dirstate[origsrc] == 'a' and origsrc == src:
781 811 if not ui.quiet:
782 812 ui.warn(_("%s has not been committed yet, so no copy "
783 813 "data will be stored for %s.\n")
784 814 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
785 815 if repo.dirstate[dst] in '?r' and not dryrun:
786 816 wctx.add([dst])
787 817 elif not dryrun:
788 818 wctx.copy(origsrc, dst)
789 819
790 820 def readrequires(opener, supported):
791 821 '''Reads and parses .hg/requires and checks if all entries found
792 822 are in the list of supported features.'''
793 823 requirements = set(opener.read("requires").splitlines())
794 824 missings = []
795 825 for r in requirements:
796 826 if r not in supported:
797 827 if not r or not r[0].isalnum():
798 828 raise error.RequirementError(_(".hg/requires file is corrupt"))
799 829 missings.append(r)
800 830 missings.sort()
801 831 if missings:
802 832 raise error.RequirementError(
803 833 _("unknown repository format: requires features '%s' (upgrade "
804 834 "Mercurial)") % "', '".join(missings))
805 835 return requirements
806 836
807 837 class filecacheentry(object):
808 838 def __init__(self, path, stat=True):
809 839 self.path = path
810 840 self.cachestat = None
811 841 self._cacheable = None
812 842
813 843 if stat:
814 844 self.cachestat = filecacheentry.stat(self.path)
815 845
816 846 if self.cachestat:
817 847 self._cacheable = self.cachestat.cacheable()
818 848 else:
819 849 # None means we don't know yet
820 850 self._cacheable = None
821 851
822 852 def refresh(self):
823 853 if self.cacheable():
824 854 self.cachestat = filecacheentry.stat(self.path)
825 855
826 856 def cacheable(self):
827 857 if self._cacheable is not None:
828 858 return self._cacheable
829 859
830 860 # we don't know yet, assume it is for now
831 861 return True
832 862
833 863 def changed(self):
834 864 # no point in going further if we can't cache it
835 865 if not self.cacheable():
836 866 return True
837 867
838 868 newstat = filecacheentry.stat(self.path)
839 869
840 870 # we may not know if it's cacheable yet, check again now
841 871 if newstat and self._cacheable is None:
842 872 self._cacheable = newstat.cacheable()
843 873
844 874 # check again
845 875 if not self._cacheable:
846 876 return True
847 877
848 878 if self.cachestat != newstat:
849 879 self.cachestat = newstat
850 880 return True
851 881 else:
852 882 return False
853 883
854 884 @staticmethod
855 885 def stat(path):
856 886 try:
857 887 return util.cachestat(path)
858 888 except OSError, e:
859 889 if e.errno != errno.ENOENT:
860 890 raise
861 891
862 892 class filecache(object):
863 893 '''A property-like decorator that tracks a file under .hg/ for updates.
864 894
865 895 Records stat info when called in _filecache.
866 896
867 897 On subsequent calls, compares old stat info with new info, and recreates
868 898 the object when needed, updating the new stat info in _filecache.
869 899
870 900 Mercurial either atomically renames or appends files under .hg,
871 901 so to ensure the cache is reliable we need the filesystem to be able
872 902 to tell us if a file has been replaced. If it can't, we fall back to
873 903 recreating the object on every call (essentially the same behaviour as
874 904 propertycache).'''
875 905 def __init__(self, path):
876 906 self.path = path
877 907
878 908 def join(self, obj, fname):
879 909 """Used to compute the runtime path of the cached file.
880 910
881 911 Users should subclass filecache and provide their own version of this
882 912 function to call the appropriate join function on 'obj' (an instance
883 913 of the class whose member function was decorated).
884 914 """
885 915 return obj.join(fname)
886 916
887 917 def __call__(self, func):
888 918 self.func = func
889 919 self.name = func.__name__
890 920 return self
891 921
892 922 def __get__(self, obj, type=None):
893 923 # do we need to check if the file changed?
894 924 if self.name in obj.__dict__:
895 925 assert self.name in obj._filecache, self.name
896 926 return obj.__dict__[self.name]
897 927
898 928 entry = obj._filecache.get(self.name)
899 929
900 930 if entry:
901 931 if entry.changed():
902 932 entry.obj = self.func(obj)
903 933 else:
904 934 path = self.join(obj, self.path)
905 935
906 936 # We stat -before- creating the object so our cache doesn't lie if
907 937 # a writer modified the file between the time we read and stat
908 938 entry = filecacheentry(path)
909 939 entry.obj = self.func(obj)
910 940
911 941 obj._filecache[self.name] = entry
912 942
913 943 obj.__dict__[self.name] = entry.obj
914 944 return entry.obj
915 945
916 946 def __set__(self, obj, value):
917 947 if self.name not in obj._filecache:
918 948 # we add an entry for the missing value because X in __dict__
919 949 # implies X in _filecache
920 950 ce = filecacheentry(self.join(obj, self.path), False)
921 951 obj._filecache[self.name] = ce
922 952 else:
923 953 ce = obj._filecache[self.name]
924 954
925 955 ce.obj = value # update cached copy
926 956 obj.__dict__[self.name] = value # update copy returned by obj.x
927 957
928 958 def __delete__(self, obj):
929 959 try:
930 960 del obj.__dict__[self.name]
931 961 except KeyError:
932 962 raise AttributeError(self.name)
933 963
934 964 class dirs(object):
935 965 '''a multiset of directory names from a dirstate or manifest'''
936 966
937 967 def __init__(self, map, skip=None):
938 968 self._dirs = {}
939 969 addpath = self.addpath
940 970 if util.safehasattr(map, 'iteritems') and skip is not None:
941 971 for f, s in map.iteritems():
942 972 if s[0] != skip:
943 973 addpath(f)
944 974 else:
945 975 for f in map:
946 976 addpath(f)
947 977
948 978 def addpath(self, path):
949 979 dirs = self._dirs
950 980 for base in finddirs(path):
951 981 if base in dirs:
952 982 dirs[base] += 1
953 983 return
954 984 dirs[base] = 1
955 985
956 986 def delpath(self, path):
957 987 dirs = self._dirs
958 988 for base in finddirs(path):
959 989 if dirs[base] > 1:
960 990 dirs[base] -= 1
961 991 return
962 992 del dirs[base]
963 993
964 994 def __iter__(self):
965 995 return self._dirs.iterkeys()
966 996
967 997 def __contains__(self, d):
968 998 return d in self._dirs
969 999
970 1000 if util.safehasattr(parsers, 'dirs'):
971 1001 dirs = parsers.dirs
972 1002
973 1003 def finddirs(path):
974 1004 pos = path.rfind('/')
975 1005 while pos != -1:
976 1006 yield path[:pos]
977 1007 pos = path.rfind('/', 0, pos)
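For orientation, here is a minimal sketch of how the new scmutil.marktouched helper introduced by this changeset might be driven from a script. It assumes a Mercurial working copy at /path/to/repo and the Python 2 API of this era; the repo path and file names are illustrative assumptions, not part of this change.

    # Illustrative sketch: tell Mercurial about files that were created or
    # deleted behind its back, so the dirstate records adds/removes/renames.
    from mercurial import ui as uimod, hg, scmutil

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')  # hypothetical repo path

    # Paths are given relative to the repo root, as the docstring requires.
    touched = ['docs/newfile.txt', 'docs/oldfile.txt']

    # Unknown files are marked added, missing files removed, and renames above
    # the similarity threshold (0.0-1.0) are recorded, much as addremove does.
    if scmutil.marktouched(repo, touched, similarity=0.9):
        u.warn('some files could not be processed\n')

Unlike addremove, marktouched takes an explicit list of files rather than patterns and always applies its changes (there is no dry-run parameter), which is why it is a separate helper rather than an option on addremove.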