dirstate: preserve path components case on renames (issue3402)...
Patrick Mezard
r16542:e596a631 stable
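
The patch below changes the copyfile helper in cmdutil.copy() so that only the directory part of the copy/rename target is folded to the case already recorded in the dirstate, while the final path component keeps exactly the case the user typed; that is what lets case-only renames such as a => A work on case-insensitive filesystems (issue3402). A minimal standalone sketch of that idea follows; normalize here is just a stand-in callable for repo.dirstate.normalize, not the real dirstate API:

    def preservecase_target(abstarget, normalize):
        # Fold only the directory part to its canonical case; keep the
        # basename exactly as given so case-only renames (a => A) survive.
        if '/' in abstarget:
            abspath, absname = abstarget.rsplit('/', 1)
            return normalize(abspath) + '/' + absname
        return abstarget

    # Example with a toy normalizer that lower-cases directory components:
    # preservecase_target('Dir/Sub/A', str.lower) returns 'dir/sub/A'
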
@@ -1,1639 +1,1644 @@
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import subrepo, context, repair, bookmarks
14 14
15 15 def parsealiases(cmd):
16 16 return cmd.lstrip("^").split("|")
17 17
18 18 def findpossible(cmd, table, strict=False):
19 19 """
20 20 Return cmd -> (aliases, command table entry)
21 21 for each matching command.
22 22 Return debug commands (or their aliases) only if no normal command matches.
23 23 """
24 24 choice = {}
25 25 debugchoice = {}
26 26
27 27 if cmd in table:
28 28 # short-circuit exact matches, "log" alias beats "^log|history"
29 29 keys = [cmd]
30 30 else:
31 31 keys = table.keys()
32 32
33 33 for e in keys:
34 34 aliases = parsealiases(e)
35 35 found = None
36 36 if cmd in aliases:
37 37 found = cmd
38 38 elif not strict:
39 39 for a in aliases:
40 40 if a.startswith(cmd):
41 41 found = a
42 42 break
43 43 if found is not None:
44 44 if aliases[0].startswith("debug") or found.startswith("debug"):
45 45 debugchoice[found] = (aliases, table[e])
46 46 else:
47 47 choice[found] = (aliases, table[e])
48 48
49 49 if not choice and debugchoice:
50 50 choice = debugchoice
51 51
52 52 return choice
53 53
54 54 def findcmd(cmd, table, strict=True):
55 55 """Return (aliases, command table entry) for command string."""
56 56 choice = findpossible(cmd, table, strict)
57 57
58 58 if cmd in choice:
59 59 return choice[cmd]
60 60
61 61 if len(choice) > 1:
62 62 clist = choice.keys()
63 63 clist.sort()
64 64 raise error.AmbiguousCommand(cmd, clist)
65 65
66 66 if choice:
67 67 return choice.values()[0]
68 68
69 69 raise error.UnknownCommand(cmd)
70 70
71 71 def findrepo(p):
72 72 while not os.path.isdir(os.path.join(p, ".hg")):
73 73 oldp, p = p, os.path.dirname(p)
74 74 if p == oldp:
75 75 return None
76 76
77 77 return p
78 78
79 79 def bailifchanged(repo):
80 80 if repo.dirstate.p2() != nullid:
81 81 raise util.Abort(_('outstanding uncommitted merge'))
82 82 modified, added, removed, deleted = repo.status()[:4]
83 83 if modified or added or removed or deleted:
84 84 raise util.Abort(_("outstanding uncommitted changes"))
85 85 ctx = repo[None]
86 86 for s in ctx.substate:
87 87 if ctx.sub(s).dirty():
88 88 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
89 89
90 90 def logmessage(ui, opts):
91 91 """ get the log message according to -m and -l option """
92 92 message = opts.get('message')
93 93 logfile = opts.get('logfile')
94 94
95 95 if message and logfile:
96 96 raise util.Abort(_('options --message and --logfile are mutually '
97 97 'exclusive'))
98 98 if not message and logfile:
99 99 try:
100 100 if logfile == '-':
101 101 message = ui.fin.read()
102 102 else:
103 103 message = '\n'.join(util.readfile(logfile).splitlines())
104 104 except IOError, inst:
105 105 raise util.Abort(_("can't read commit message '%s': %s") %
106 106 (logfile, inst.strerror))
107 107 return message
108 108
109 109 def loglimit(opts):
110 110 """get the log limit according to option -l/--limit"""
111 111 limit = opts.get('limit')
112 112 if limit:
113 113 try:
114 114 limit = int(limit)
115 115 except ValueError:
116 116 raise util.Abort(_('limit must be a positive integer'))
117 117 if limit <= 0:
118 118 raise util.Abort(_('limit must be positive'))
119 119 else:
120 120 limit = None
121 121 return limit
122 122
123 123 def makefilename(repo, pat, node, desc=None,
124 124 total=None, seqno=None, revwidth=None, pathname=None):
125 125 node_expander = {
126 126 'H': lambda: hex(node),
127 127 'R': lambda: str(repo.changelog.rev(node)),
128 128 'h': lambda: short(node),
129 129 'm': lambda: re.sub('[^\w]', '_', str(desc))
130 130 }
131 131 expander = {
132 132 '%': lambda: '%',
133 133 'b': lambda: os.path.basename(repo.root),
134 134 }
135 135
136 136 try:
137 137 if node:
138 138 expander.update(node_expander)
139 139 if node:
140 140 expander['r'] = (lambda:
141 141 str(repo.changelog.rev(node)).zfill(revwidth or 0))
142 142 if total is not None:
143 143 expander['N'] = lambda: str(total)
144 144 if seqno is not None:
145 145 expander['n'] = lambda: str(seqno)
146 146 if total is not None and seqno is not None:
147 147 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
148 148 if pathname is not None:
149 149 expander['s'] = lambda: os.path.basename(pathname)
150 150 expander['d'] = lambda: os.path.dirname(pathname) or '.'
151 151 expander['p'] = lambda: pathname
152 152
153 153 newname = []
154 154 patlen = len(pat)
155 155 i = 0
156 156 while i < patlen:
157 157 c = pat[i]
158 158 if c == '%':
159 159 i += 1
160 160 c = pat[i]
161 161 c = expander[c]()
162 162 newname.append(c)
163 163 i += 1
164 164 return ''.join(newname)
165 165 except KeyError, inst:
166 166 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
167 167 inst.args[0])
168 168
169 169 def makefileobj(repo, pat, node=None, desc=None, total=None,
170 170 seqno=None, revwidth=None, mode='wb', pathname=None):
171 171
172 172 writable = mode not in ('r', 'rb')
173 173
174 174 if not pat or pat == '-':
175 175 fp = writable and repo.ui.fout or repo.ui.fin
176 176 if util.safehasattr(fp, 'fileno'):
177 177 return os.fdopen(os.dup(fp.fileno()), mode)
178 178 else:
179 179 # if this fp can't be duped properly, return
180 180 # a dummy object that can be closed
181 181 class wrappedfileobj(object):
182 182 noop = lambda x: None
183 183 def __init__(self, f):
184 184 self.f = f
185 185 def __getattr__(self, attr):
186 186 if attr == 'close':
187 187 return self.noop
188 188 else:
189 189 return getattr(self.f, attr)
190 190
191 191 return wrappedfileobj(fp)
192 192 if util.safehasattr(pat, 'write') and writable:
193 193 return pat
194 194 if util.safehasattr(pat, 'read') and 'r' in mode:
195 195 return pat
196 196 return open(makefilename(repo, pat, node, desc, total, seqno, revwidth,
197 197 pathname),
198 198 mode)
199 199
200 200 def openrevlog(repo, cmd, file_, opts):
201 201 """opens the changelog, manifest, a filelog or a given revlog"""
202 202 cl = opts['changelog']
203 203 mf = opts['manifest']
204 204 msg = None
205 205 if cl and mf:
206 206 msg = _('cannot specify --changelog and --manifest at the same time')
207 207 elif cl or mf:
208 208 if file_:
209 209 msg = _('cannot specify filename with --changelog or --manifest')
210 210 elif not repo:
211 211 msg = _('cannot specify --changelog or --manifest '
212 212 'without a repository')
213 213 if msg:
214 214 raise util.Abort(msg)
215 215
216 216 r = None
217 217 if repo:
218 218 if cl:
219 219 r = repo.changelog
220 220 elif mf:
221 221 r = repo.manifest
222 222 elif file_:
223 223 filelog = repo.file(file_)
224 224 if len(filelog):
225 225 r = filelog
226 226 if not r:
227 227 if not file_:
228 228 raise error.CommandError(cmd, _('invalid arguments'))
229 229 if not os.path.isfile(file_):
230 230 raise util.Abort(_("revlog '%s' not found") % file_)
231 231 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
232 232 file_[:-2] + ".i")
233 233 return r
234 234
235 235 def copy(ui, repo, pats, opts, rename=False):
236 236 # called with the repo lock held
237 237 #
238 238 # hgsep => pathname that uses "/" to separate directories
239 239 # ossep => pathname that uses os.sep to separate directories
240 240 cwd = repo.getcwd()
241 241 targets = {}
242 242 after = opts.get("after")
243 243 dryrun = opts.get("dry_run")
244 244 wctx = repo[None]
245 245
246 246 def walkpat(pat):
247 247 srcs = []
248 248 badstates = after and '?' or '?r'
249 249 m = scmutil.match(repo[None], [pat], opts, globbed=True)
250 250 for abs in repo.walk(m):
251 251 state = repo.dirstate[abs]
252 252 rel = m.rel(abs)
253 253 exact = m.exact(abs)
254 254 if state in badstates:
255 255 if exact and state == '?':
256 256 ui.warn(_('%s: not copying - file is not managed\n') % rel)
257 257 if exact and state == 'r':
258 258 ui.warn(_('%s: not copying - file has been marked for'
259 259 ' remove\n') % rel)
260 260 continue
261 261 # abs: hgsep
262 262 # rel: ossep
263 263 srcs.append((abs, rel, exact))
264 264 return srcs
265 265
266 266 # abssrc: hgsep
267 267 # relsrc: ossep
268 268 # otarget: ossep
269 269 def copyfile(abssrc, relsrc, otarget, exact):
270 270 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
271 if '/' in abstarget:
272 # We cannot normalize abstarget itself, this would prevent
273 # case only renames, like a => A.
274 abspath, absname = abstarget.rsplit('/', 1)
275 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
271 276 reltarget = repo.pathto(abstarget, cwd)
272 277 target = repo.wjoin(abstarget)
273 278 src = repo.wjoin(abssrc)
274 279 state = repo.dirstate[abstarget]
275 280
276 281 scmutil.checkportable(ui, abstarget)
277 282
278 283 # check for collisions
279 284 prevsrc = targets.get(abstarget)
280 285 if prevsrc is not None:
281 286 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
282 287 (reltarget, repo.pathto(abssrc, cwd),
283 288 repo.pathto(prevsrc, cwd)))
284 289 return
285 290
286 291 # check for overwrites
287 292 exists = os.path.lexists(target)
288 293 samefile = False
289 294 if exists and abssrc != abstarget:
290 295 if (repo.dirstate.normalize(abssrc) ==
291 296 repo.dirstate.normalize(abstarget)):
292 297 if not rename:
293 298 ui.warn(_("%s: can't copy - same file\n") % reltarget)
294 299 return
295 300 exists = False
296 301 samefile = True
297 302
298 303 if not after and exists or after and state in 'mn':
299 304 if not opts['force']:
300 305 ui.warn(_('%s: not overwriting - file exists\n') %
301 306 reltarget)
302 307 return
303 308
304 309 if after:
305 310 if not exists:
306 311 if rename:
307 312 ui.warn(_('%s: not recording move - %s does not exist\n') %
308 313 (relsrc, reltarget))
309 314 else:
310 315 ui.warn(_('%s: not recording copy - %s does not exist\n') %
311 316 (relsrc, reltarget))
312 317 return
313 318 elif not dryrun:
314 319 try:
315 320 if exists:
316 321 os.unlink(target)
317 322 targetdir = os.path.dirname(target) or '.'
318 323 if not os.path.isdir(targetdir):
319 324 os.makedirs(targetdir)
320 325 if samefile:
321 326 tmp = target + "~hgrename"
322 327 os.rename(src, tmp)
323 328 os.rename(tmp, target)
324 329 else:
325 330 util.copyfile(src, target)
326 331 srcexists = True
327 332 except IOError, inst:
328 333 if inst.errno == errno.ENOENT:
329 334 ui.warn(_('%s: deleted in working copy\n') % relsrc)
330 335 srcexists = False
331 336 else:
332 337 ui.warn(_('%s: cannot copy - %s\n') %
333 338 (relsrc, inst.strerror))
334 339 return True # report a failure
335 340
336 341 if ui.verbose or not exact:
337 342 if rename:
338 343 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
339 344 else:
340 345 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
341 346
342 347 targets[abstarget] = abssrc
343 348
344 349 # fix up dirstate
345 350 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
346 351 dryrun=dryrun, cwd=cwd)
347 352 if rename and not dryrun:
348 353 if not after and srcexists and not samefile:
349 354 util.unlinkpath(repo.wjoin(abssrc))
350 355 wctx.forget([abssrc])
351 356
352 357 # pat: ossep
353 358 # dest ossep
354 359 # srcs: list of (hgsep, hgsep, ossep, bool)
355 360 # return: function that takes hgsep and returns ossep
356 361 def targetpathfn(pat, dest, srcs):
357 362 if os.path.isdir(pat):
358 363 abspfx = scmutil.canonpath(repo.root, cwd, pat)
359 364 abspfx = util.localpath(abspfx)
360 365 if destdirexists:
361 366 striplen = len(os.path.split(abspfx)[0])
362 367 else:
363 368 striplen = len(abspfx)
364 369 if striplen:
365 370 striplen += len(os.sep)
366 371 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
367 372 elif destdirexists:
368 373 res = lambda p: os.path.join(dest,
369 374 os.path.basename(util.localpath(p)))
370 375 else:
371 376 res = lambda p: dest
372 377 return res
373 378
374 379 # pat: ossep
375 380 # dest ossep
376 381 # srcs: list of (hgsep, hgsep, ossep, bool)
377 382 # return: function that takes hgsep and returns ossep
378 383 def targetpathafterfn(pat, dest, srcs):
379 384 if matchmod.patkind(pat):
380 385 # a mercurial pattern
381 386 res = lambda p: os.path.join(dest,
382 387 os.path.basename(util.localpath(p)))
383 388 else:
384 389 abspfx = scmutil.canonpath(repo.root, cwd, pat)
385 390 if len(abspfx) < len(srcs[0][0]):
386 391 # A directory. Either the target path contains the last
387 392 # component of the source path or it does not.
388 393 def evalpath(striplen):
389 394 score = 0
390 395 for s in srcs:
391 396 t = os.path.join(dest, util.localpath(s[0])[striplen:])
392 397 if os.path.lexists(t):
393 398 score += 1
394 399 return score
395 400
396 401 abspfx = util.localpath(abspfx)
397 402 striplen = len(abspfx)
398 403 if striplen:
399 404 striplen += len(os.sep)
400 405 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
401 406 score = evalpath(striplen)
402 407 striplen1 = len(os.path.split(abspfx)[0])
403 408 if striplen1:
404 409 striplen1 += len(os.sep)
405 410 if evalpath(striplen1) > score:
406 411 striplen = striplen1
407 412 res = lambda p: os.path.join(dest,
408 413 util.localpath(p)[striplen:])
409 414 else:
410 415 # a file
411 416 if destdirexists:
412 417 res = lambda p: os.path.join(dest,
413 418 os.path.basename(util.localpath(p)))
414 419 else:
415 420 res = lambda p: dest
416 421 return res
417 422
418 423
419 424 pats = scmutil.expandpats(pats)
420 425 if not pats:
421 426 raise util.Abort(_('no source or destination specified'))
422 427 if len(pats) == 1:
423 428 raise util.Abort(_('no destination specified'))
424 429 dest = pats.pop()
425 430 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
426 431 if not destdirexists:
427 432 if len(pats) > 1 or matchmod.patkind(pats[0]):
428 433 raise util.Abort(_('with multiple sources, destination must be an '
429 434 'existing directory'))
430 435 if util.endswithsep(dest):
431 436 raise util.Abort(_('destination %s is not a directory') % dest)
432 437
433 438 tfn = targetpathfn
434 439 if after:
435 440 tfn = targetpathafterfn
436 441 copylist = []
437 442 for pat in pats:
438 443 srcs = walkpat(pat)
439 444 if not srcs:
440 445 continue
441 446 copylist.append((tfn(pat, dest, srcs), srcs))
442 447 if not copylist:
443 448 raise util.Abort(_('no files to copy'))
444 449
445 450 errors = 0
446 451 for targetpath, srcs in copylist:
447 452 for abssrc, relsrc, exact in srcs:
448 453 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
449 454 errors += 1
450 455
451 456 if errors:
452 457 ui.warn(_('(consider using --after)\n'))
453 458
454 459 return errors != 0
455 460
456 461 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
457 462 runargs=None, appendpid=False):
458 463 '''Run a command as a service.'''
459 464
460 465 if opts['daemon'] and not opts['daemon_pipefds']:
461 466 # Signal child process startup with file removal
462 467 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
463 468 os.close(lockfd)
464 469 try:
465 470 if not runargs:
466 471 runargs = util.hgcmd() + sys.argv[1:]
467 472 runargs.append('--daemon-pipefds=%s' % lockpath)
468 473 # Don't pass --cwd to the child process, because we've already
469 474 # changed directory.
470 475 for i in xrange(1, len(runargs)):
471 476 if runargs[i].startswith('--cwd='):
472 477 del runargs[i]
473 478 break
474 479 elif runargs[i].startswith('--cwd'):
475 480 del runargs[i:i + 2]
476 481 break
477 482 def condfn():
478 483 return not os.path.exists(lockpath)
479 484 pid = util.rundetached(runargs, condfn)
480 485 if pid < 0:
481 486 raise util.Abort(_('child process failed to start'))
482 487 finally:
483 488 try:
484 489 os.unlink(lockpath)
485 490 except OSError, e:
486 491 if e.errno != errno.ENOENT:
487 492 raise
488 493 if parentfn:
489 494 return parentfn(pid)
490 495 else:
491 496 return
492 497
493 498 if initfn:
494 499 initfn()
495 500
496 501 if opts['pid_file']:
497 502 mode = appendpid and 'a' or 'w'
498 503 fp = open(opts['pid_file'], mode)
499 504 fp.write(str(os.getpid()) + '\n')
500 505 fp.close()
501 506
502 507 if opts['daemon_pipefds']:
503 508 lockpath = opts['daemon_pipefds']
504 509 try:
505 510 os.setsid()
506 511 except AttributeError:
507 512 pass
508 513 os.unlink(lockpath)
509 514 util.hidewindow()
510 515 sys.stdout.flush()
511 516 sys.stderr.flush()
512 517
513 518 nullfd = os.open(util.nulldev, os.O_RDWR)
514 519 logfilefd = nullfd
515 520 if logfile:
516 521 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
517 522 os.dup2(nullfd, 0)
518 523 os.dup2(logfilefd, 1)
519 524 os.dup2(logfilefd, 2)
520 525 if nullfd not in (0, 1, 2):
521 526 os.close(nullfd)
522 527 if logfile and logfilefd not in (0, 1, 2):
523 528 os.close(logfilefd)
524 529
525 530 if runfn:
526 531 return runfn()
527 532
528 533 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
529 534 opts=None):
530 535 '''export changesets as hg patches.'''
531 536
532 537 total = len(revs)
533 538 revwidth = max([len(str(rev)) for rev in revs])
534 539
535 540 def single(rev, seqno, fp):
536 541 ctx = repo[rev]
537 542 node = ctx.node()
538 543 parents = [p.node() for p in ctx.parents() if p]
539 544 branch = ctx.branch()
540 545 if switch_parent:
541 546 parents.reverse()
542 547 prev = (parents and parents[0]) or nullid
543 548
544 549 shouldclose = False
545 550 if not fp:
546 551 desc_lines = ctx.description().rstrip().split('\n')
547 552 desc = desc_lines[0] #Commit always has a first line.
548 553 fp = makefileobj(repo, template, node, desc=desc, total=total,
549 554 seqno=seqno, revwidth=revwidth, mode='ab')
550 555 if fp != template:
551 556 shouldclose = True
552 557 if fp != sys.stdout and util.safehasattr(fp, 'name'):
553 558 repo.ui.note("%s\n" % fp.name)
554 559
555 560 fp.write("# HG changeset patch\n")
556 561 fp.write("# User %s\n" % ctx.user())
557 562 fp.write("# Date %d %d\n" % ctx.date())
558 563 if branch and branch != 'default':
559 564 fp.write("# Branch %s\n" % branch)
560 565 fp.write("# Node ID %s\n" % hex(node))
561 566 fp.write("# Parent %s\n" % hex(prev))
562 567 if len(parents) > 1:
563 568 fp.write("# Parent %s\n" % hex(parents[1]))
564 569 fp.write(ctx.description().rstrip())
565 570 fp.write("\n\n")
566 571
567 572 for chunk in patch.diff(repo, prev, node, opts=opts):
568 573 fp.write(chunk)
569 574
570 575 if shouldclose:
571 576 fp.close()
572 577
573 578 for seqno, rev in enumerate(revs):
574 579 single(rev, seqno + 1, fp)
575 580
576 581 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
577 582 changes=None, stat=False, fp=None, prefix='',
578 583 listsubrepos=False):
579 584 '''show diff or diffstat.'''
580 585 if fp is None:
581 586 write = ui.write
582 587 else:
583 588 def write(s, **kw):
584 589 fp.write(s)
585 590
586 591 if stat:
587 592 diffopts = diffopts.copy(context=0)
588 593 width = 80
589 594 if not ui.plain():
590 595 width = ui.termwidth()
591 596 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
592 597 prefix=prefix)
593 598 for chunk, label in patch.diffstatui(util.iterlines(chunks),
594 599 width=width,
595 600 git=diffopts.git):
596 601 write(chunk, label=label)
597 602 else:
598 603 for chunk, label in patch.diffui(repo, node1, node2, match,
599 604 changes, diffopts, prefix=prefix):
600 605 write(chunk, label=label)
601 606
602 607 if listsubrepos:
603 608 ctx1 = repo[node1]
604 609 ctx2 = repo[node2]
605 610 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
606 611 tempnode2 = node2
607 612 try:
608 613 if node2 is not None:
609 614 tempnode2 = ctx2.substate[subpath][1]
610 615 except KeyError:
611 616 # A subrepo that existed in node1 was deleted between node1 and
612 617 # node2 (inclusive). Thus, ctx2's substate won't contain that
613 618 # subpath. The best we can do is to ignore it.
614 619 tempnode2 = None
615 620 submatch = matchmod.narrowmatcher(subpath, match)
616 621 sub.diff(diffopts, tempnode2, submatch, changes=changes,
617 622 stat=stat, fp=fp, prefix=prefix)
618 623
619 624 class changeset_printer(object):
620 625 '''show changeset information when templating not requested.'''
621 626
622 627 def __init__(self, ui, repo, patch, diffopts, buffered):
623 628 self.ui = ui
624 629 self.repo = repo
625 630 self.buffered = buffered
626 631 self.patch = patch
627 632 self.diffopts = diffopts
628 633 self.header = {}
629 634 self.hunk = {}
630 635 self.lastheader = None
631 636 self.footer = None
632 637
633 638 def flush(self, rev):
634 639 if rev in self.header:
635 640 h = self.header[rev]
636 641 if h != self.lastheader:
637 642 self.lastheader = h
638 643 self.ui.write(h)
639 644 del self.header[rev]
640 645 if rev in self.hunk:
641 646 self.ui.write(self.hunk[rev])
642 647 del self.hunk[rev]
643 648 return 1
644 649 return 0
645 650
646 651 def close(self):
647 652 if self.footer:
648 653 self.ui.write(self.footer)
649 654
650 655 def show(self, ctx, copies=None, matchfn=None, **props):
651 656 if self.buffered:
652 657 self.ui.pushbuffer()
653 658 self._show(ctx, copies, matchfn, props)
654 659 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
655 660 else:
656 661 self._show(ctx, copies, matchfn, props)
657 662
658 663 def _show(self, ctx, copies, matchfn, props):
659 664 '''show a single changeset or file revision'''
660 665 changenode = ctx.node()
661 666 rev = ctx.rev()
662 667
663 668 if self.ui.quiet:
664 669 self.ui.write("%d:%s\n" % (rev, short(changenode)),
665 670 label='log.node')
666 671 return
667 672
668 673 log = self.repo.changelog
669 674 date = util.datestr(ctx.date())
670 675
671 676 hexfunc = self.ui.debugflag and hex or short
672 677
673 678 parents = [(p, hexfunc(log.node(p)))
674 679 for p in self._meaningful_parentrevs(log, rev)]
675 680
676 681 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
677 682 label='log.changeset')
678 683
679 684 branch = ctx.branch()
680 685 # don't show the default branch name
681 686 if branch != 'default':
682 687 self.ui.write(_("branch: %s\n") % branch,
683 688 label='log.branch')
684 689 for bookmark in self.repo.nodebookmarks(changenode):
685 690 self.ui.write(_("bookmark: %s\n") % bookmark,
686 691 label='log.bookmark')
687 692 for tag in self.repo.nodetags(changenode):
688 693 self.ui.write(_("tag: %s\n") % tag,
689 694 label='log.tag')
690 695 if self.ui.debugflag and ctx.phase():
691 696 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
692 697 label='log.phase')
693 698 for parent in parents:
694 699 self.ui.write(_("parent: %d:%s\n") % parent,
695 700 label='log.parent')
696 701
697 702 if self.ui.debugflag:
698 703 mnode = ctx.manifestnode()
699 704 self.ui.write(_("manifest: %d:%s\n") %
700 705 (self.repo.manifest.rev(mnode), hex(mnode)),
701 706 label='ui.debug log.manifest')
702 707 self.ui.write(_("user: %s\n") % ctx.user(),
703 708 label='log.user')
704 709 self.ui.write(_("date: %s\n") % date,
705 710 label='log.date')
706 711
707 712 if self.ui.debugflag:
708 713 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
709 714 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
710 715 files):
711 716 if value:
712 717 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
713 718 label='ui.debug log.files')
714 719 elif ctx.files() and self.ui.verbose:
715 720 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
716 721 label='ui.note log.files')
717 722 if copies and self.ui.verbose:
718 723 copies = ['%s (%s)' % c for c in copies]
719 724 self.ui.write(_("copies: %s\n") % ' '.join(copies),
720 725 label='ui.note log.copies')
721 726
722 727 extra = ctx.extra()
723 728 if extra and self.ui.debugflag:
724 729 for key, value in sorted(extra.items()):
725 730 self.ui.write(_("extra: %s=%s\n")
726 731 % (key, value.encode('string_escape')),
727 732 label='ui.debug log.extra')
728 733
729 734 description = ctx.description().strip()
730 735 if description:
731 736 if self.ui.verbose:
732 737 self.ui.write(_("description:\n"),
733 738 label='ui.note log.description')
734 739 self.ui.write(description,
735 740 label='ui.note log.description')
736 741 self.ui.write("\n\n")
737 742 else:
738 743 self.ui.write(_("summary: %s\n") %
739 744 description.splitlines()[0],
740 745 label='log.summary')
741 746 self.ui.write("\n")
742 747
743 748 self.showpatch(changenode, matchfn)
744 749
745 750 def showpatch(self, node, matchfn):
746 751 if not matchfn:
747 752 matchfn = self.patch
748 753 if matchfn:
749 754 stat = self.diffopts.get('stat')
750 755 diff = self.diffopts.get('patch')
751 756 diffopts = patch.diffopts(self.ui, self.diffopts)
752 757 prev = self.repo.changelog.parents(node)[0]
753 758 if stat:
754 759 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
755 760 match=matchfn, stat=True)
756 761 if diff:
757 762 if stat:
758 763 self.ui.write("\n")
759 764 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
760 765 match=matchfn, stat=False)
761 766 self.ui.write("\n")
762 767
763 768 def _meaningful_parentrevs(self, log, rev):
764 769 """Return list of meaningful (or all if debug) parentrevs for rev.
765 770
766 771 For merges (two non-nullrev revisions) both parents are meaningful.
767 772 Otherwise the first parent revision is considered meaningful if it
768 773 is not the preceding revision.
769 774 """
770 775 parents = log.parentrevs(rev)
771 776 if not self.ui.debugflag and parents[1] == nullrev:
772 777 if parents[0] >= rev - 1:
773 778 parents = []
774 779 else:
775 780 parents = [parents[0]]
776 781 return parents
777 782
778 783
779 784 class changeset_templater(changeset_printer):
780 785 '''format changeset information.'''
781 786
782 787 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
783 788 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
784 789 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
785 790 defaulttempl = {
786 791 'parent': '{rev}:{node|formatnode} ',
787 792 'manifest': '{rev}:{node|formatnode}',
788 793 'file_copy': '{name} ({source})',
789 794 'extra': '{key}={value|stringescape}'
790 795 }
791 796 # filecopy is preserved for compatibility reasons
792 797 defaulttempl['filecopy'] = defaulttempl['file_copy']
793 798 self.t = templater.templater(mapfile, {'formatnode': formatnode},
794 799 cache=defaulttempl)
795 800 self.cache = {}
796 801
797 802 def use_template(self, t):
798 803 '''set template string to use'''
799 804 self.t.cache['changeset'] = t
800 805
801 806 def _meaningful_parentrevs(self, ctx):
802 807 """Return list of meaningful (or all if debug) parentrevs for rev.
803 808 """
804 809 parents = ctx.parents()
805 810 if len(parents) > 1:
806 811 return parents
807 812 if self.ui.debugflag:
808 813 return [parents[0], self.repo['null']]
809 814 if parents[0].rev() >= ctx.rev() - 1:
810 815 return []
811 816 return parents
812 817
813 818 def _show(self, ctx, copies, matchfn, props):
814 819 '''show a single changeset or file revision'''
815 820
816 821 showlist = templatekw.showlist
817 822
818 823 # showparents() behaviour depends on ui trace level which
819 824 # causes unexpected behaviours at templating level and makes
820 825 # it harder to extract it in a standalone function. Its
821 826 # behaviour cannot be changed so leave it here for now.
822 827 def showparents(**args):
823 828 ctx = args['ctx']
824 829 parents = [[('rev', p.rev()), ('node', p.hex())]
825 830 for p in self._meaningful_parentrevs(ctx)]
826 831 return showlist('parent', parents, **args)
827 832
828 833 props = props.copy()
829 834 props.update(templatekw.keywords)
830 835 props['parents'] = showparents
831 836 props['templ'] = self.t
832 837 props['ctx'] = ctx
833 838 props['repo'] = self.repo
834 839 props['revcache'] = {'copies': copies}
835 840 props['cache'] = self.cache
836 841
837 842 # find correct templates for current mode
838 843
839 844 tmplmodes = [
840 845 (True, None),
841 846 (self.ui.verbose, 'verbose'),
842 847 (self.ui.quiet, 'quiet'),
843 848 (self.ui.debugflag, 'debug'),
844 849 ]
845 850
846 851 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
847 852 for mode, postfix in tmplmodes:
848 853 for type in types:
849 854 cur = postfix and ('%s_%s' % (type, postfix)) or type
850 855 if mode and cur in self.t:
851 856 types[type] = cur
852 857
853 858 try:
854 859
855 860 # write header
856 861 if types['header']:
857 862 h = templater.stringify(self.t(types['header'], **props))
858 863 if self.buffered:
859 864 self.header[ctx.rev()] = h
860 865 else:
861 866 if self.lastheader != h:
862 867 self.lastheader = h
863 868 self.ui.write(h)
864 869
865 870 # write changeset metadata, then patch if requested
866 871 key = types['changeset']
867 872 self.ui.write(templater.stringify(self.t(key, **props)))
868 873 self.showpatch(ctx.node(), matchfn)
869 874
870 875 if types['footer']:
871 876 if not self.footer:
872 877 self.footer = templater.stringify(self.t(types['footer'],
873 878 **props))
874 879
875 880 except KeyError, inst:
876 881 msg = _("%s: no key named '%s'")
877 882 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
878 883 except SyntaxError, inst:
879 884 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
880 885
881 886 def show_changeset(ui, repo, opts, buffered=False):
882 887 """show one changeset using template or regular display.
883 888
884 889 Display format will be the first non-empty hit of:
885 890 1. option 'template'
886 891 2. option 'style'
887 892 3. [ui] setting 'logtemplate'
888 893 4. [ui] setting 'style'
889 894 If all of these values are either the unset or the empty string,
890 895 regular display via changeset_printer() is done.
891 896 """
892 897 # options
893 898 patch = False
894 899 if opts.get('patch') or opts.get('stat'):
895 900 patch = scmutil.matchall(repo)
896 901
897 902 tmpl = opts.get('template')
898 903 style = None
899 904 if tmpl:
900 905 tmpl = templater.parsestring(tmpl, quoted=False)
901 906 else:
902 907 style = opts.get('style')
903 908
904 909 # ui settings
905 910 if not (tmpl or style):
906 911 tmpl = ui.config('ui', 'logtemplate')
907 912 if tmpl:
908 913 tmpl = templater.parsestring(tmpl)
909 914 else:
910 915 style = util.expandpath(ui.config('ui', 'style', ''))
911 916
912 917 if not (tmpl or style):
913 918 return changeset_printer(ui, repo, patch, opts, buffered)
914 919
915 920 mapfile = None
916 921 if style and not tmpl:
917 922 mapfile = style
918 923 if not os.path.split(mapfile)[0]:
919 924 mapname = (templater.templatepath('map-cmdline.' + mapfile)
920 925 or templater.templatepath(mapfile))
921 926 if mapname:
922 927 mapfile = mapname
923 928
924 929 try:
925 930 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
926 931 except SyntaxError, inst:
927 932 raise util.Abort(inst.args[0])
928 933 if tmpl:
929 934 t.use_template(tmpl)
930 935 return t
931 936
932 937 def finddate(ui, repo, date):
933 938 """Find the tipmost changeset that matches the given date spec"""
934 939
935 940 df = util.matchdate(date)
936 941 m = scmutil.matchall(repo)
937 942 results = {}
938 943
939 944 def prep(ctx, fns):
940 945 d = ctx.date()
941 946 if df(d[0]):
942 947 results[ctx.rev()] = d
943 948
944 949 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
945 950 rev = ctx.rev()
946 951 if rev in results:
947 952 ui.status(_("Found revision %s from %s\n") %
948 953 (rev, util.datestr(results[rev])))
949 954 return str(rev)
950 955
951 956 raise util.Abort(_("revision matching date not found"))
952 957
953 958 def walkchangerevs(repo, match, opts, prepare):
954 959 '''Iterate over files and the revs in which they changed.
955 960
956 961 Callers most commonly need to iterate backwards over the history
957 962 in which they are interested. Doing so has awful (quadratic-looking)
958 963 performance, so we use iterators in a "windowed" way.
959 964
960 965 We walk a window of revisions in the desired order. Within the
961 966 window, we first walk forwards to gather data, then in the desired
962 967 order (usually backwards) to display it.
963 968
964 969 This function returns an iterator yielding contexts. Before
965 970 yielding each context, the iterator will first call the prepare
966 971 function on each context in the window in forward order.'''
967 972
968 973 def increasing_windows(start, end, windowsize=8, sizelimit=512):
969 974 if start < end:
970 975 while start < end:
971 976 yield start, min(windowsize, end - start)
972 977 start += windowsize
973 978 if windowsize < sizelimit:
974 979 windowsize *= 2
975 980 else:
976 981 while start > end:
977 982 yield start, min(windowsize, start - end - 1)
978 983 start -= windowsize
979 984 if windowsize < sizelimit:
980 985 windowsize *= 2
981 986
982 987 follow = opts.get('follow') or opts.get('follow_first')
983 988
984 989 if not len(repo):
985 990 return []
986 991
987 992 if follow:
988 993 defrange = '%s:0' % repo['.'].rev()
989 994 else:
990 995 defrange = '-1:0'
991 996 revs = scmutil.revrange(repo, opts['rev'] or [defrange])
992 997 if not revs:
993 998 return []
994 999 wanted = set()
995 1000 slowpath = match.anypats() or (match.files() and opts.get('removed'))
996 1001 fncache = {}
997 1002 change = repo.changectx
998 1003
999 1004 # First step is to fill wanted, the set of revisions that we want to yield.
1000 1005 # When it does not induce extra cost, we also fill fncache for revisions in
1001 1006 # wanted: a cache of filenames that were changed (ctx.files()) and that
1002 1007 # match the file filtering conditions.
1003 1008
1004 1009 if not slowpath and not match.files():
1005 1010 # No files, no patterns. Display all revs.
1006 1011 wanted = set(revs)
1007 1012 copies = []
1008 1013
1009 1014 if not slowpath and match.files():
1010 1015 # We only have to read through the filelog to find wanted revisions
1011 1016
1012 1017 minrev, maxrev = min(revs), max(revs)
1013 1018 def filerevgen(filelog, last):
1014 1019 """
1015 1020 Only files, no patterns. Check the history of each file.
1016 1021
1017 1022 Examines filelog entries within minrev, maxrev linkrev range
1018 1023 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1019 1024 tuples in backwards order
1020 1025 """
1021 1026 cl_count = len(repo)
1022 1027 revs = []
1023 1028 for j in xrange(0, last + 1):
1024 1029 linkrev = filelog.linkrev(j)
1025 1030 if linkrev < minrev:
1026 1031 continue
1027 1032 # only yield rev for which we have the changelog, it can
1028 1033 # happen while doing "hg log" during a pull or commit
1029 1034 if linkrev >= cl_count:
1030 1035 break
1031 1036
1032 1037 parentlinkrevs = []
1033 1038 for p in filelog.parentrevs(j):
1034 1039 if p != nullrev:
1035 1040 parentlinkrevs.append(filelog.linkrev(p))
1036 1041 n = filelog.node(j)
1037 1042 revs.append((linkrev, parentlinkrevs,
1038 1043 follow and filelog.renamed(n)))
1039 1044
1040 1045 return reversed(revs)
1041 1046 def iterfiles():
1042 1047 pctx = repo['.']
1043 1048 for filename in match.files():
1044 1049 if follow:
1045 1050 if filename not in pctx:
1046 1051 raise util.Abort(_('cannot follow file not in parent '
1047 1052 'revision: "%s"') % filename)
1048 1053 yield filename, pctx[filename].filenode()
1049 1054 else:
1050 1055 yield filename, None
1051 1056 for filename_node in copies:
1052 1057 yield filename_node
1053 1058 for file_, node in iterfiles():
1054 1059 filelog = repo.file(file_)
1055 1060 if not len(filelog):
1056 1061 if node is None:
1057 1062 # A zero count may be a directory or deleted file, so
1058 1063 # try to find matching entries on the slow path.
1059 1064 if follow:
1060 1065 raise util.Abort(
1061 1066 _('cannot follow nonexistent file: "%s"') % file_)
1062 1067 slowpath = True
1063 1068 break
1064 1069 else:
1065 1070 continue
1066 1071
1067 1072 if node is None:
1068 1073 last = len(filelog) - 1
1069 1074 else:
1070 1075 last = filelog.rev(node)
1071 1076
1072 1077
1073 1078 # keep track of all ancestors of the file
1074 1079 ancestors = set([filelog.linkrev(last)])
1075 1080
1076 1081 # iterate from latest to oldest revision
1077 1082 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1078 1083 if not follow:
1079 1084 if rev > maxrev:
1080 1085 continue
1081 1086 else:
1082 1087 # Note that last might not be the first interesting
1083 1088 # rev to us:
1084 1089 # if the file has been changed after maxrev, we'll
1085 1090 # have linkrev(last) > maxrev, and we still need
1086 1091 # to explore the file graph
1087 1092 if rev not in ancestors:
1088 1093 continue
1089 1094 # XXX insert 1327 fix here
1090 1095 if flparentlinkrevs:
1091 1096 ancestors.update(flparentlinkrevs)
1092 1097
1093 1098 fncache.setdefault(rev, []).append(file_)
1094 1099 wanted.add(rev)
1095 1100 if copied:
1096 1101 copies.append(copied)
1097 1102 if slowpath:
1098 1103 # We have to read the changelog to match filenames against
1099 1104 # changed files
1100 1105
1101 1106 if follow:
1102 1107 raise util.Abort(_('can only follow copies/renames for explicit '
1103 1108 'filenames'))
1104 1109
1105 1110 # The slow path checks files modified in every changeset.
1106 1111 for i in sorted(revs):
1107 1112 ctx = change(i)
1108 1113 matches = filter(match, ctx.files())
1109 1114 if matches:
1110 1115 fncache[i] = matches
1111 1116 wanted.add(i)
1112 1117
1113 1118 class followfilter(object):
1114 1119 def __init__(self, onlyfirst=False):
1115 1120 self.startrev = nullrev
1116 1121 self.roots = set()
1117 1122 self.onlyfirst = onlyfirst
1118 1123
1119 1124 def match(self, rev):
1120 1125 def realparents(rev):
1121 1126 if self.onlyfirst:
1122 1127 return repo.changelog.parentrevs(rev)[0:1]
1123 1128 else:
1124 1129 return filter(lambda x: x != nullrev,
1125 1130 repo.changelog.parentrevs(rev))
1126 1131
1127 1132 if self.startrev == nullrev:
1128 1133 self.startrev = rev
1129 1134 return True
1130 1135
1131 1136 if rev > self.startrev:
1132 1137 # forward: all descendants
1133 1138 if not self.roots:
1134 1139 self.roots.add(self.startrev)
1135 1140 for parent in realparents(rev):
1136 1141 if parent in self.roots:
1137 1142 self.roots.add(rev)
1138 1143 return True
1139 1144 else:
1140 1145 # backwards: all parents
1141 1146 if not self.roots:
1142 1147 self.roots.update(realparents(self.startrev))
1143 1148 if rev in self.roots:
1144 1149 self.roots.remove(rev)
1145 1150 self.roots.update(realparents(rev))
1146 1151 return True
1147 1152
1148 1153 return False
1149 1154
1150 1155 # it might be worthwhile to do this in the iterator if the rev range
1151 1156 # is descending and the prune args are all within that range
1152 1157 for rev in opts.get('prune', ()):
1153 1158 rev = repo[rev].rev()
1154 1159 ff = followfilter()
1155 1160 stop = min(revs[0], revs[-1])
1156 1161 for x in xrange(rev, stop - 1, -1):
1157 1162 if ff.match(x):
1158 1163 wanted.discard(x)
1159 1164
1160 1165 # Now that wanted is correctly initialized, we can iterate over the
1161 1166 # revision range, yielding only revisions in wanted.
1162 1167 def iterate():
1163 1168 if follow and not match.files():
1164 1169 ff = followfilter(onlyfirst=opts.get('follow_first'))
1165 1170 def want(rev):
1166 1171 return ff.match(rev) and rev in wanted
1167 1172 else:
1168 1173 def want(rev):
1169 1174 return rev in wanted
1170 1175
1171 1176 for i, window in increasing_windows(0, len(revs)):
1172 1177 nrevs = [rev for rev in revs[i:i + window] if want(rev)]
1173 1178 for rev in sorted(nrevs):
1174 1179 fns = fncache.get(rev)
1175 1180 ctx = change(rev)
1176 1181 if not fns:
1177 1182 def fns_generator():
1178 1183 for f in ctx.files():
1179 1184 if match(f):
1180 1185 yield f
1181 1186 fns = fns_generator()
1182 1187 prepare(ctx, fns)
1183 1188 for rev in nrevs:
1184 1189 yield change(rev)
1185 1190 return iterate()
1186 1191
1187 1192 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1188 1193 join = lambda f: os.path.join(prefix, f)
1189 1194 bad = []
1190 1195 oldbad = match.bad
1191 1196 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1192 1197 names = []
1193 1198 wctx = repo[None]
1194 1199 cca = None
1195 1200 abort, warn = scmutil.checkportabilityalert(ui)
1196 1201 if abort or warn:
1197 1202 cca = scmutil.casecollisionauditor(ui, abort, wctx)
1198 1203 for f in repo.walk(match):
1199 1204 exact = match.exact(f)
1200 1205 if exact or not explicitonly and f not in repo.dirstate:
1201 1206 if cca:
1202 1207 cca(f)
1203 1208 names.append(f)
1204 1209 if ui.verbose or not exact:
1205 1210 ui.status(_('adding %s\n') % match.rel(join(f)))
1206 1211
1207 1212 for subpath in wctx.substate:
1208 1213 sub = wctx.sub(subpath)
1209 1214 try:
1210 1215 submatch = matchmod.narrowmatcher(subpath, match)
1211 1216 if listsubrepos:
1212 1217 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1213 1218 False))
1214 1219 else:
1215 1220 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1216 1221 True))
1217 1222 except error.LookupError:
1218 1223 ui.status(_("skipping missing subrepository: %s\n")
1219 1224 % join(subpath))
1220 1225
1221 1226 if not dryrun:
1222 1227 rejected = wctx.add(names, prefix)
1223 1228 bad.extend(f for f in rejected if f in match.files())
1224 1229 return bad
1225 1230
1226 1231 def forget(ui, repo, match, prefix, explicitonly):
1227 1232 join = lambda f: os.path.join(prefix, f)
1228 1233 bad = []
1229 1234 oldbad = match.bad
1230 1235 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1231 1236 wctx = repo[None]
1232 1237 forgot = []
1233 1238 s = repo.status(match=match, clean=True)
1234 1239 forget = sorted(s[0] + s[1] + s[3] + s[6])
1235 1240 if explicitonly:
1236 1241 forget = [f for f in forget if match.exact(f)]
1237 1242
1238 1243 for subpath in wctx.substate:
1239 1244 sub = wctx.sub(subpath)
1240 1245 try:
1241 1246 submatch = matchmod.narrowmatcher(subpath, match)
1242 1247 subbad, subforgot = sub.forget(ui, submatch, prefix)
1243 1248 bad.extend([subpath + '/' + f for f in subbad])
1244 1249 forgot.extend([subpath + '/' + f for f in subforgot])
1245 1250 except error.LookupError:
1246 1251 ui.status(_("skipping missing subrepository: %s\n")
1247 1252 % join(subpath))
1248 1253
1249 1254 if not explicitonly:
1250 1255 for f in match.files():
1251 1256 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1252 1257 if f not in forgot:
1253 1258 if os.path.exists(match.rel(join(f))):
1254 1259 ui.warn(_('not removing %s: '
1255 1260 'file is already untracked\n')
1256 1261 % match.rel(join(f)))
1257 1262 bad.append(f)
1258 1263
1259 1264 for f in forget:
1260 1265 if ui.verbose or not match.exact(f):
1261 1266 ui.status(_('removing %s\n') % match.rel(join(f)))
1262 1267
1263 1268 rejected = wctx.forget(forget, prefix)
1264 1269 bad.extend(f for f in rejected if f in match.files())
1265 1270 forgot.extend(forget)
1266 1271 return bad, forgot
1267 1272
1268 1273 def duplicatecopies(repo, rev, p1):
1269 1274 "Reproduce copies found in the source revision in the dirstate for grafts"
1270 1275 for dst, src in copies.pathcopies(repo[p1], repo[rev]).iteritems():
1271 1276 repo.dirstate.copy(src, dst)
1272 1277
1273 1278 def commit(ui, repo, commitfunc, pats, opts):
1274 1279 '''commit the specified files or all outstanding changes'''
1275 1280 date = opts.get('date')
1276 1281 if date:
1277 1282 opts['date'] = util.parsedate(date)
1278 1283 message = logmessage(ui, opts)
1279 1284
1280 1285 # extract addremove carefully -- this function can be called from a command
1281 1286 # that doesn't support addremove
1282 1287 if opts.get('addremove'):
1283 1288 scmutil.addremove(repo, pats, opts)
1284 1289
1285 1290 return commitfunc(ui, repo, message,
1286 1291 scmutil.match(repo[None], pats, opts), opts)
1287 1292
1288 1293 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1289 1294 ui.note(_('amending changeset %s\n') % old)
1290 1295 base = old.p1()
1291 1296
1292 1297 wlock = repo.wlock()
1293 1298 try:
1294 1299 # Fix up dirstate for copies and renames
1295 1300 duplicatecopies(repo, None, base.node())
1296 1301
1297 1302 # First, do a regular commit to record all changes in the working
1298 1303 # directory (if there are any)
1299 1304 node = commit(ui, repo, commitfunc, pats, opts)
1300 1305 ctx = repo[node]
1301 1306
1302 1307 # Participating changesets:
1303 1308 #
1304 1309 # node/ctx o - new (intermediate) commit that contains changes from
1305 1310 # | working dir to go into amending commit (or a workingctx
1306 1311 # | if there were no changes)
1307 1312 # |
1308 1313 # old o - changeset to amend
1309 1314 # |
1310 1315 # base o - parent of amending changeset
1311 1316
1312 1317 files = set(old.files())
1313 1318
1314 1319 # Second, we use either the commit we just did, or if there were no
1315 1320 # changes the parent of the working directory as the version of the
1316 1321 # files in the final amend commit
1317 1322 if node:
1318 1323 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1319 1324
1320 1325 user = ctx.user()
1321 1326 date = ctx.date()
1322 1327 message = ctx.description()
1323 1328 extra = ctx.extra()
1324 1329
1325 1330 # Prune files which were reverted by the updates: if old introduced
1326 1331 # file X and our intermediate commit, node, renamed that file, then
1327 1332 # those two files are the same and we can discard X from our list
1328 1333 # of files. Likewise if X was deleted, it's no longer relevant
1329 1334 files.update(ctx.files())
1330 1335
1331 1336 def samefile(f):
1332 1337 if f in ctx.manifest():
1333 1338 a = ctx.filectx(f)
1334 1339 if f in base.manifest():
1335 1340 b = base.filectx(f)
1336 1341 return (a.data() == b.data()
1337 1342 and a.flags() == b.flags()
1338 1343 and a.renamed() == b.renamed())
1339 1344 else:
1340 1345 return False
1341 1346 else:
1342 1347 return f not in base.manifest()
1343 1348 files = [f for f in files if not samefile(f)]
1344 1349
1345 1350 def filectxfn(repo, ctx_, path):
1346 1351 try:
1347 1352 return ctx.filectx(path)
1348 1353 except KeyError:
1349 1354 raise IOError()
1350 1355 else:
1351 1356 ui.note(_('copying changeset %s to %s\n') % (old, base))
1352 1357
1353 1358 # Use version of files as in the old cset
1354 1359 def filectxfn(repo, ctx_, path):
1355 1360 try:
1356 1361 return old.filectx(path)
1357 1362 except KeyError:
1358 1363 raise IOError()
1359 1364
1360 1365 # See if we got a message from -m or -l, if not, open the editor
1361 1366 # with the message of the changeset to amend
1362 1367 user = opts.get('user') or old.user()
1363 1368 date = opts.get('date') or old.date()
1364 1369 message = logmessage(ui, opts)
1365 1370 if not message:
1366 1371 cctx = context.workingctx(repo, old.description(), user, date,
1367 1372 extra,
1368 1373 repo.status(base.node(), old.node()))
1369 1374 message = commitforceeditor(repo, cctx, [])
1370 1375
1371 1376 new = context.memctx(repo,
1372 1377 parents=[base.node(), nullid],
1373 1378 text=message,
1374 1379 files=files,
1375 1380 filectxfn=filectxfn,
1376 1381 user=user,
1377 1382 date=date,
1378 1383 extra=extra)
1379 1384 newid = repo.commitctx(new)
1380 1385 if newid != old.node():
1381 1386 # Reroute the working copy parent to the new changeset
1382 1387 repo.dirstate.setparents(newid, nullid)
1383 1388
1384 1389 # Move bookmarks from old parent to amend commit
1385 1390 bms = repo.nodebookmarks(old.node())
1386 1391 if bms:
1387 1392 for bm in bms:
1388 1393 repo._bookmarks[bm] = newid
1389 1394 bookmarks.write(repo)
1390 1395
1391 1396 # Strip the intermediate commit (if there was one) and the amended
1392 1397 # commit
1393 1398 lock = repo.lock()
1394 1399 try:
1395 1400 if node:
1396 1401 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1397 1402 ui.note(_('stripping amended changeset %s\n') % old)
1398 1403 repair.strip(ui, repo, old.node(), topic='amend-backup')
1399 1404 finally:
1400 1405 lock.release()
1401 1406 finally:
1402 1407 wlock.release()
1403 1408 return newid
1404 1409
1405 1410 def commiteditor(repo, ctx, subs):
1406 1411 if ctx.description():
1407 1412 return ctx.description()
1408 1413 return commitforceeditor(repo, ctx, subs)
1409 1414
1410 1415 def commitforceeditor(repo, ctx, subs):
1411 1416 edittext = []
1412 1417 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1413 1418 if ctx.description():
1414 1419 edittext.append(ctx.description())
1415 1420 edittext.append("")
1416 1421 edittext.append("") # Empty line between message and comments.
1417 1422 edittext.append(_("HG: Enter commit message."
1418 1423 " Lines beginning with 'HG:' are removed."))
1419 1424 edittext.append(_("HG: Leave message empty to abort commit."))
1420 1425 edittext.append("HG: --")
1421 1426 edittext.append(_("HG: user: %s") % ctx.user())
1422 1427 if ctx.p2():
1423 1428 edittext.append(_("HG: branch merge"))
1424 1429 if ctx.branch():
1425 1430 edittext.append(_("HG: branch '%s'") % ctx.branch())
1426 1431 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1427 1432 edittext.extend([_("HG: added %s") % f for f in added])
1428 1433 edittext.extend([_("HG: changed %s") % f for f in modified])
1429 1434 edittext.extend([_("HG: removed %s") % f for f in removed])
1430 1435 if not added and not modified and not removed:
1431 1436 edittext.append(_("HG: no files changed"))
1432 1437 edittext.append("")
1433 1438 # run editor in the repository root
1434 1439 olddir = os.getcwd()
1435 1440 os.chdir(repo.root)
1436 1441 text = repo.ui.edit("\n".join(edittext), ctx.user())
1437 1442 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1438 1443 os.chdir(olddir)
1439 1444
1440 1445 if not text.strip():
1441 1446 raise util.Abort(_("empty commit message"))
1442 1447
1443 1448 return text
1444 1449
1445 1450 def revert(ui, repo, ctx, parents, *pats, **opts):
1446 1451 parent, p2 = parents
1447 1452 node = ctx.node()
1448 1453
1449 1454 mf = ctx.manifest()
1450 1455 if node == parent:
1451 1456 pmf = mf
1452 1457 else:
1453 1458 pmf = None
1454 1459
1455 1460 # need all matching names in dirstate and manifest of target rev,
1456 1461 # so have to walk both. do not print errors if files exist in one
1457 1462 # but not other.
1458 1463
1459 1464 names = {}
1460 1465
1461 1466 wlock = repo.wlock()
1462 1467 try:
1463 1468 # walk dirstate.
1464 1469
1465 1470 m = scmutil.match(repo[None], pats, opts)
1466 1471 m.bad = lambda x, y: False
1467 1472 for abs in repo.walk(m):
1468 1473 names[abs] = m.rel(abs), m.exact(abs)
1469 1474
1470 1475 # walk target manifest.
1471 1476
1472 1477 def badfn(path, msg):
1473 1478 if path in names:
1474 1479 return
1475 1480 if path in repo[node].substate:
1476 1481 return
1477 1482 path_ = path + '/'
1478 1483 for f in names:
1479 1484 if f.startswith(path_):
1480 1485 return
1481 1486 ui.warn("%s: %s\n" % (m.rel(path), msg))
1482 1487
1483 1488 m = scmutil.match(repo[node], pats, opts)
1484 1489 m.bad = badfn
1485 1490 for abs in repo[node].walk(m):
1486 1491 if abs not in names:
1487 1492 names[abs] = m.rel(abs), m.exact(abs)
1488 1493
1489 1494 # get the list of subrepos that must be reverted
1490 1495 targetsubs = [s for s in repo[node].substate if m(s)]
1491 1496 m = scmutil.matchfiles(repo, names)
1492 1497 changes = repo.status(match=m)[:4]
1493 1498 modified, added, removed, deleted = map(set, changes)
1494 1499
1495 1500 # if f is a rename, also revert the source
1496 1501 cwd = repo.getcwd()
1497 1502 for f in added:
1498 1503 src = repo.dirstate.copied(f)
1499 1504 if src and src not in names and repo.dirstate[src] == 'r':
1500 1505 removed.add(src)
1501 1506 names[src] = (repo.pathto(src, cwd), True)
1502 1507
1503 1508 def removeforget(abs):
1504 1509 if repo.dirstate[abs] == 'a':
1505 1510 return _('forgetting %s\n')
1506 1511 return _('removing %s\n')
1507 1512
1508 1513 revert = ([], _('reverting %s\n'))
1509 1514 add = ([], _('adding %s\n'))
1510 1515 remove = ([], removeforget)
1511 1516 undelete = ([], _('undeleting %s\n'))
1512 1517
1513 1518 disptable = (
1514 1519 # dispatch table:
1515 1520 # file state
1516 1521 # action if in target manifest
1517 1522 # action if not in target manifest
1518 1523 # make backup if in target manifest
1519 1524 # make backup if not in target manifest
1520 1525 (modified, revert, remove, True, True),
1521 1526 (added, revert, remove, True, False),
1522 1527 (removed, undelete, None, False, False),
1523 1528 (deleted, revert, remove, False, False),
1524 1529 )
1525 1530
1526 1531 for abs, (rel, exact) in sorted(names.items()):
1527 1532 mfentry = mf.get(abs)
1528 1533 target = repo.wjoin(abs)
1529 1534 def handle(xlist, dobackup):
1530 1535 xlist[0].append(abs)
1531 1536 if (dobackup and not opts.get('no_backup') and
1532 1537 os.path.lexists(target)):
1533 1538 bakname = "%s.orig" % rel
1534 1539 ui.note(_('saving current version of %s as %s\n') %
1535 1540 (rel, bakname))
1536 1541 if not opts.get('dry_run'):
1537 1542 util.rename(target, bakname)
1538 1543 if ui.verbose or not exact:
1539 1544 msg = xlist[1]
1540 1545 if not isinstance(msg, basestring):
1541 1546 msg = msg(abs)
1542 1547 ui.status(msg % rel)
1543 1548 for table, hitlist, misslist, backuphit, backupmiss in disptable:
1544 1549 if abs not in table:
1545 1550 continue
1546 1551 # file has changed in dirstate
1547 1552 if mfentry:
1548 1553 handle(hitlist, backuphit)
1549 1554 elif misslist is not None:
1550 1555 handle(misslist, backupmiss)
1551 1556 break
1552 1557 else:
1553 1558 if abs not in repo.dirstate:
1554 1559 if mfentry:
1555 1560 handle(add, True)
1556 1561 elif exact:
1557 1562 ui.warn(_('file not managed: %s\n') % rel)
1558 1563 continue
1559 1564 # file has not changed in dirstate
1560 1565 if node == parent:
1561 1566 if exact:
1562 1567 ui.warn(_('no changes needed to %s\n') % rel)
1563 1568 continue
1564 1569 if pmf is None:
1565 1570 # only need parent manifest in this unlikely case,
1566 1571 # so do not read by default
1567 1572 pmf = repo[parent].manifest()
1568 1573 if abs in pmf and mfentry:
1569 1574 # if version of file is same in parent and target
1570 1575 # manifests, do nothing
1571 1576 if (pmf[abs] != mfentry or
1572 1577 pmf.flags(abs) != mf.flags(abs)):
1573 1578 handle(revert, False)
1574 1579 else:
1575 1580 handle(remove, False)
1576 1581
1577 1582 if not opts.get('dry_run'):
1578 1583 def checkout(f):
1579 1584 fc = ctx[f]
1580 1585 repo.wwrite(f, fc.data(), fc.flags())
1581 1586
1582 1587 audit_path = scmutil.pathauditor(repo.root)
1583 1588 for f in remove[0]:
1584 1589 if repo.dirstate[f] == 'a':
1585 1590 repo.dirstate.drop(f)
1586 1591 continue
1587 1592 audit_path(f)
1588 1593 try:
1589 1594 util.unlinkpath(repo.wjoin(f))
1590 1595 except OSError:
1591 1596 pass
1592 1597 repo.dirstate.remove(f)
1593 1598
1594 1599 normal = None
1595 1600 if node == parent:
1596 1601 # We're reverting to our parent. If possible, we'd like status
1597 1602 # to report the file as clean. We have to use normallookup for
1598 1603 # merges to avoid losing information about merged/dirty files.
1599 1604 if p2 != nullid:
1600 1605 normal = repo.dirstate.normallookup
1601 1606 else:
1602 1607 normal = repo.dirstate.normal
1603 1608 for f in revert[0]:
1604 1609 checkout(f)
1605 1610 if normal:
1606 1611 normal(f)
1607 1612
1608 1613 for f in add[0]:
1609 1614 checkout(f)
1610 1615 repo.dirstate.add(f)
1611 1616
1612 1617 normal = repo.dirstate.normallookup
1613 1618 if node == parent and p2 == nullid:
1614 1619 normal = repo.dirstate.normal
1615 1620 for f in undelete[0]:
1616 1621 checkout(f)
1617 1622 normal(f)
1618 1623
1619 1624 if targetsubs:
1620 1625 # Revert the subrepos on the revert list
1621 1626 for sub in targetsubs:
1622 1627 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
1623 1628 finally:
1624 1629 wlock.release()
1625 1630
1626 1631 def command(table):
1627 1632 '''returns a function object bound to table which can be used as
1628 1633 a decorator for populating table as a command table'''
1629 1634
1630 1635 def cmd(name, options, synopsis=None):
1631 1636 def decorator(func):
1632 1637 if synopsis:
1633 1638 table[name] = func, options[:], synopsis
1634 1639 else:
1635 1640 table[name] = func, options[:]
1636 1641 return func
1637 1642 return decorator
1638 1643
1639 1644 return cmd
@@ -1,767 +1,783 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding
12 12 import struct, os, stat, errno
13 13 import cStringIO
14 14
15 15 _format = ">cllll"
16 16 propertycache = util.propertycache
17 17 filecache = scmutil.filecache
18 18
19 19 class repocache(filecache):
20 20 """filecache for files in .hg/"""
21 21 def join(self, obj, fname):
22 22 return obj._opener.join(fname)
23 23
24 24 class rootcache(filecache):
25 25 """filecache for files in the repository root"""
26 26 def join(self, obj, fname):
27 27 return obj._join(fname)
28 28
29 29 def _finddirs(path):
30 30 pos = path.rfind('/')
31 31 while pos != -1:
32 32 yield path[:pos]
33 33 pos = path.rfind('/', 0, pos)
34 34
35 35 def _incdirs(dirs, path):
36 36 for base in _finddirs(path):
37 37 if base in dirs:
38 38 dirs[base] += 1
39 39 return
40 40 dirs[base] = 1
41 41
42 42 def _decdirs(dirs, path):
43 43 for base in _finddirs(path):
44 44 if dirs[base] > 1:
45 45 dirs[base] -= 1
46 46 return
47 47 del dirs[base]
48 48
49 49 class dirstate(object):
50 50
51 51 def __init__(self, opener, ui, root, validate):
52 52 '''Create a new dirstate object.
53 53
54 54 opener is an open()-like callable that can be used to open the
55 55 dirstate file; root is the root of the directory tracked by
56 56 the dirstate.
57 57 '''
58 58 self._opener = opener
59 59 self._validate = validate
60 60 self._root = root
61 61 self._rootdir = os.path.join(root, '')
62 62 self._dirty = False
63 63 self._dirtypl = False
64 64 self._lastnormaltime = 0
65 65 self._ui = ui
66 66 self._filecache = {}
67 67
68 68 @propertycache
69 69 def _map(self):
70 70 '''Return the dirstate contents as a map from filename to
71 71 (state, mode, size, time).'''
72 72 self._read()
73 73 return self._map
74 74
75 75 @propertycache
76 76 def _copymap(self):
77 77 self._read()
78 78 return self._copymap
79 79
80 80 @propertycache
81 81 def _foldmap(self):
82 82 f = {}
83 83 for name in self._map:
84 84 f[util.normcase(name)] = name
85 85 for name in self._dirs:
86 86 f[util.normcase(name)] = name
87 87 f['.'] = '.' # prevents useless util.fspath() invocation
88 88 return f
89 89
90 90 @repocache('branch')
91 91 def _branch(self):
92 92 try:
93 93 return self._opener.read("branch").strip() or "default"
94 94 except IOError, inst:
95 95 if inst.errno != errno.ENOENT:
96 96 raise
97 97 return "default"
98 98
99 99 @propertycache
100 100 def _pl(self):
101 101 try:
102 102 fp = self._opener("dirstate")
103 103 st = fp.read(40)
104 104 fp.close()
105 105 l = len(st)
106 106 if l == 40:
107 107 return st[:20], st[20:40]
108 108 elif l > 0 and l < 40:
109 109 raise util.Abort(_('working directory state appears damaged!'))
110 110 except IOError, err:
111 111 if err.errno != errno.ENOENT:
112 112 raise
113 113 return [nullid, nullid]
114 114
115 115 @propertycache
116 116 def _dirs(self):
117 117 dirs = {}
118 118 for f, s in self._map.iteritems():
119 119 if s[0] != 'r':
120 120 _incdirs(dirs, f)
121 121 return dirs
122 122
123 123 def dirs(self):
124 124 return self._dirs
125 125
126 126 @rootcache('.hgignore')
127 127 def _ignore(self):
128 128 files = [self._join('.hgignore')]
129 129 for name, path in self._ui.configitems("ui"):
130 130 if name == 'ignore' or name.startswith('ignore.'):
131 131 files.append(util.expandpath(path))
132 132 return ignore.ignore(self._root, files, self._ui.warn)
133 133
134 134 @propertycache
135 135 def _slash(self):
136 136 return self._ui.configbool('ui', 'slash') and os.sep != '/'
137 137
138 138 @propertycache
139 139 def _checklink(self):
140 140 return util.checklink(self._root)
141 141
142 142 @propertycache
143 143 def _checkexec(self):
144 144 return util.checkexec(self._root)
145 145
146 146 @propertycache
147 147 def _checkcase(self):
148 148 return not util.checkcase(self._join('.hg'))
149 149
150 150 def _join(self, f):
151 151 # much faster than os.path.join()
152 152 # it's safe because f is always a relative path
153 153 return self._rootdir + f
154 154
155 155 def flagfunc(self, buildfallback):
156 156 if self._checklink and self._checkexec:
157 157 def f(x):
158 158 p = self._join(x)
159 159 if os.path.islink(p):
160 160 return 'l'
161 161 if util.isexec(p):
162 162 return 'x'
163 163 return ''
164 164 return f
165 165
166 166 fallback = buildfallback()
167 167 if self._checklink:
168 168 def f(x):
169 169 if os.path.islink(self._join(x)):
170 170 return 'l'
171 171 if 'x' in fallback(x):
172 172 return 'x'
173 173 return ''
174 174 return f
175 175 if self._checkexec:
176 176 def f(x):
177 177 if 'l' in fallback(x):
178 178 return 'l'
179 179 if util.isexec(self._join(x)):
180 180 return 'x'
181 181 return ''
182 182 return f
183 183 else:
184 184 return fallback
185 185
186 186 def getcwd(self):
187 187 cwd = os.getcwd()
188 188 if cwd == self._root:
189 189 return ''
190 190 # self._root ends with a path separator if self._root is '/' or 'C:\'
191 191 rootsep = self._root
192 192 if not util.endswithsep(rootsep):
193 193 rootsep += os.sep
194 194 if cwd.startswith(rootsep):
195 195 return cwd[len(rootsep):]
196 196 else:
197 197 # we're outside the repo. return an absolute path.
198 198 return cwd
199 199
200 200 def pathto(self, f, cwd=None):
201 201 if cwd is None:
202 202 cwd = self.getcwd()
203 203 path = util.pathto(self._root, cwd, f)
204 204 if self._slash:
205 205 return util.normpath(path)
206 206 return path
207 207
208 208 def __getitem__(self, key):
209 209 '''Return the current state of key (a filename) in the dirstate.
210 210
211 211 States are:
212 212 n normal
213 213 m needs merging
214 214 r marked for removal
215 215 a marked for addition
216 216 ? not tracked
217 217 '''
218 218 return self._map.get(key, ("?",))[0]
219 219
220 220 def __contains__(self, key):
221 221 return key in self._map
222 222
223 223 def __iter__(self):
224 224 for x in sorted(self._map):
225 225 yield x
226 226
227 227 def parents(self):
228 228 return [self._validate(p) for p in self._pl]
229 229
230 230 def p1(self):
231 231 return self._validate(self._pl[0])
232 232
233 233 def p2(self):
234 234 return self._validate(self._pl[1])
235 235
236 236 def branch(self):
237 237 return encoding.tolocal(self._branch)
238 238
239 239 def setparents(self, p1, p2=nullid):
240 240 self._dirty = self._dirtypl = True
241 241 oldp2 = self._pl[1]
242 242 self._pl = p1, p2
243 243 if oldp2 != nullid and p2 == nullid:
244 244 # Discard 'm' markers when moving away from a merge state
245 245 for f, s in self._map.iteritems():
246 246 if s[0] == 'm':
247 247 self.normallookup(f)
248 248
249 249 def setbranch(self, branch):
250 250 if branch in ['tip', '.', 'null']:
251 251 raise util.Abort(_('the name \'%s\' is reserved') % branch)
252 252 self._branch = encoding.fromlocal(branch)
253 253 f = self._opener('branch', 'w', atomictemp=True)
254 254 try:
255 255 f.write(self._branch + '\n')
256 256 finally:
257 257 f.close()
258 258
259 259 def _read(self):
260 260 self._map = {}
261 261 self._copymap = {}
262 262 try:
263 263 st = self._opener.read("dirstate")
264 264 except IOError, err:
265 265 if err.errno != errno.ENOENT:
266 266 raise
267 267 return
268 268 if not st:
269 269 return
270 270
271 271 p = parsers.parse_dirstate(self._map, self._copymap, st)
272 272 if not self._dirtypl:
273 273 self._pl = p
274 274
275 275 def invalidate(self):
276 276 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
277 277 "_ignore"):
278 278 if a in self.__dict__:
279 279 delattr(self, a)
280 280 self._lastnormaltime = 0
281 281 self._dirty = False
282 282
283 283 def copy(self, source, dest):
284 284 """Mark dest as a copy of source. Unmark dest if source is None."""
285 285 if source == dest:
286 286 return
287 287 self._dirty = True
288 288 if source is not None:
289 289 self._copymap[dest] = source
290 290 elif dest in self._copymap:
291 291 del self._copymap[dest]
292 292
293 293 def copied(self, file):
294 294 return self._copymap.get(file, None)
295 295
296 296 def copies(self):
297 297 return self._copymap
298 298
299 299 def _droppath(self, f):
300 300 if self[f] not in "?r" and "_dirs" in self.__dict__:
301 301 _decdirs(self._dirs, f)
302 302
303 303 def _addpath(self, f, check=False):
304 304 oldstate = self[f]
305 305 if check or oldstate == "r":
306 306 scmutil.checkfilename(f)
307 307 if f in self._dirs:
308 308 raise util.Abort(_('directory %r already in dirstate') % f)
309 309 # shadows
310 310 for d in _finddirs(f):
311 311 if d in self._dirs:
312 312 break
313 313 if d in self._map and self[d] != 'r':
314 314 raise util.Abort(
315 315 _('file %r in dirstate clashes with %r') % (d, f))
316 316 if oldstate in "?r" and "_dirs" in self.__dict__:
317 317 _incdirs(self._dirs, f)
318 318
319 319 def normal(self, f):
320 320 '''Mark a file normal and clean.'''
321 321 self._dirty = True
322 322 self._addpath(f)
323 323 s = os.lstat(self._join(f))
324 324 mtime = int(s.st_mtime)
325 325 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
326 326 if f in self._copymap:
327 327 del self._copymap[f]
328 328 if mtime > self._lastnormaltime:
329 329 # Remember the most recent modification timeslot for status(),
330 330 # to make sure we won't miss future size-preserving file content
331 331 # modifications that happen within the same timeslot.
332 332 self._lastnormaltime = mtime
333 333
334 334 def normallookup(self, f):
335 335 '''Mark a file normal, but possibly dirty.'''
336 336 if self._pl[1] != nullid and f in self._map:
337 337 # if there is a merge going on and the file was either
338 338 # in state 'm' (-1) or coming from other parent (-2) before
339 339 # being removed, restore that state.
340 340 entry = self._map[f]
341 341 if entry[0] == 'r' and entry[2] in (-1, -2):
342 342 source = self._copymap.get(f)
343 343 if entry[2] == -1:
344 344 self.merge(f)
345 345 elif entry[2] == -2:
346 346 self.otherparent(f)
347 347 if source:
348 348 self.copy(source, f)
349 349 return
350 350 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
351 351 return
352 352 self._dirty = True
353 353 self._addpath(f)
354 354 self._map[f] = ('n', 0, -1, -1)
355 355 if f in self._copymap:
356 356 del self._copymap[f]
357 357
358 358 def otherparent(self, f):
359 359 '''Mark as coming from the other parent, always dirty.'''
360 360 if self._pl[1] == nullid:
361 361 raise util.Abort(_("setting %r to other parent "
362 362 "only allowed in merges") % f)
363 363 self._dirty = True
364 364 self._addpath(f)
365 365 self._map[f] = ('n', 0, -2, -1)
366 366 if f in self._copymap:
367 367 del self._copymap[f]
368 368
369 369 def add(self, f):
370 370 '''Mark a file added.'''
371 371 self._dirty = True
372 372 self._addpath(f, True)
373 373 self._map[f] = ('a', 0, -1, -1)
374 374 if f in self._copymap:
375 375 del self._copymap[f]
376 376
377 377 def remove(self, f):
378 378 '''Mark a file removed.'''
379 379 self._dirty = True
380 380 self._droppath(f)
381 381 size = 0
382 382 if self._pl[1] != nullid and f in self._map:
383 383 # backup the previous state
384 384 entry = self._map[f]
385 385 if entry[0] == 'm': # merge
386 386 size = -1
387 387 elif entry[0] == 'n' and entry[2] == -2: # other parent
388 388 size = -2
389 389 self._map[f] = ('r', 0, size, 0)
390 390 if size == 0 and f in self._copymap:
391 391 del self._copymap[f]
392 392
393 393 def merge(self, f):
394 394 '''Mark a file merged.'''
395 395 if self._pl[1] == nullid:
396 396 return self.normallookup(f)
397 397 self._dirty = True
398 398 s = os.lstat(self._join(f))
399 399 self._addpath(f)
400 400 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
401 401 if f in self._copymap:
402 402 del self._copymap[f]
403 403
404 404 def drop(self, f):
405 405 '''Drop a file from the dirstate'''
406 406 if f in self._map:
407 407 self._dirty = True
408 408 self._droppath(f)
409 409 del self._map[f]
410 410
411 def _normalize(self, path, isknown):
411 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
412 412 normed = util.normcase(path)
413 413 folded = self._foldmap.get(normed, None)
414 414 if folded is None:
415 if isknown or not os.path.lexists(os.path.join(self._root, path)):
415 if isknown:
416 416 folded = path
417 417 else:
418 # recursively normalize leading directory components
419 # against dirstate
420 if '/' in normed:
421 d, f = normed.rsplit('/', 1)
422 d = self._normalize(d, isknown)
423 r = self._root + "/" + d
424 folded = d + "/" + util.fspath(f, r)
418 if exists is None:
419 exists = os.path.lexists(os.path.join(self._root, path))
420 if not exists:
421 # Maybe a path component exists
422 if not ignoremissing and '/' in path:
423 d, f = path.rsplit('/', 1)
424 d = self._normalize(d, isknown, ignoremissing, None)
425 folded = d + "/" + f
426 else:
427 # No path components, preserve original case
428 folded = path
425 429 else:
426 folded = util.fspath(normed, self._root)
427 self._foldmap[normed] = folded
430 # recursively normalize leading directory components
431 # against dirstate
432 if '/' in normed:
433 d, f = normed.rsplit('/', 1)
434 d = self._normalize(d, isknown, ignoremissing, True)
435 r = self._root + "/" + d
436 folded = d + "/" + util.fspath(f, r)
437 else:
438 folded = util.fspath(normed, self._root)
439 self._foldmap[normed] = folded
428 440
429 441 return folded
430 442
431 def normalize(self, path, isknown=False):
443 def normalize(self, path, isknown=False, ignoremissing=False):
432 444 '''
433 445 normalize the case of a pathname when on a casefolding filesystem
434 446
435 447 isknown specifies whether the filename came from walking the
436 disk, to avoid extra filesystem access
448 disk, to avoid extra filesystem access.
449
450 If ignoremissing is True, missing paths are returned
451 unchanged. Otherwise, we try harder to normalize possibly
452 existing path components.
437 453
438 454 The normalized case is determined based on the following precedence:
439 455
440 456 - version of name already stored in the dirstate
441 457 - version of name stored on disk
442 458 - version provided via command arguments
443 459 '''
444 460
445 461 if self._checkcase:
446 return self._normalize(path, isknown)
462 return self._normalize(path, isknown, ignoremissing)
447 463 return path
448 464
449 465 def clear(self):
450 466 self._map = {}
451 467 if "_dirs" in self.__dict__:
452 468 delattr(self, "_dirs")
453 469 self._copymap = {}
454 470 self._pl = [nullid, nullid]
455 471 self._lastnormaltime = 0
456 472 self._dirty = True
457 473
458 474 def rebuild(self, parent, files):
459 475 self.clear()
460 476 for f in files:
461 477 if 'x' in files.flags(f):
462 478 self._map[f] = ('n', 0777, -1, 0)
463 479 else:
464 480 self._map[f] = ('n', 0666, -1, 0)
465 481 self._pl = (parent, nullid)
466 482 self._dirty = True
467 483
468 484 def write(self):
469 485 if not self._dirty:
470 486 return
471 487 st = self._opener("dirstate", "w", atomictemp=True)
472 488
473 489 # use the modification time of the newly created temporary file as the
474 490 # filesystem's notion of 'now'
475 491 now = int(util.fstat(st).st_mtime)
476 492
477 493 cs = cStringIO.StringIO()
478 494 copymap = self._copymap
479 495 pack = struct.pack
480 496 write = cs.write
481 497 write("".join(self._pl))
482 498 for f, e in self._map.iteritems():
483 499 if e[0] == 'n' and e[3] == now:
484 500 # The file was last modified "simultaneously" with the current
485 501 # write to dirstate (i.e. within the same second for file-
486 502 # systems with a granularity of 1 sec). This commonly happens
487 503 # for at least a couple of files on 'update'.
488 504 # The user could change the file without changing its size
489 505 # within the same second. Invalidate the file's stat data in
490 506 # dirstate, forcing future 'status' calls to compare the
491 507 # contents of the file. This prevents mistakenly treating such
492 508 # files as clean.
493 509 e = (e[0], 0, -1, -1) # mark entry as 'unset'
494 510 self._map[f] = e
495 511
496 512 if f in copymap:
497 513 f = "%s\0%s" % (f, copymap[f])
498 514 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
499 515 write(e)
500 516 write(f)
501 517 st.write(cs.getvalue())
502 518 st.close()
503 519 self._lastnormaltime = 0
504 520 self._dirty = self._dirtypl = False
505 521
506 522 def _dirignore(self, f):
507 523 if f == '.':
508 524 return False
509 525 if self._ignore(f):
510 526 return True
511 527 for p in _finddirs(f):
512 528 if self._ignore(p):
513 529 return True
514 530 return False
515 531
516 532 def walk(self, match, subrepos, unknown, ignored):
517 533 '''
518 534 Walk recursively through the directory tree, finding all files
519 535 matched by match.
520 536
521 537 Return a dict mapping filename to stat-like object (either
522 538 mercurial.osutil.stat instance or return value of os.stat()).
523 539 '''
524 540
525 541 def fwarn(f, msg):
526 542 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
527 543 return False
528 544
529 545 def badtype(mode):
530 546 kind = _('unknown')
531 547 if stat.S_ISCHR(mode):
532 548 kind = _('character device')
533 549 elif stat.S_ISBLK(mode):
534 550 kind = _('block device')
535 551 elif stat.S_ISFIFO(mode):
536 552 kind = _('fifo')
537 553 elif stat.S_ISSOCK(mode):
538 554 kind = _('socket')
539 555 elif stat.S_ISDIR(mode):
540 556 kind = _('directory')
541 557 return _('unsupported file type (type is %s)') % kind
542 558
543 559 ignore = self._ignore
544 560 dirignore = self._dirignore
545 561 if ignored:
546 562 ignore = util.never
547 563 dirignore = util.never
548 564 elif not unknown:
549 565 # if unknown and ignored are False, skip step 2
550 566 ignore = util.always
551 567 dirignore = util.always
552 568
553 569 matchfn = match.matchfn
554 570 badfn = match.bad
555 571 dmap = self._map
556 572 normpath = util.normpath
557 573 listdir = osutil.listdir
558 574 lstat = os.lstat
559 575 getkind = stat.S_IFMT
560 576 dirkind = stat.S_IFDIR
561 577 regkind = stat.S_IFREG
562 578 lnkkind = stat.S_IFLNK
563 579 join = self._join
564 580 work = []
565 581 wadd = work.append
566 582
567 583 exact = skipstep3 = False
568 584 if matchfn == match.exact: # match.exact
569 585 exact = True
570 586 dirignore = util.always # skip step 2
571 587 elif match.files() and not match.anypats(): # match.match, no patterns
572 588 skipstep3 = True
573 589
574 590 if not exact and self._checkcase:
575 591 normalize = self._normalize
576 592 skipstep3 = False
577 593 else:
578 normalize = lambda x, y: x
594 normalize = lambda x, y, z: x
579 595
580 596 files = sorted(match.files())
581 597 subrepos.sort()
582 598 i, j = 0, 0
583 599 while i < len(files) and j < len(subrepos):
584 600 subpath = subrepos[j] + "/"
585 601 if files[i] < subpath:
586 602 i += 1
587 603 continue
588 604 while i < len(files) and files[i].startswith(subpath):
589 605 del files[i]
590 606 j += 1
591 607
592 608 if not files or '.' in files:
593 609 files = ['']
594 610 results = dict.fromkeys(subrepos)
595 611 results['.hg'] = None
596 612
597 613 # step 1: find all explicit files
598 614 for ff in files:
599 nf = normalize(normpath(ff), False)
615 nf = normalize(normpath(ff), False, True)
600 616 if nf in results:
601 617 continue
602 618
603 619 try:
604 620 st = lstat(join(nf))
605 621 kind = getkind(st.st_mode)
606 622 if kind == dirkind:
607 623 skipstep3 = False
608 624 if nf in dmap:
609 625 #file deleted on disk but still in dirstate
610 626 results[nf] = None
611 627 match.dir(nf)
612 628 if not dirignore(nf):
613 629 wadd(nf)
614 630 elif kind == regkind or kind == lnkkind:
615 631 results[nf] = st
616 632 else:
617 633 badfn(ff, badtype(kind))
618 634 if nf in dmap:
619 635 results[nf] = None
620 636 except OSError, inst:
621 637 if nf in dmap: # does it exactly match a file?
622 638 results[nf] = None
623 639 else: # does it match a directory?
624 640 prefix = nf + "/"
625 641 for fn in dmap:
626 642 if fn.startswith(prefix):
627 643 match.dir(nf)
628 644 skipstep3 = False
629 645 break
630 646 else:
631 647 badfn(ff, inst.strerror)
632 648
633 649 # step 2: visit subdirectories
634 650 while work:
635 651 nd = work.pop()
636 652 skip = None
637 653 if nd == '.':
638 654 nd = ''
639 655 else:
640 656 skip = '.hg'
641 657 try:
642 658 entries = listdir(join(nd), stat=True, skip=skip)
643 659 except OSError, inst:
644 660 if inst.errno == errno.EACCES:
645 661 fwarn(nd, inst.strerror)
646 662 continue
647 663 raise
648 664 for f, kind, st in entries:
649 nf = normalize(nd and (nd + "/" + f) or f, True)
665 nf = normalize(nd and (nd + "/" + f) or f, True, True)
650 666 if nf not in results:
651 667 if kind == dirkind:
652 668 if not ignore(nf):
653 669 match.dir(nf)
654 670 wadd(nf)
655 671 if nf in dmap and matchfn(nf):
656 672 results[nf] = None
657 673 elif kind == regkind or kind == lnkkind:
658 674 if nf in dmap:
659 675 if matchfn(nf):
660 676 results[nf] = st
661 677 elif matchfn(nf) and not ignore(nf):
662 678 results[nf] = st
663 679 elif nf in dmap and matchfn(nf):
664 680 results[nf] = None
665 681
666 682 # step 3: report unseen items in the dmap hash
667 683 if not skipstep3 and not exact:
668 684 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
669 685 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
670 686 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
671 687 st = None
672 688 results[nf] = st
673 689 for s in subrepos:
674 690 del results[s]
675 691 del results['.hg']
676 692 return results
677 693
678 694 def status(self, match, subrepos, ignored, clean, unknown):
679 695 '''Determine the status of the working copy relative to the
680 696 dirstate and return a tuple of lists (unsure, modified, added,
681 697 removed, deleted, unknown, ignored, clean), where:
682 698
683 699 unsure:
684 700 files that might have been modified since the dirstate was
685 701 written, but need to be read to be sure (size is the same
686 702 but mtime differs)
687 703 modified:
688 704 files that have definitely been modified since the dirstate
689 705 was written (different size or mode)
690 706 added:
691 707 files that have been explicitly added with hg add
692 708 removed:
693 709 files that have been explicitly removed with hg remove
694 710 deleted:
695 711 files that have been deleted through other means ("missing")
696 712 unknown:
697 713 files not in the dirstate that are not ignored
698 714 ignored:
699 715 files not in the dirstate that are ignored
700 716 (by _dirignore())
701 717 clean:
702 718 files that have definitely not been modified since the
703 719 dirstate was written
704 720 '''
705 721 listignored, listclean, listunknown = ignored, clean, unknown
706 722 lookup, modified, added, unknown, ignored = [], [], [], [], []
707 723 removed, deleted, clean = [], [], []
708 724
709 725 dmap = self._map
710 726 ladd = lookup.append # aka "unsure"
711 727 madd = modified.append
712 728 aadd = added.append
713 729 uadd = unknown.append
714 730 iadd = ignored.append
715 731 radd = removed.append
716 732 dadd = deleted.append
717 733 cadd = clean.append
718 734
719 735 lnkkind = stat.S_IFLNK
720 736
721 737 for fn, st in self.walk(match, subrepos, listunknown,
722 738 listignored).iteritems():
723 739 if fn not in dmap:
724 740 if (listignored or match.exact(fn)) and self._dirignore(fn):
725 741 if listignored:
726 742 iadd(fn)
727 743 elif listunknown:
728 744 uadd(fn)
729 745 continue
730 746
731 747 state, mode, size, time = dmap[fn]
732 748
733 749 if not st and state in "nma":
734 750 dadd(fn)
735 751 elif state == 'n':
736 752 # The "mode & lnkkind != lnkkind or self._checklink"
737 753 # lines are an expansion of "islink => checklink"
738 754 # where islink means "is this a link?" and checklink
739 755 # means "can we check links?".
740 756 mtime = int(st.st_mtime)
741 757 if (size >= 0 and
742 758 (size != st.st_size
743 759 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
744 760 and (mode & lnkkind != lnkkind or self._checklink)
745 761 or size == -2 # other parent
746 762 or fn in self._copymap):
747 763 madd(fn)
748 764 elif (mtime != time
749 765 and (mode & lnkkind != lnkkind or self._checklink)):
750 766 ladd(fn)
751 767 elif mtime == self._lastnormaltime:
752 768 # fn may have been changed in the same timeslot without
753 769 # changing its size. This can happen if we quickly do
754 770 # multiple commits in a single transaction.
755 771 # Force lookup, so we don't miss such a racy file change.
756 772 ladd(fn)
757 773 elif listclean:
758 774 cadd(fn)
759 775 elif state == 'm':
760 776 madd(fn)
761 777 elif state == 'a':
762 778 aadd(fn)
763 779 elif state == 'r':
764 780 radd(fn)
765 781
766 782 return (lookup, modified, added, removed, deleted, unknown, ignored,
767 783 clean)
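
To make the normalize()/_normalize() change above concrete: when a requested path does not exist on disk, the leading directory components are still folded against the dirstate, but the final, missing component now keeps the case the user typed instead of being guessed from the filesystem. The following is a simplified, self-contained model of that rule, not the real dirstate code; util.normcase is approximated with str.lower() and the foldmap and root arguments are passed explicitly for illustration:

import os

def normalize(path, foldmap, root):
    """Fold path to its stored case, preserving missing components."""
    normed = path.lower()              # stand-in for util.normcase()
    if normed in foldmap:              # tracked name: use the stored case
        return foldmap[normed]
    if not os.path.lexists(os.path.join(root, path)):
        if '/' in path:
            # fold the known leading directories, but keep the final,
            # not-yet-existing component exactly as given (issue3402)
            d, f = path.rsplit('/', 1)
            return normalize(d, foldmap, root) + '/' + f
        return path                    # no components: preserve the case
    # exists on disk but untracked: the real code asks the filesystem
    # via util.fspath(); the input case is simply kept here
    return path

For example, with foldmap == {'d': 'D', 'd/b': 'D/b'} and no 'd/c' present on disk, normalize('d/c', foldmap, root) returns 'D/c', which is the behaviour exercised by the new "hg mv D/b d/c" test below.
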
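Separately, the eight lists documented in the status() docstring above are returned positionally. A hedged consumption example follows; the repo and match objects are assumed to exist, and the arguments follow the positional signature status(match, subrepos, ignored, clean, unknown):

# subrepos=[], ignored=False, clean=True, unknown=True
unsure, modified, added, removed, deleted, unknown, ignored, clean = \
    repo.dirstate.status(match, [], False, True, True)
# "unsure" entries have a matching size but a differing mtime and need a
# content comparison before they can be reported as clean or modified
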
@@ -1,127 +1,163 b''
1 1 $ "$TESTDIR/hghave" icasefs || exit 80
2 2
3 3 $ hg debugfs | grep 'case-sensitive:'
4 4 case-sensitive: no
5 5
6 6 test file addition with bad case
7 7
8 8 $ hg init repo1
9 9 $ cd repo1
10 10 $ echo a > a
11 11 $ hg add A
12 12 adding a
13 13 $ hg st
14 14 A a
15 15 $ hg ci -m adda
16 16 $ hg manifest
17 17 a
18 18 $ cd ..
19 19
20 20 test case collision on rename (issue750)
21 21
22 22 $ hg init repo2
23 23 $ cd repo2
24 24 $ echo a > a
25 25 $ hg --debug ci -Am adda
26 26 adding a
27 27 a
28 28 committed changeset 0:07f4944404050f47db2e5c5071e0e84e7a27bba9
29 29
30 30 Case-changing renames should work:
31 31
32 32 $ hg mv a A
33 33 $ hg mv A a
34 34 $ hg st
35
36 test changing case of path components
37
38 $ mkdir D
39 $ echo b > D/b
40 $ hg ci -Am addb D/b
41 $ hg mv D/b d/b
42 D/b: not overwriting - file exists
43 $ hg mv D/b d/c
44 $ hg st
45 A D/c
46 R D/b
47 $ mv D temp
48 $ mv temp d
49 $ hg st
50 A D/c
51 R D/b
52 $ hg revert -aq
53 $ rm d/c
54 $ echo c > D/c
55 $ hg add D/c
56 $ hg st
57 A D/c
58 $ hg ci -m addc D/c
59 $ hg mv d/b d/e
60 moving D/b to D/e
61 $ hg st
62 A D/e
63 R D/b
64 $ hg revert -aq
65 $ rm d/e
66 $ hg mv d/b D/B
67 moving D/b to D/B
68 $ hg st
69 A D/B
70 R D/b
35 71 $ cd ..
36 72
37 73 test case collision between revisions (issue912)
38 74
39 75 $ hg init repo3
40 76 $ cd repo3
41 77 $ echo a > a
42 78 $ hg ci -Am adda
43 79 adding a
44 80 $ hg rm a
45 81 $ hg ci -Am removea
46 82 $ echo A > A
47 83
48 84 on linux hfs keeps the old case stored, force it
49 85
50 86 $ mv a aa
51 87 $ mv aa A
52 88 $ hg ci -Am addA
53 89 adding A
54 90
55 91 used to fail under case insensitive fs
56 92
57 93 $ hg up -C 0
58 94 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 95 $ hg up -C
60 96 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
61 97
62 98 no clobbering of untracked files with wrong casing
63 99
64 100 $ hg up -r null
65 101 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
66 102 $ echo gold > a
67 103 $ hg up
68 104 A: untracked file differs
69 105 abort: untracked files in working directory differ from files in requested revision
70 106 [255]
71 107 $ cat a
72 108 gold
73 109
74 110 $ cd ..
75 111
76 112 issue 3342: file in nested directory causes unexpected abort
77 113
78 114 $ hg init issue3342
79 115 $ cd issue3342
80 116
81 117 $ mkdir -p a/B/c/D
82 118 $ echo e > a/B/c/D/e
83 119 $ hg add a/B/c/D/e
84 120
85 121 $ cd ..
86 122
87 123 issue 3340: mq does not handle case changes correctly
88 124
89 125 in addition to the reported case, 'hg qrefresh' is also tested against
90 126 case changes.
91 127
92 128 $ echo "[extensions]" >> $HGRCPATH
93 129 $ echo "mq=" >> $HGRCPATH
94 130
95 131 $ hg init issue3340
96 132 $ cd issue3340
97 133
98 134 $ echo a > mIxEdCaSe
99 135 $ hg add mIxEdCaSe
100 136 $ hg commit -m '#0'
101 137 $ hg rename mIxEdCaSe tmp
102 138 $ hg rename tmp MiXeDcAsE
103 139 $ hg status -A
104 140 A MiXeDcAsE
105 141 mIxEdCaSe
106 142 R mIxEdCaSe
107 143 $ hg qnew changecase
108 144 $ hg status -A
109 145 C MiXeDcAsE
110 146
111 147 $ hg qpop -a
112 148 popping changecase
113 149 patch queue now empty
114 150 $ hg qnew refresh-casechange
115 151 $ hg status -A
116 152 C mIxEdCaSe
117 153 $ hg rename mIxEdCaSe tmp
118 154 $ hg rename tmp MiXeDcAsE
119 155 $ hg status -A
120 156 A MiXeDcAsE
121 157 mIxEdCaSe
122 158 R mIxEdCaSe
123 159 $ hg qrefresh
124 160 $ hg status -A
125 161 C MiXeDcAsE
126 162
127 163 $ cd ..