rebase: do not crash when cwd disappears during the operation (issue4121)...
Pierre-Yves David
r20335:e4052064 stable
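The change itself is small: the dirstate hunk below adds a `_cwd` attribute computed through `util.propertycache`, so `os.getcwd()` is read and cached the first time the dirstate needs it, and `getcwd()` keeps returning a usable path even if the directory is removed from disk later in the rebase. A minimal standalone sketch of the same caching idea, using `functools.cached_property` as a stand-in for `util.propertycache` (the `Workdir` class is purely illustrative, not Mercurial code):

    import os
    from functools import cached_property

    class Workdir:
        """Illustrative only: cache the current working directory on first
        access so later lookups keep working even if the directory is
        removed from disk afterwards."""

        @cached_property  # stand-in for Mercurial's util.propertycache
        def _cwd(self):
            # os.getcwd() raises OSError once the directory no longer
            # exists, so capture the path while it is still present.
            return os.getcwd()

        def getcwd(self):
            return self._cwd

    wd = Workdir()
    print(wd.getcwd())  # first access populates the cache

Once the first access has populated the cache, deleting the directory no longer makes `getcwd()` raise.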
@@ -1,852 +1,856 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 import errno
8 8
9 9 from node import nullid
10 10 from i18n import _
11 11 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
12 12 import os, stat, errno, gc
13 13
14 14 propertycache = util.propertycache
15 15 filecache = scmutil.filecache
16 16 _rangemask = 0x7fffffff
17 17
18 18 class repocache(filecache):
19 19 """filecache for files in .hg/"""
20 20 def join(self, obj, fname):
21 21 return obj._opener.join(fname)
22 22
23 23 class rootcache(filecache):
24 24 """filecache for files in the repository root"""
25 25 def join(self, obj, fname):
26 26 return obj._join(fname)
27 27
28 28 class dirstate(object):
29 29
30 30 def __init__(self, opener, ui, root, validate):
31 31 '''Create a new dirstate object.
32 32
33 33 opener is an open()-like callable that can be used to open the
34 34 dirstate file; root is the root of the directory tracked by
35 35 the dirstate.
36 36 '''
37 37 self._opener = opener
38 38 self._validate = validate
39 39 self._root = root
40 40 self._rootdir = os.path.join(root, '')
41 41 self._dirty = False
42 42 self._dirtypl = False
43 43 self._lastnormaltime = 0
44 44 self._ui = ui
45 45 self._filecache = {}
46 46
47 47 @propertycache
48 48 def _map(self):
49 49 '''Return the dirstate contents as a map from filename to
50 50 (state, mode, size, time).'''
51 51 self._read()
52 52 return self._map
53 53
54 54 @propertycache
55 55 def _copymap(self):
56 56 self._read()
57 57 return self._copymap
58 58
59 59 @propertycache
60 60 def _foldmap(self):
61 61 f = {}
62 62 for name, s in self._map.iteritems():
63 63 if s[0] != 'r':
64 64 f[util.normcase(name)] = name
65 65 for name in self._dirs:
66 66 f[util.normcase(name)] = name
67 67 f['.'] = '.' # prevents useless util.fspath() invocation
68 68 return f
69 69
70 70 @repocache('branch')
71 71 def _branch(self):
72 72 try:
73 73 return self._opener.read("branch").strip() or "default"
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 return "default"
78 78
79 79 @propertycache
80 80 def _pl(self):
81 81 try:
82 82 fp = self._opener("dirstate")
83 83 st = fp.read(40)
84 84 fp.close()
85 85 l = len(st)
86 86 if l == 40:
87 87 return st[:20], st[20:40]
88 88 elif l > 0 and l < 40:
89 89 raise util.Abort(_('working directory state appears damaged!'))
90 90 except IOError, err:
91 91 if err.errno != errno.ENOENT:
92 92 raise
93 93 return [nullid, nullid]
94 94
95 95 @propertycache
96 96 def _dirs(self):
97 97 return scmutil.dirs(self._map, 'r')
98 98
99 99 def dirs(self):
100 100 return self._dirs
101 101
102 102 @rootcache('.hgignore')
103 103 def _ignore(self):
104 104 files = [self._join('.hgignore')]
105 105 for name, path in self._ui.configitems("ui"):
106 106 if name == 'ignore' or name.startswith('ignore.'):
107 107 files.append(util.expandpath(path))
108 108 return ignore.ignore(self._root, files, self._ui.warn)
109 109
110 110 @propertycache
111 111 def _slash(self):
112 112 return self._ui.configbool('ui', 'slash') and os.sep != '/'
113 113
114 114 @propertycache
115 115 def _checklink(self):
116 116 return util.checklink(self._root)
117 117
118 118 @propertycache
119 119 def _checkexec(self):
120 120 return util.checkexec(self._root)
121 121
122 122 @propertycache
123 123 def _checkcase(self):
124 124 return not util.checkcase(self._join('.hg'))
125 125
126 126 def _join(self, f):
127 127 # much faster than os.path.join()
128 128 # it's safe because f is always a relative path
129 129 return self._rootdir + f
130 130
131 131 def flagfunc(self, buildfallback):
132 132 if self._checklink and self._checkexec:
133 133 def f(x):
134 134 try:
135 135 st = os.lstat(self._join(x))
136 136 if util.statislink(st):
137 137 return 'l'
138 138 if util.statisexec(st):
139 139 return 'x'
140 140 except OSError:
141 141 pass
142 142 return ''
143 143 return f
144 144
145 145 fallback = buildfallback()
146 146 if self._checklink:
147 147 def f(x):
148 148 if os.path.islink(self._join(x)):
149 149 return 'l'
150 150 if 'x' in fallback(x):
151 151 return 'x'
152 152 return ''
153 153 return f
154 154 if self._checkexec:
155 155 def f(x):
156 156 if 'l' in fallback(x):
157 157 return 'l'
158 158 if util.isexec(self._join(x)):
159 159 return 'x'
160 160 return ''
161 161 return f
162 162 else:
163 163 return fallback
164 164
165 @propertycache
166 def _cwd(self):
167 return os.getcwd()
168
165 169 def getcwd(self):
166 cwd = os.getcwd()
170 cwd = self._cwd
167 171 if cwd == self._root:
168 172 return ''
169 173 # self._root ends with a path separator if self._root is '/' or 'C:\'
170 174 rootsep = self._root
171 175 if not util.endswithsep(rootsep):
172 176 rootsep += os.sep
173 177 if cwd.startswith(rootsep):
174 178 return cwd[len(rootsep):]
175 179 else:
176 180 # we're outside the repo. return an absolute path.
177 181 return cwd
178 182
179 183 def pathto(self, f, cwd=None):
180 184 if cwd is None:
181 185 cwd = self.getcwd()
182 186 path = util.pathto(self._root, cwd, f)
183 187 if self._slash:
184 188 return util.pconvert(path)
185 189 return path
186 190
187 191 def __getitem__(self, key):
188 192 '''Return the current state of key (a filename) in the dirstate.
189 193
190 194 States are:
191 195 n normal
192 196 m needs merging
193 197 r marked for removal
194 198 a marked for addition
195 199 ? not tracked
196 200 '''
197 201 return self._map.get(key, ("?",))[0]
198 202
199 203 def __contains__(self, key):
200 204 return key in self._map
201 205
202 206 def __iter__(self):
203 207 for x in sorted(self._map):
204 208 yield x
205 209
206 210 def iteritems(self):
207 211 return self._map.iteritems()
208 212
209 213 def parents(self):
210 214 return [self._validate(p) for p in self._pl]
211 215
212 216 def p1(self):
213 217 return self._validate(self._pl[0])
214 218
215 219 def p2(self):
216 220 return self._validate(self._pl[1])
217 221
218 222 def branch(self):
219 223 return encoding.tolocal(self._branch)
220 224
221 225 def setparents(self, p1, p2=nullid):
222 226 """Set dirstate parents to p1 and p2.
223 227
224 228 When moving from two parents to one, 'm' merged entries are
225 229 adjusted to normal and previous copy records are discarded and
226 230 returned by the call.
227 231
228 232 See localrepo.setparents()
229 233 """
230 234 self._dirty = self._dirtypl = True
231 235 oldp2 = self._pl[1]
232 236 self._pl = p1, p2
233 237 copies = {}
234 238 if oldp2 != nullid and p2 == nullid:
235 239 # Discard 'm' markers when moving away from a merge state
236 240 for f, s in self._map.iteritems():
237 241 if s[0] == 'm':
238 242 if f in self._copymap:
239 243 copies[f] = self._copymap[f]
240 244 self.normallookup(f)
241 245 return copies
242 246
243 247 def setbranch(self, branch):
244 248 self._branch = encoding.fromlocal(branch)
245 249 f = self._opener('branch', 'w', atomictemp=True)
246 250 try:
247 251 f.write(self._branch + '\n')
248 252 f.close()
249 253
250 254 # make sure filecache has the correct stat info for _branch after
251 255 # replacing the underlying file
252 256 ce = self._filecache['_branch']
253 257 if ce:
254 258 ce.refresh()
255 259 except: # re-raises
256 260 f.discard()
257 261 raise
258 262
259 263 def _read(self):
260 264 self._map = {}
261 265 self._copymap = {}
262 266 try:
263 267 st = self._opener.read("dirstate")
264 268 except IOError, err:
265 269 if err.errno != errno.ENOENT:
266 270 raise
267 271 return
268 272 if not st:
269 273 return
270 274
271 275 # Python's garbage collector triggers a GC each time a certain number
272 276 # of container objects (the number being defined by
273 277 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
274 278 # for each file in the dirstate. The C version then immediately marks
275 279 # them as not to be tracked by the collector. However, this has no
276 280 # effect on when GCs are triggered, only on what objects the GC looks
277 281 # into. This means that O(number of files) GCs are unavoidable.
278 282 # Depending on when in the process's lifetime the dirstate is parsed,
279 283 # this can get very expensive. As a workaround, disable GC while
280 284 # parsing the dirstate.
281 285 gcenabled = gc.isenabled()
282 286 gc.disable()
283 287 try:
284 288 p = parsers.parse_dirstate(self._map, self._copymap, st)
285 289 finally:
286 290 if gcenabled:
287 291 gc.enable()
288 292 if not self._dirtypl:
289 293 self._pl = p
290 294
291 295 def invalidate(self):
292 296 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
293 297 "_ignore"):
294 298 if a in self.__dict__:
295 299 delattr(self, a)
296 300 self._lastnormaltime = 0
297 301 self._dirty = False
298 302
299 303 def copy(self, source, dest):
300 304 """Mark dest as a copy of source. Unmark dest if source is None."""
301 305 if source == dest:
302 306 return
303 307 self._dirty = True
304 308 if source is not None:
305 309 self._copymap[dest] = source
306 310 elif dest in self._copymap:
307 311 del self._copymap[dest]
308 312
309 313 def copied(self, file):
310 314 return self._copymap.get(file, None)
311 315
312 316 def copies(self):
313 317 return self._copymap
314 318
315 319 def _droppath(self, f):
316 320 if self[f] not in "?r" and "_dirs" in self.__dict__:
317 321 self._dirs.delpath(f)
318 322
319 323 def _addpath(self, f, state, mode, size, mtime):
320 324 oldstate = self[f]
321 325 if state == 'a' or oldstate == 'r':
322 326 scmutil.checkfilename(f)
323 327 if f in self._dirs:
324 328 raise util.Abort(_('directory %r already in dirstate') % f)
325 329 # shadows
326 330 for d in scmutil.finddirs(f):
327 331 if d in self._dirs:
328 332 break
329 333 if d in self._map and self[d] != 'r':
330 334 raise util.Abort(
331 335 _('file %r in dirstate clashes with %r') % (d, f))
332 336 if oldstate in "?r" and "_dirs" in self.__dict__:
333 337 self._dirs.addpath(f)
334 338 self._dirty = True
335 339 self._map[f] = (state, mode, size, mtime)
336 340
337 341 def normal(self, f):
338 342 '''Mark a file normal and clean.'''
339 343 s = os.lstat(self._join(f))
340 344 mtime = int(s.st_mtime)
341 345 self._addpath(f, 'n', s.st_mode,
342 346 s.st_size & _rangemask, mtime & _rangemask)
343 347 if f in self._copymap:
344 348 del self._copymap[f]
345 349 if mtime > self._lastnormaltime:
346 350 # Remember the most recent modification timeslot for status(),
347 351 # to make sure we won't miss future size-preserving file content
348 352 # modifications that happen within the same timeslot.
349 353 self._lastnormaltime = mtime
350 354
351 355 def normallookup(self, f):
352 356 '''Mark a file normal, but possibly dirty.'''
353 357 if self._pl[1] != nullid and f in self._map:
354 358 # if there is a merge going on and the file was either
355 359 # in state 'm' (-1) or coming from other parent (-2) before
356 360 # being removed, restore that state.
357 361 entry = self._map[f]
358 362 if entry[0] == 'r' and entry[2] in (-1, -2):
359 363 source = self._copymap.get(f)
360 364 if entry[2] == -1:
361 365 self.merge(f)
362 366 elif entry[2] == -2:
363 367 self.otherparent(f)
364 368 if source:
365 369 self.copy(source, f)
366 370 return
367 371 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
368 372 return
369 373 self._addpath(f, 'n', 0, -1, -1)
370 374 if f in self._copymap:
371 375 del self._copymap[f]
372 376
373 377 def otherparent(self, f):
374 378 '''Mark as coming from the other parent, always dirty.'''
375 379 if self._pl[1] == nullid:
376 380 raise util.Abort(_("setting %r to other parent "
377 381 "only allowed in merges") % f)
378 382 self._addpath(f, 'n', 0, -2, -1)
379 383 if f in self._copymap:
380 384 del self._copymap[f]
381 385
382 386 def add(self, f):
383 387 '''Mark a file added.'''
384 388 self._addpath(f, 'a', 0, -1, -1)
385 389 if f in self._copymap:
386 390 del self._copymap[f]
387 391
388 392 def remove(self, f):
389 393 '''Mark a file removed.'''
390 394 self._dirty = True
391 395 self._droppath(f)
392 396 size = 0
393 397 if self._pl[1] != nullid and f in self._map:
394 398 # backup the previous state
395 399 entry = self._map[f]
396 400 if entry[0] == 'm': # merge
397 401 size = -1
398 402 elif entry[0] == 'n' and entry[2] == -2: # other parent
399 403 size = -2
400 404 self._map[f] = ('r', 0, size, 0)
401 405 if size == 0 and f in self._copymap:
402 406 del self._copymap[f]
403 407
404 408 def merge(self, f):
405 409 '''Mark a file merged.'''
406 410 if self._pl[1] == nullid:
407 411 return self.normallookup(f)
408 412 s = os.lstat(self._join(f))
409 413 self._addpath(f, 'm', s.st_mode,
410 414 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
411 415 if f in self._copymap:
412 416 del self._copymap[f]
413 417
414 418 def drop(self, f):
415 419 '''Drop a file from the dirstate'''
416 420 if f in self._map:
417 421 self._dirty = True
418 422 self._droppath(f)
419 423 del self._map[f]
420 424
421 425 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
422 426 normed = util.normcase(path)
423 427 folded = self._foldmap.get(normed, None)
424 428 if folded is None:
425 429 if isknown:
426 430 folded = path
427 431 else:
428 432 if exists is None:
429 433 exists = os.path.lexists(os.path.join(self._root, path))
430 434 if not exists:
431 435 # Maybe a path component exists
432 436 if not ignoremissing and '/' in path:
433 437 d, f = path.rsplit('/', 1)
434 438 d = self._normalize(d, isknown, ignoremissing, None)
435 439 folded = d + "/" + f
436 440 else:
437 441 # No path components, preserve original case
438 442 folded = path
439 443 else:
440 444 # recursively normalize leading directory components
441 445 # against dirstate
442 446 if '/' in normed:
443 447 d, f = normed.rsplit('/', 1)
444 448 d = self._normalize(d, isknown, ignoremissing, True)
445 449 r = self._root + "/" + d
446 450 folded = d + "/" + util.fspath(f, r)
447 451 else:
448 452 folded = util.fspath(normed, self._root)
449 453 self._foldmap[normed] = folded
450 454
451 455 return folded
452 456
453 457 def normalize(self, path, isknown=False, ignoremissing=False):
454 458 '''
455 459 normalize the case of a pathname when on a casefolding filesystem
456 460
457 461 isknown specifies whether the filename came from walking the
458 462 disk, to avoid extra filesystem access.
459 463
460 464 If ignoremissing is True, missing paths are returned
461 465 unchanged. Otherwise, we try harder to normalize possibly
462 466 existing path components.
463 467
464 468 The normalized case is determined based on the following precedence:
465 469
466 470 - version of name already stored in the dirstate
467 471 - version of name stored on disk
468 472 - version provided via command arguments
469 473 '''
470 474
471 475 if self._checkcase:
472 476 return self._normalize(path, isknown, ignoremissing)
473 477 return path
474 478
475 479 def clear(self):
476 480 self._map = {}
477 481 if "_dirs" in self.__dict__:
478 482 delattr(self, "_dirs")
479 483 self._copymap = {}
480 484 self._pl = [nullid, nullid]
481 485 self._lastnormaltime = 0
482 486 self._dirty = True
483 487
484 488 def rebuild(self, parent, allfiles, changedfiles=None):
485 489 changedfiles = changedfiles or allfiles
486 490 oldmap = self._map
487 491 self.clear()
488 492 for f in allfiles:
489 493 if f not in changedfiles:
490 494 self._map[f] = oldmap[f]
491 495 else:
492 496 if 'x' in allfiles.flags(f):
493 497 self._map[f] = ('n', 0777, -1, 0)
494 498 else:
495 499 self._map[f] = ('n', 0666, -1, 0)
496 500 self._pl = (parent, nullid)
497 501 self._dirty = True
498 502
499 503 def write(self):
500 504 if not self._dirty:
501 505 return
502 506 st = self._opener("dirstate", "w", atomictemp=True)
503 507
504 508 def finish(s):
505 509 st.write(s)
506 510 st.close()
507 511 self._lastnormaltime = 0
508 512 self._dirty = self._dirtypl = False
509 513
510 514 # use the modification time of the newly created temporary file as the
511 515 # filesystem's notion of 'now'
512 516 now = util.fstat(st).st_mtime
513 517 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
514 518
515 519 def _dirignore(self, f):
516 520 if f == '.':
517 521 return False
518 522 if self._ignore(f):
519 523 return True
520 524 for p in scmutil.finddirs(f):
521 525 if self._ignore(p):
522 526 return True
523 527 return False
524 528
525 529 def _walkexplicit(self, match, subrepos):
526 530 '''Get stat data about the files explicitly specified by match.
527 531
528 532 Return a triple (results, dirsfound, dirsnotfound).
529 533 - results is a mapping from filename to stat result. It also contains
530 534 listings mapping subrepos and .hg to None.
531 535 - dirsfound is a list of files found to be directories.
532 536 - dirsnotfound is a list of files that the dirstate thinks are
533 537 directories and that were not found.'''
534 538
535 539 def badtype(mode):
536 540 kind = _('unknown')
537 541 if stat.S_ISCHR(mode):
538 542 kind = _('character device')
539 543 elif stat.S_ISBLK(mode):
540 544 kind = _('block device')
541 545 elif stat.S_ISFIFO(mode):
542 546 kind = _('fifo')
543 547 elif stat.S_ISSOCK(mode):
544 548 kind = _('socket')
545 549 elif stat.S_ISDIR(mode):
546 550 kind = _('directory')
547 551 return _('unsupported file type (type is %s)') % kind
548 552
549 553 matchedir = match.explicitdir
550 554 badfn = match.bad
551 555 dmap = self._map
552 556 normpath = util.normpath
553 557 lstat = os.lstat
554 558 getkind = stat.S_IFMT
555 559 dirkind = stat.S_IFDIR
556 560 regkind = stat.S_IFREG
557 561 lnkkind = stat.S_IFLNK
558 562 join = self._join
559 563 dirsfound = []
560 564 foundadd = dirsfound.append
561 565 dirsnotfound = []
562 566 notfoundadd = dirsnotfound.append
563 567
564 568 if match.matchfn != match.exact and self._checkcase:
565 569 normalize = self._normalize
566 570 else:
567 571 normalize = None
568 572
569 573 files = sorted(match.files())
570 574 subrepos.sort()
571 575 i, j = 0, 0
572 576 while i < len(files) and j < len(subrepos):
573 577 subpath = subrepos[j] + "/"
574 578 if files[i] < subpath:
575 579 i += 1
576 580 continue
577 581 while i < len(files) and files[i].startswith(subpath):
578 582 del files[i]
579 583 j += 1
580 584
581 585 if not files or '.' in files:
582 586 files = ['']
583 587 results = dict.fromkeys(subrepos)
584 588 results['.hg'] = None
585 589
586 590 for ff in files:
587 591 if normalize:
588 592 nf = normalize(normpath(ff), False, True)
589 593 else:
590 594 nf = normpath(ff)
591 595 if nf in results:
592 596 continue
593 597
594 598 try:
595 599 st = lstat(join(nf))
596 600 kind = getkind(st.st_mode)
597 601 if kind == dirkind:
598 602 if nf in dmap:
599 603 #file deleted on disk but still in dirstate
600 604 results[nf] = None
601 605 if matchedir:
602 606 matchedir(nf)
603 607 foundadd(nf)
604 608 elif kind == regkind or kind == lnkkind:
605 609 results[nf] = st
606 610 else:
607 611 badfn(ff, badtype(kind))
608 612 if nf in dmap:
609 613 results[nf] = None
610 614 except OSError, inst:
611 615 if nf in dmap: # does it exactly match a file?
612 616 results[nf] = None
613 617 else: # does it match a directory?
614 618 prefix = nf + "/"
615 619 for fn in dmap:
616 620 if fn.startswith(prefix):
617 621 if matchedir:
618 622 matchedir(nf)
619 623 notfoundadd(nf)
620 624 break
621 625 else:
622 626 badfn(ff, inst.strerror)
623 627
624 628 return results, dirsfound, dirsnotfound
625 629
626 630 def walk(self, match, subrepos, unknown, ignored, full=True):
627 631 '''
628 632 Walk recursively through the directory tree, finding all files
629 633 matched by match.
630 634
631 635 If full is False, maybe skip some known-clean files.
632 636
633 637 Return a dict mapping filename to stat-like object (either
634 638 mercurial.osutil.stat instance or return value of os.stat()).
635 639
636 640 '''
637 641 # full is a flag that extensions that hook into walk can use -- this
638 642 # implementation doesn't use it at all. This satisfies the contract
639 643 # because we only guarantee a "maybe".
640 644
641 645 def fwarn(f, msg):
642 646 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
643 647 return False
644 648
645 649 ignore = self._ignore
646 650 dirignore = self._dirignore
647 651 if ignored:
648 652 ignore = util.never
649 653 dirignore = util.never
650 654 elif not unknown:
651 655 # if unknown and ignored are False, skip step 2
652 656 ignore = util.always
653 657 dirignore = util.always
654 658
655 659 matchfn = match.matchfn
656 660 matchalways = match.always()
657 661 matchtdir = match.traversedir
658 662 dmap = self._map
659 663 listdir = osutil.listdir
660 664 lstat = os.lstat
661 665 dirkind = stat.S_IFDIR
662 666 regkind = stat.S_IFREG
663 667 lnkkind = stat.S_IFLNK
664 668 join = self._join
665 669
666 670 exact = skipstep3 = False
667 671 if matchfn == match.exact: # match.exact
668 672 exact = True
669 673 dirignore = util.always # skip step 2
670 674 elif match.files() and not match.anypats(): # match.match, no patterns
671 675 skipstep3 = True
672 676
673 677 if not exact and self._checkcase:
674 678 normalize = self._normalize
675 679 skipstep3 = False
676 680 else:
677 681 normalize = None
678 682
679 683 # step 1: find all explicit files
680 684 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
681 685
682 686 skipstep3 = skipstep3 and not (work or dirsnotfound)
683 687 work = [d for d in work if not dirignore(d)]
684 688 wadd = work.append
685 689
686 690 # step 2: visit subdirectories
687 691 while work:
688 692 nd = work.pop()
689 693 skip = None
690 694 if nd == '.':
691 695 nd = ''
692 696 else:
693 697 skip = '.hg'
694 698 try:
695 699 entries = listdir(join(nd), stat=True, skip=skip)
696 700 except OSError, inst:
697 701 if inst.errno in (errno.EACCES, errno.ENOENT):
698 702 fwarn(nd, inst.strerror)
699 703 continue
700 704 raise
701 705 for f, kind, st in entries:
702 706 if normalize:
703 707 nf = normalize(nd and (nd + "/" + f) or f, True, True)
704 708 else:
705 709 nf = nd and (nd + "/" + f) or f
706 710 if nf not in results:
707 711 if kind == dirkind:
708 712 if not ignore(nf):
709 713 if matchtdir:
710 714 matchtdir(nf)
711 715 wadd(nf)
712 716 if nf in dmap and (matchalways or matchfn(nf)):
713 717 results[nf] = None
714 718 elif kind == regkind or kind == lnkkind:
715 719 if nf in dmap:
716 720 if matchalways or matchfn(nf):
717 721 results[nf] = st
718 722 elif (matchalways or matchfn(nf)) and not ignore(nf):
719 723 results[nf] = st
720 724 elif nf in dmap and (matchalways or matchfn(nf)):
721 725 results[nf] = None
722 726
723 727 for s in subrepos:
724 728 del results[s]
725 729 del results['.hg']
726 730
727 731 # step 3: report unseen items in the dmap hash
728 732 if not skipstep3 and not exact:
729 733 if not results and matchalways:
730 734 visit = dmap.keys()
731 735 else:
732 736 visit = [f for f in dmap if f not in results and matchfn(f)]
733 737 visit.sort()
734 738
735 739 if unknown:
736 740 # unknown == True means we walked the full directory tree above.
737 741 # So if a file is not seen it was either a) not matching matchfn
738 742 # b) ignored, c) missing, or d) under a symlink directory.
739 743 audit_path = pathutil.pathauditor(self._root)
740 744
741 745 for nf in iter(visit):
742 746 # Report ignored items in the dmap as long as they are not
743 747 # under a symlink directory.
744 748 if audit_path.check(nf):
745 749 try:
746 750 results[nf] = lstat(join(nf))
747 751 except OSError:
748 752 # file doesn't exist
749 753 results[nf] = None
750 754 else:
751 755 # It's either missing or under a symlink directory
752 756 results[nf] = None
753 757 else:
754 758 # We may not have walked the full directory tree above,
755 759 # so stat everything we missed.
756 760 nf = iter(visit).next
757 761 for st in util.statfiles([join(i) for i in visit]):
758 762 results[nf()] = st
759 763 return results
760 764
761 765 def status(self, match, subrepos, ignored, clean, unknown):
762 766 '''Determine the status of the working copy relative to the
763 767 dirstate and return a tuple of lists (unsure, modified, added,
764 768 removed, deleted, unknown, ignored, clean), where:
765 769
766 770 unsure:
767 771 files that might have been modified since the dirstate was
768 772 written, but need to be read to be sure (size is the same
769 773 but mtime differs)
770 774 modified:
771 775 files that have definitely been modified since the dirstate
772 776 was written (different size or mode)
773 777 added:
774 778 files that have been explicitly added with hg add
775 779 removed:
776 780 files that have been explicitly removed with hg remove
777 781 deleted:
778 782 files that have been deleted through other means ("missing")
779 783 unknown:
780 784 files not in the dirstate that are not ignored
781 785 ignored:
782 786 files not in the dirstate that are ignored
783 787 (by _dirignore())
784 788 clean:
785 789 files that have definitely not been modified since the
786 790 dirstate was written
787 791 '''
788 792 listignored, listclean, listunknown = ignored, clean, unknown
789 793 lookup, modified, added, unknown, ignored = [], [], [], [], []
790 794 removed, deleted, clean = [], [], []
791 795
792 796 dmap = self._map
793 797 ladd = lookup.append # aka "unsure"
794 798 madd = modified.append
795 799 aadd = added.append
796 800 uadd = unknown.append
797 801 iadd = ignored.append
798 802 radd = removed.append
799 803 dadd = deleted.append
800 804 cadd = clean.append
801 805 mexact = match.exact
802 806 dirignore = self._dirignore
803 807 checkexec = self._checkexec
804 808 copymap = self._copymap
805 809 lastnormaltime = self._lastnormaltime
806 810
807 811 # We need to do full walks when either
808 812 # - we're listing all clean files, or
809 813 # - match.traversedir does something, because match.traversedir should
810 814 # be called for every dir in the working dir
811 815 full = listclean or match.traversedir is not None
812 816 for fn, st in self.walk(match, subrepos, listunknown, listignored,
813 817 full=full).iteritems():
814 818 if fn not in dmap:
815 819 if (listignored or mexact(fn)) and dirignore(fn):
816 820 if listignored:
817 821 iadd(fn)
818 822 else:
819 823 uadd(fn)
820 824 continue
821 825
822 826 state, mode, size, time = dmap[fn]
823 827
824 828 if not st and state in "nma":
825 829 dadd(fn)
826 830 elif state == 'n':
827 831 mtime = int(st.st_mtime)
828 832 if (size >= 0 and
829 833 ((size != st.st_size and size != st.st_size & _rangemask)
830 834 or ((mode ^ st.st_mode) & 0100 and checkexec))
831 835 or size == -2 # other parent
832 836 or fn in copymap):
833 837 madd(fn)
834 838 elif time != mtime and time != mtime & _rangemask:
835 839 ladd(fn)
836 840 elif mtime == lastnormaltime:
837 841 # fn may have been changed in the same timeslot without
838 842 # changing its size. This can happen if we quickly do
839 843 # multiple commits in a single transaction.
840 844 # Force lookup, so we don't miss such a racy file change.
841 845 ladd(fn)
842 846 elif listclean:
843 847 cadd(fn)
844 848 elif state == 'm':
845 849 madd(fn)
846 850 elif state == 'a':
847 851 aadd(fn)
848 852 elif state == 'r':
849 853 radd(fn)
850 854
851 855 return (lookup, modified, added, removed, deleted, unknown, ignored,
852 856 clean)
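The `_cwd` attribute added above, like the other lazily computed attributes in this file, relies on `util.propertycache`: a non-data descriptor that runs the decorated function once per instance and stores the result in the instance dictionary, so later attribute reads bypass the descriptor entirely. A simplified sketch of such a descriptor (illustrative, not Mercurial's exact implementation):

    class propertycache(object):
        """Simplified caching property: compute once, then let the
        instance __dict__ shadow the descriptor on later reads."""

        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.func(obj)
            # Because this descriptor defines no __set__, the instance
            # attribute set here takes precedence on the next lookup.
            obj.__dict__[self.name] = value
            return value

    class Example(object):
        @propertycache
        def answer(self):
            print('computed once')
            return 42

    e = Example()
    e.answer  # prints 'computed once' and returns 42
    e.answer  # served from e.__dict__, no recomputation

Because the cached value lives in the instance dictionary, the cached working directory survives even after the directory itself has been deleted.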
@@ -1,651 +1,676 @@
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > rebase=
4 4 >
5 5 > [phases]
6 6 > publish=False
7 7 >
8 8 > [alias]
9 9 > tglog = log -G --template "{rev}: '{desc}' {branches}\n"
10 10 > EOF
11 11
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ hg unbundle "$TESTDIR/bundles/rebase.hg"
16 16 adding changesets
17 17 adding manifests
18 18 adding file changes
19 19 added 8 changesets with 7 changes to 7 files (+2 heads)
20 20 (run 'hg heads' to see heads, 'hg merge' to merge)
21 21 $ hg up tip
22 22 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 23 $ cd ..
24 24
25 25
26 26 Rebasing
27 27 D onto H - simple rebase:
28 28
29 29 $ hg clone -q -u . a a1
30 30 $ cd a1
31 31
32 32 $ hg tglog
33 33 @ 7: 'H'
34 34 |
35 35 | o 6: 'G'
36 36 |/|
37 37 o | 5: 'F'
38 38 | |
39 39 | o 4: 'E'
40 40 |/
41 41 | o 3: 'D'
42 42 | |
43 43 | o 2: 'C'
44 44 | |
45 45 | o 1: 'B'
46 46 |/
47 47 o 0: 'A'
48 48
49 49
50 50 $ hg rebase -s 3 -d 7
51 51 saved backup bundle to $TESTTMP/a1/.hg/strip-backup/*-backup.hg (glob)
52 52
53 53 $ hg tglog
54 54 o 7: 'D'
55 55 |
56 56 @ 6: 'H'
57 57 |
58 58 | o 5: 'G'
59 59 |/|
60 60 o | 4: 'F'
61 61 | |
62 62 | o 3: 'E'
63 63 |/
64 64 | o 2: 'C'
65 65 | |
66 66 | o 1: 'B'
67 67 |/
68 68 o 0: 'A'
69 69
70 70 $ cd ..
71 71
72 72
73 73 D onto F - intermediate point:
74 74
75 75 $ hg clone -q -u . a a2
76 76 $ cd a2
77 77
78 78 $ hg rebase -s 3 -d 5
79 79 saved backup bundle to $TESTTMP/a2/.hg/strip-backup/*-backup.hg (glob)
80 80
81 81 $ hg tglog
82 82 o 7: 'D'
83 83 |
84 84 | @ 6: 'H'
85 85 |/
86 86 | o 5: 'G'
87 87 |/|
88 88 o | 4: 'F'
89 89 | |
90 90 | o 3: 'E'
91 91 |/
92 92 | o 2: 'C'
93 93 | |
94 94 | o 1: 'B'
95 95 |/
96 96 o 0: 'A'
97 97
98 98 $ cd ..
99 99
100 100
101 101 E onto H - skip of G:
102 102
103 103 $ hg clone -q -u . a a3
104 104 $ cd a3
105 105
106 106 $ hg rebase -s 4 -d 7
107 107 saved backup bundle to $TESTTMP/a3/.hg/strip-backup/*-backup.hg (glob)
108 108
109 109 $ hg tglog
110 110 o 6: 'E'
111 111 |
112 112 @ 5: 'H'
113 113 |
114 114 o 4: 'F'
115 115 |
116 116 | o 3: 'D'
117 117 | |
118 118 | o 2: 'C'
119 119 | |
120 120 | o 1: 'B'
121 121 |/
122 122 o 0: 'A'
123 123
124 124 $ cd ..
125 125
126 126
127 127 F onto E - rebase of a branching point (skip G):
128 128
129 129 $ hg clone -q -u . a a4
130 130 $ cd a4
131 131
132 132 $ hg rebase -s 5 -d 4
133 133 saved backup bundle to $TESTTMP/a4/.hg/strip-backup/*-backup.hg (glob)
134 134
135 135 $ hg tglog
136 136 @ 6: 'H'
137 137 |
138 138 o 5: 'F'
139 139 |
140 140 o 4: 'E'
141 141 |
142 142 | o 3: 'D'
143 143 | |
144 144 | o 2: 'C'
145 145 | |
146 146 | o 1: 'B'
147 147 |/
148 148 o 0: 'A'
149 149
150 150 $ cd ..
151 151
152 152
153 153 G onto H - merged revision having a parent in ancestors of target:
154 154
155 155 $ hg clone -q -u . a a5
156 156 $ cd a5
157 157
158 158 $ hg rebase -s 6 -d 7
159 159 saved backup bundle to $TESTTMP/a5/.hg/strip-backup/*-backup.hg (glob)
160 160
161 161 $ hg tglog
162 162 o 7: 'G'
163 163 |\
164 164 | @ 6: 'H'
165 165 | |
166 166 | o 5: 'F'
167 167 | |
168 168 o | 4: 'E'
169 169 |/
170 170 | o 3: 'D'
171 171 | |
172 172 | o 2: 'C'
173 173 | |
174 174 | o 1: 'B'
175 175 |/
176 176 o 0: 'A'
177 177
178 178 $ cd ..
179 179
180 180
181 181 F onto B - G maintains E as parent:
182 182
183 183 $ hg clone -q -u . a a6
184 184 $ cd a6
185 185
186 186 $ hg rebase -s 5 -d 1
187 187 saved backup bundle to $TESTTMP/a6/.hg/strip-backup/*-backup.hg (glob)
188 188
189 189 $ hg tglog
190 190 @ 7: 'H'
191 191 |
192 192 | o 6: 'G'
193 193 |/|
194 194 o | 5: 'F'
195 195 | |
196 196 | o 4: 'E'
197 197 | |
198 198 | | o 3: 'D'
199 199 | | |
200 200 +---o 2: 'C'
201 201 | |
202 202 o | 1: 'B'
203 203 |/
204 204 o 0: 'A'
205 205
206 206 $ cd ..
207 207
208 208
209 209 These will fail (using --source):
210 210
211 211 G onto F - rebase onto an ancestor:
212 212
213 213 $ hg clone -q -u . a a7
214 214 $ cd a7
215 215
216 216 $ hg rebase -s 6 -d 5
217 217 nothing to rebase
218 218 [1]
219 219
220 220 F onto G - rebase onto a descendant:
221 221
222 222 $ hg rebase -s 5 -d 6
223 223 abort: source is ancestor of destination
224 224 [255]
225 225
226 226 G onto B - merge revision with both parents not in ancestors of target:
227 227
228 228 $ hg rebase -s 6 -d 1
229 229 abort: cannot use revision 6 as base, result would have 3 parents
230 230 [255]
231 231
232 232
233 233 These will abort gracefully (using --base):
234 234
235 235 G onto G - rebase onto same changeset:
236 236
237 237 $ hg rebase -b 6 -d 6
238 238 nothing to rebase - eea13746799a is both "base" and destination
239 239 [1]
240 240
241 241 G onto F - rebase onto an ancestor:
242 242
243 243 $ hg rebase -b 6 -d 5
244 244 nothing to rebase
245 245 [1]
246 246
247 247 F onto G - rebase onto a descendant:
248 248
249 249 $ hg rebase -b 5 -d 6
250 250 nothing to rebase - "base" 24b6387c8c8c is already an ancestor of destination eea13746799a
251 251 [1]
252 252
253 253 C onto A - rebase onto an ancestor:
254 254
255 255 $ hg rebase -d 0 -s 2
256 256 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/5fddd98957c8-backup.hg (glob)
257 257 $ hg tglog
258 258 o 7: 'D'
259 259 |
260 260 o 6: 'C'
261 261 |
262 262 | @ 5: 'H'
263 263 | |
264 264 | | o 4: 'G'
265 265 | |/|
266 266 | o | 3: 'F'
267 267 |/ /
268 268 | o 2: 'E'
269 269 |/
270 270 | o 1: 'B'
271 271 |/
272 272 o 0: 'A'
273 273
274 274
275 275 Check rebasing public changeset
276 276
277 277 $ hg pull --config phases.publish=True -q -r 6 . # update phase of 6
278 278 $ hg rebase -d 0 -b 6
279 279 nothing to rebase
280 280 [1]
281 281 $ hg rebase -d 5 -b 6
282 282 abort: can't rebase immutable changeset e1c4361dd923
283 283 (see hg help phases for details)
284 284 [255]
285 285
286 286 $ hg rebase -d 5 -b 6 --keep
287 287
288 288 Check rebasing mutable changeset
289 289 Source phase greater than or equal to destination phase: new changesets get the phase of the source:
290 290 $ hg rebase -s9 -d0
291 291 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2b23e52411f4-backup.hg (glob)
292 292 $ hg log --template "{phase}\n" -r 9
293 293 draft
294 294 $ hg rebase -s9 -d1
295 295 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2cb10d0cfc6c-backup.hg (glob)
296 296 $ hg log --template "{phase}\n" -r 9
297 297 draft
298 298 $ hg phase --force --secret 9
299 299 $ hg rebase -s9 -d0
300 300 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/c5b12b67163a-backup.hg (glob)
301 301 $ hg log --template "{phase}\n" -r 9
302 302 secret
303 303 $ hg rebase -s9 -d1
304 304 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/2a0524f868ac-backup.hg (glob)
305 305 $ hg log --template "{phase}\n" -r 9
306 306 secret
307 307 Source phase lower than destination phase: new changesets get the phase of the destination:
308 308 $ hg rebase -s8 -d9
309 309 saved backup bundle to $TESTTMP/a7/.hg/strip-backup/6d4f22462821-backup.hg (glob)
310 310 $ hg log --template "{phase}\n" -r 'rev(9)'
311 311 secret
312 312
313 313 $ cd ..
314 314
315 315 Test for revset
316 316
317 317 We need a slightly different graph
318 318 All destinations are B
319 319
320 320 $ hg init ah
321 321 $ cd ah
322 322 $ hg unbundle "$TESTDIR/bundles/rebase-revset.hg"
323 323 adding changesets
324 324 adding manifests
325 325 adding file changes
326 326 added 9 changesets with 9 changes to 9 files (+2 heads)
327 327 (run 'hg heads' to see heads, 'hg merge' to merge)
328 328 $ hg tglog
329 329 o 8: 'I'
330 330 |
331 331 o 7: 'H'
332 332 |
333 333 o 6: 'G'
334 334 |
335 335 | o 5: 'F'
336 336 | |
337 337 | o 4: 'E'
338 338 |/
339 339 o 3: 'D'
340 340 |
341 341 o 2: 'C'
342 342 |
343 343 | o 1: 'B'
344 344 |/
345 345 o 0: 'A'
346 346
347 347 $ cd ..
348 348
349 349
350 350 Simple case with keep:
351 351
352 352 Source has two descendant heads but we ask for only one
353 353
354 354 $ hg clone -q -u . ah ah1
355 355 $ cd ah1
356 356 $ hg rebase -r '2::8' -d 1
357 357 abort: can't remove original changesets with unrebased descendants
358 358 (use --keep to keep original changesets)
359 359 [255]
360 360 $ hg rebase -r '2::8' -d 1 --keep
361 361 $ hg tglog
362 362 o 13: 'I'
363 363 |
364 364 o 12: 'H'
365 365 |
366 366 o 11: 'G'
367 367 |
368 368 o 10: 'D'
369 369 |
370 370 o 9: 'C'
371 371 |
372 372 | o 8: 'I'
373 373 | |
374 374 | o 7: 'H'
375 375 | |
376 376 | o 6: 'G'
377 377 | |
378 378 | | o 5: 'F'
379 379 | | |
380 380 | | o 4: 'E'
381 381 | |/
382 382 | o 3: 'D'
383 383 | |
384 384 | o 2: 'C'
385 385 | |
386 386 o | 1: 'B'
387 387 |/
388 388 o 0: 'A'
389 389
390 390
391 391 $ cd ..
392 392
393 393 Base has the one descendant head we ask for, but the common ancestor has two
394 394
395 395 $ hg clone -q -u . ah ah2
396 396 $ cd ah2
397 397 $ hg rebase -r '3::8' -d 1
398 398 abort: can't remove original changesets with unrebased descendants
399 399 (use --keep to keep original changesets)
400 400 [255]
401 401 $ hg rebase -r '3::8' -d 1 --keep
402 402 $ hg tglog
403 403 o 12: 'I'
404 404 |
405 405 o 11: 'H'
406 406 |
407 407 o 10: 'G'
408 408 |
409 409 o 9: 'D'
410 410 |
411 411 | o 8: 'I'
412 412 | |
413 413 | o 7: 'H'
414 414 | |
415 415 | o 6: 'G'
416 416 | |
417 417 | | o 5: 'F'
418 418 | | |
419 419 | | o 4: 'E'
420 420 | |/
421 421 | o 3: 'D'
422 422 | |
423 423 | o 2: 'C'
424 424 | |
425 425 o | 1: 'B'
426 426 |/
427 427 o 0: 'A'
428 428
429 429
430 430 $ cd ..
431 431
432 432 rebase subset
433 433
434 434 $ hg clone -q -u . ah ah3
435 435 $ cd ah3
436 436 $ hg rebase -r '3::7' -d 1
437 437 abort: can't remove original changesets with unrebased descendants
438 438 (use --keep to keep original changesets)
439 439 [255]
440 440 $ hg rebase -r '3::7' -d 1 --keep
441 441 $ hg tglog
442 442 o 11: 'H'
443 443 |
444 444 o 10: 'G'
445 445 |
446 446 o 9: 'D'
447 447 |
448 448 | o 8: 'I'
449 449 | |
450 450 | o 7: 'H'
451 451 | |
452 452 | o 6: 'G'
453 453 | |
454 454 | | o 5: 'F'
455 455 | | |
456 456 | | o 4: 'E'
457 457 | |/
458 458 | o 3: 'D'
459 459 | |
460 460 | o 2: 'C'
461 461 | |
462 462 o | 1: 'B'
463 463 |/
464 464 o 0: 'A'
465 465
466 466
467 467 $ cd ..
468 468
469 469 rebase subset with multiple heads
470 470
471 471 $ hg clone -q -u . ah ah4
472 472 $ cd ah4
473 473 $ hg rebase -r '3::(7+5)' -d 1
474 474 abort: can't remove original changesets with unrebased descendants
475 475 (use --keep to keep original changesets)
476 476 [255]
477 477 $ hg rebase -r '3::(7+5)' -d 1 --keep
478 478 $ hg tglog
479 479 o 13: 'H'
480 480 |
481 481 o 12: 'G'
482 482 |
483 483 | o 11: 'F'
484 484 | |
485 485 | o 10: 'E'
486 486 |/
487 487 o 9: 'D'
488 488 |
489 489 | o 8: 'I'
490 490 | |
491 491 | o 7: 'H'
492 492 | |
493 493 | o 6: 'G'
494 494 | |
495 495 | | o 5: 'F'
496 496 | | |
497 497 | | o 4: 'E'
498 498 | |/
499 499 | o 3: 'D'
500 500 | |
501 501 | o 2: 'C'
502 502 | |
503 503 o | 1: 'B'
504 504 |/
505 505 o 0: 'A'
506 506
507 507
508 508 $ cd ..
509 509
510 510 More advanced tests
511 511
512 512 rebase on ancestor with revset
513 513
514 514 $ hg clone -q -u . ah ah5
515 515 $ cd ah5
516 516 $ hg rebase -r '6::' -d 2
517 517 saved backup bundle to $TESTTMP/ah5/.hg/strip-backup/3d8a618087a7-backup.hg (glob)
518 518 $ hg tglog
519 519 o 8: 'I'
520 520 |
521 521 o 7: 'H'
522 522 |
523 523 o 6: 'G'
524 524 |
525 525 | o 5: 'F'
526 526 | |
527 527 | o 4: 'E'
528 528 | |
529 529 | o 3: 'D'
530 530 |/
531 531 o 2: 'C'
532 532 |
533 533 | o 1: 'B'
534 534 |/
535 535 o 0: 'A'
536 536
537 537 $ cd ..
538 538
539 539
540 540 rebase with multiple roots.
541 541 We rebase E and G onto B.
542 542 We would expect the heads to be I and F if it were supported
543 543
544 544 $ hg clone -q -u . ah ah6
545 545 $ cd ah6
546 546 $ hg rebase -r '(4+6)::' -d 1
547 547 saved backup bundle to $TESTTMP/ah6/.hg/strip-backup/3d8a618087a7-backup.hg (glob)
548 548 $ hg tglog
549 549 o 8: 'I'
550 550 |
551 551 o 7: 'H'
552 552 |
553 553 o 6: 'G'
554 554 |
555 555 | o 5: 'F'
556 556 | |
557 557 | o 4: 'E'
558 558 |/
559 559 | o 3: 'D'
560 560 | |
561 561 | o 2: 'C'
562 562 | |
563 563 o | 1: 'B'
564 564 |/
565 565 o 0: 'A'
566 566
567 567 $ cd ..
568 568
569 569 More complex rebase with multiple roots;
570 570 each root has a different common ancestor with the destination, and this is a detach
571 571
572 572 (setup)
573 573
574 574 $ hg clone -q -u . a a8
575 575 $ cd a8
576 576 $ echo I > I
577 577 $ hg add I
578 578 $ hg commit -m I
579 579 $ hg up 4
580 580 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
581 581 $ echo I > J
582 582 $ hg add J
583 583 $ hg commit -m J
584 584 created new head
585 585 $ echo I > K
586 586 $ hg add K
587 587 $ hg commit -m K
588 588 $ hg tglog
589 589 @ 10: 'K'
590 590 |
591 591 o 9: 'J'
592 592 |
593 593 | o 8: 'I'
594 594 | |
595 595 | o 7: 'H'
596 596 | |
597 597 +---o 6: 'G'
598 598 | |/
599 599 | o 5: 'F'
600 600 | |
601 601 o | 4: 'E'
602 602 |/
603 603 | o 3: 'D'
604 604 | |
605 605 | o 2: 'C'
606 606 | |
607 607 | o 1: 'B'
608 608 |/
609 609 o 0: 'A'
610 610
611 611 (actual test)
612 612
613 613 $ hg rebase --dest 'desc(G)' --rev 'desc(K) + desc(I)'
614 614 saved backup bundle to $TESTTMP/a8/.hg/strip-backup/23a4ace37988-backup.hg (glob)
615 615 $ hg log --rev 'children(desc(G))'
616 616 changeset: 9:adb617877056
617 617 parent: 6:eea13746799a
618 618 user: test
619 619 date: Thu Jan 01 00:00:00 1970 +0000
620 620 summary: I
621 621
622 622 changeset: 10:882431a34a0e
623 623 tag: tip
624 624 parent: 6:eea13746799a
625 625 user: test
626 626 date: Thu Jan 01 00:00:00 1970 +0000
627 627 summary: K
628 628
629 629 $ hg tglog
630 630 @ 10: 'K'
631 631 |
632 632 | o 9: 'I'
633 633 |/
634 634 | o 8: 'J'
635 635 | |
636 636 | | o 7: 'H'
637 637 | | |
638 638 o---+ 6: 'G'
639 639 |/ /
640 640 | o 5: 'F'
641 641 | |
642 642 o | 4: 'E'
643 643 |/
644 644 | o 3: 'D'
645 645 | |
646 646 | o 2: 'C'
647 647 | |
648 648 | o 1: 'B'
649 649 |/
650 650 o 0: 'A'
651 651
652
653 Test that rebase is not confused by $CWD disappearing during rebase (issue 4121)
654
655 $ cd ..
656 $ hg init cwd-vanish
657 $ cd cwd-vanish
658 $ touch initial-file
659 $ hg add initial-file
660 $ hg commit -m 'initial commit'
661 $ touch dest-file
662 $ hg add dest-file
663 $ hg commit -m 'dest commit'
664 $ hg up 0
665 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
666 $ touch other-file
667 $ hg add other-file
668 $ hg commit -m 'first source commit'
669 created new head
670 $ mkdir subdir
671 $ cd subdir
672 $ touch subfile
673 $ hg add subfile
674 $ hg commit -m 'second source with subdir'
675 $ hg rebase -b . -d 1 --traceback
676 saved backup bundle to $TESTTMP/cwd-vanish/.hg/strip-backup/779a07b1b7a0-backup.hg (glob)
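The new test exercises the scenario from issue4121: the shell is left sitting in a subdirectory that only exists in the rebased changesets, and while the rebase rewrites the working copy that directory can be removed, so an uncached `os.getcwd()` inside Mercurial would raise OSError and abort the rebase with a traceback. A small standalone illustration of that failure mode (POSIX only; the paths are made up for the example and are not part of the test):

    import os
    import shutil
    import tempfile

    # Illustration only: os.getcwd() fails once the current directory has
    # been deleted, which is why the patch caches it in the dirstate.
    # (Windows refuses to remove the current directory, so this is POSIX.)
    base = tempfile.mkdtemp()
    sub = os.path.join(base, 'subdir')
    os.mkdir(sub)
    os.chdir(sub)

    cached = os.getcwd()   # capture the path while the directory exists
    shutil.rmtree(base)    # the tree containing our cwd disappears

    try:
        os.getcwd()        # typically raises OSError (ENOENT) now
    except OSError:
        print('getcwd() failed; cached value still usable:', cached)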