##// END OF EJS Templates
Merge with crew-stable
Brendan Cully -
r4182:ba51a822 merge default
parent child Browse files
Show More
@@ -1,2222 +1,2227 b''
1 1 # queue.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
34 34 import os, sys, re, errno
35 35
# These mq commands may run without a local repository.
commands.norepo += " qclone qversion"

# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
41 41
class statusentry:
    """One entry of the status file: an applied patch, as 'rev:name'."""

    def __init__(self, rev, name=None):
        if name:
            # explicit pair: rev is already the changeset hash
            self.rev, self.name = rev, name
            return
        # otherwise parse a 'rev:name' status line; a malformed line
        # (no separator) yields a (None, None) placeholder entry
        fields = rev.split(':', 1)
        if len(fields) != 2:
            fields = (None, None)
        self.rev, self.name = fields

    def __str__(self):
        # the exact on-disk status file representation
        return ':'.join([self.rev, self.name])
55 55
56 56 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Load patch queue state from patchdir (default: path/patches)."""
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)  # opens files relative to self.path
        self.ui = ui
        self.applied = []          # list of statusentry for applied patches
        self.full_series = []      # raw series file lines (comments, guards)
        self.applied_dirty = 0     # set when self.applied must be written back
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None  # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None      # lazily created by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
80 80
81 81 def diffopts(self):
82 82 if self._diffopts is None:
83 83 self._diffopts = patch.diffopts(self.ui)
84 84 return self._diffopts
85 85
86 86 def join(self, *p):
87 87 return os.path.join(self.path, *p)
88 88
89 89 def find_series(self, patch):
90 90 pre = re.compile("(\s*)([^#]+)")
91 91 index = 0
92 92 for l in self.full_series:
93 93 m = pre.match(l)
94 94 if m:
95 95 s = m.group(2)
96 96 s = s.rstrip()
97 97 if s == patch:
98 98 return index
99 99 index += 1
100 100 return None
101 101
    # matches one ' #+guard' or ' #-guard' annotation on a series line
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103 103
104 104 def parse_series(self):
105 105 self.series = []
106 106 self.series_guards = []
107 107 for l in self.full_series:
108 108 h = l.find('#')
109 109 if h == -1:
110 110 patch = l
111 111 comment = ''
112 112 elif h == 0:
113 113 continue
114 114 else:
115 115 patch = l[:h]
116 116 comment = l[h:]
117 117 patch = patch.strip()
118 118 if patch:
119 119 if patch in self.series:
120 120 raise util.Abort(_('%s appears more than once in %s') %
121 121 (patch, self.join(self.series_path)))
122 122 self.series.append(patch)
123 123 self.series_guards.append(self.guard_re.findall(comment))
124 124
125 125 def check_guard(self, guard):
126 126 bad_chars = '# \t\r\n\f'
127 127 first = guard[0]
128 128 for c in '-+':
129 129 if first == c:
130 130 return (_('guard %r starts with invalid character: %r') %
131 131 (guard, c))
132 132 for c in bad_chars:
133 133 if c in guard:
134 134 return _('invalid character in guard %r: %r') % (guard, c)
135 135
    def set_active(self, guards):
        """Replace the set of active guards, validating each name first.

        The stored list is deduplicated and sorted, and guards_dirty is
        set so save_dirty() writes it back to the guards file.
        """
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # dict.fromkeys() deduplicates; .keys() is a plain list in Python 2
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
146 146
    def active(self):
        """Return the list of active guards, reading the guards file on
        first use.  Invalid guard names found in the file are reported
        via ui.warn and skipped rather than aborting."""
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []  # no guards file means no active guards
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
163 163
    def set_guards(self, idx, guards):
        """Attach the given '+name'/'-name' guards to series entry idx,
        replacing any guard annotations already on that line."""
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the old guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
177 177
178 178 def pushable(self, idx):
179 179 if isinstance(idx, str):
180 180 idx = self.series.index(idx)
181 181 patchguards = self.series_guards[idx]
182 182 if not patchguards:
183 183 return True, None
184 184 default = False
185 185 guards = self.active()
186 186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 187 if exactneg:
188 188 return False, exactneg[0]
189 189 pos = [g for g in patchguards if g[0] == '+']
190 190 exactpos = [g for g in pos if g[1:] in guards]
191 191 if pos:
192 192 if exactpos:
193 193 return True, exactpos[0]
194 194 return False, pos
195 195 return True, ''
196 196
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why the patch at idx is or is not pushable.

        With all_patches the message goes to ui.write and 'allowing'
        messages are also produced; otherwise only 'skipping' messages
        are emitted, via ui.warn, and only in verbose mode.
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
221 221
    def save_dirty(self):
        """Write back whichever of the status, series and guards files
        were modified in memory (per the *_dirty flags)."""
        def write_list(items, path):
            # one item per line, via the queue-relative opener
            fp = self.opener(path, 'w')
            for i in items:
                print >> fp, i
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231 231
    def readheaders(self, patch):
        """Parse the header of a patch file.

        Returns (message, comments, user, date, diffstart > 1):
        message is the list of commit message lines, comments is every
        line above the diff body (including the message), user and date
        come from 'hg export' ('# User'/'# Date') or mail-style
        ('From:'/'Subject:') headers, and the final flag tells whether an
        actual diff body was found.
        """
        def eatdiff(lines):
            # drop trailing diff-header lines from the collected text
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None       # parser state: None/'hgpatch'/'tag'/'tagdone'
        subject = None
        diffstart = 0       # 0: no diff, 1: saw '--- ', 2: saw a real diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
308 308
    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        """Write the diff between node1 and node2 to fp (default: stdout),
        restricted to the given files, using this queue's diff options."""
        fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)

        patch.diff(repo, node1, node2, fns, match=matchfn,
                   fp=fp, changes=changes, opts=self.diffopts())
315 315
    def mergeone(self, repo, mergeq, head, patch, rev, wlock):
        """Bring one patch over from mergeq (qpush -m helper).

        First tries a plain apply; if that fails, strips the failed
        attempt, merges rev in, commits the merge and regenerates the
        patch file from the merge result.  Returns (err, node).
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev, wlock=wlock)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head, wlock=wlock)
        self.strip(repo, n, update=False, backup='strip', wlock=wlock)

        ctx = repo.changectx(rev)
        ret = hg.merge(repo, rev, wlock=wlock)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        # commit the merge, reusing the original description and user
        n = repo.commit(None, ctx.description(), ctx.user(),
                        force=1, wlock=wlock)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        try:
            message, comments, user, date, patchfound = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file: original comments + diff of the merge
        patchf = self.opener(patch, "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        return (0, n)
353 353
    def qparents(self, repo, rev=None):
        """Return the parent node the patch queue hangs off.

        Without rev: the first dirstate parent, or - at a merge - the
        node of the last applied patch (None when nothing is applied).
        With rev: the parent of rev that is an applied patch, preferring
        the first parent.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            # rev is a merge: pick whichever parent is in the queue
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
372 372
    def mergepatch(self, repo, mergeq, series, wlock):
        """qpush -m: pull each patch in series over from mergeq by
        merging its revision.  Returns (err, last-head-node)."""
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
                            wlock=wlock)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        return (0, head)
410 410
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files touched, fuzz encountered); on failure
        the underlying error is only shown in full with --verbose.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
425 425
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, wlock=None):
        """Apply each pushable patch in series and commit it.

        list appends 'imported patch <name>' to each commit message;
        strict aborts on fuzz; merge marks patched files as merged and
        records merge as second dirstate parent.  Returns (err, node of
        the last successful commit).
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction()
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.warn("applying %s\n" % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                message, comments, user, date, patchfound = self.readheaders(patchname)
            except:
                self.ui.warn("Unable to read %s\n" % patchname)
                err = 1
                break

            if not message:
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            (patcherr, files, fuzz) = self.patch(repo, pf)
            # invert: patcherr below means "the patch did NOT apply"
            patcherr = not patcherr

            if merge and files:
                # Mark as merged and update dirstate parent info
                repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)
            files = patch.updatedir(self.ui, repo, files, wlock=wlock)
            n = repo.commit(files, message, user, date, force=1, lock=lock,
                            wlock=wlock)

            if n == None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(revlog.hex(n), patchname))

            if patcherr:
                # an "empty" patch (no diff body) is not an error
                if not patchfound:
                    self.ui.warn("patch %s is empty\n" % patchname)
                    err = 0
                else:
                    self.ui.warn("patch failed, rejects left in working dir\n")
                    err = 1
                break

            if fuzz and strict:
                self.ui.warn("fuzz found when applying patch, stopping\n")
                err = 1
                break
        tr.close()
        return (err, n)
492 492
    def delete(self, repo, patches, opts):
        """qdelete: remove the named (unapplied) patches from the series.

        With opts['rev'], additionally forgets applied patches matching
        those revisions, which must form a prefix of the applied stack.
        With opts['keep'], the patch files themselves are preserved.
        """
        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # revs must match applied patches from the bottom up
                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        # delete from the back so earlier indices stay valid
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
540 540
    def check_toppatch(self, repo):
        """Abort unless the top applied patch is a parent of the working
        directory; return its node (None when nothing is applied)."""
        if len(self.applied) > 0:
            top = revlog.bin(self.applied[-1].rev)
            pp = repo.dirstate.parents()
            if top not in pp:
                raise util.Abort(_("queue top not at same revision as working directory"))
            return top
        return None
    def check_localchanges(self, repo, force=False, refresh=True):
        """Abort if the working directory has uncommitted changes, unless
        force is set; return the (modified, added, removed, deleted)
        status lists.  refresh only selects the abort message wording."""
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            if not force:
                if refresh:
                    raise util.Abort(_("local changes found, refresh first"))
                else:
                    raise util.Abort(_("local changes found"))
        return m, a, r, d
    def new(self, repo, patch, msg=None, force=None):
        """qnew: create a new patch on top of the queue, committing any
        local changes into it (only allowed with force)."""
        if os.path.exists(self.join(patch)):
            raise util.Abort(_('patch "%s" already exists') % patch)
        m, a, r, d = self.check_localchanges(repo, force)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        wlock = repo.wlock()
        insert = self.full_series_end()
        if msg:
            n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
                            wlock=wlock)
        else:
            n = repo.commit(commitfiles,
                            "New patch: %s" % patch, force=True, wlock=wlock)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        # record the new patch in the series and applied stack
        self.full_series[insert:insert] = [patch]
        self.applied.append(statusentry(revlog.hex(n), patch))
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        p = self.opener(patch, "w")
        if msg:
            msg = msg + "\n"
            p.write(msg)
        p.close()
        # drop our reference to the wlock before touching the queue repo
        wlock = None
        r = self.qrepo()
        if r: r.add([patch])
        if commitfiles:
            # fold the local changes into the freshly created patch
            self.refresh(repo, short=True)
589 589
    def strip(self, repo, rev, update=True, backup="all", wlock=None):
        """Remove rev and its descendants from the repository.

        Branches above rev that do not descend from it are preserved in
        a bundle and re-added afterwards.  backup='all' also bundles
        everything stripped; backup='strip' keeps only the bundle needed
        to restore the unrelated branches.  With update, the working
        directory is first cleaned to rev's queue parent.
        """
        def limitheads(chlog, stop):
            """return the list of all nodes that have no children"""
            p = {}
            h = []
            stoprev = 0
            if stop in chlog.nodemap:
                stoprev = chlog.rev(stop)

            # walk the changelog backwards until stop (or below stoprev)
            for r in xrange(chlog.count() - 1, -1, -1):
                n = chlog.node(r)
                if n not in p:
                    h.append(n)
                if n == stop:
                    break
                if r < stoprev:
                    break
                for pn in chlog.parents(n):
                    p[pn] = 1
            return h

        def bundle(cg):
            # write changegroup cg into .hg/strip-backup, return its path
            backupdir = repo.join("strip-backup")
            if not os.path.isdir(backupdir):
                os.mkdir(backupdir)
            name = os.path.join(backupdir, "%s" % revlog.short(rev))
            name = savename(name)
            self.ui.warn("saving bundle to %s\n" % name)
            return changegroup.writebundle(cg, name, "HG10BZ")

        def stripall(revnum):
            # truncate every filelog touched at or above revnum
            mm = repo.changectx(rev).manifest()
            seen = {}

            for x in xrange(revnum, repo.changelog.count()):
                for f in repo.changectx(x).files():
                    if f in seen:
                        continue
                    seen[f] = 1
                    if f in mm:
                        filerev = mm[f]
                    else:
                        filerev = 0
                    seen[f] = filerev
            # we go in two steps here so the strip loop happens in a
            # sensible order.  When stripping many files, this helps keep
            # our disk access patterns under control.
            seen_list = seen.keys()
            seen_list.sort()
            for f in seen_list:
                ff = repo.file(f)
                filerev = seen[f]
                if filerev != 0:
                    if filerev in ff.nodemap:
                        filerev = ff.rev(filerev)
                    else:
                        filerev = 0
                ff.strip(filerev, revnum)

        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        chlog = repo.changelog
        # TODO delete the undo files, and handle undo of merge sets
        pp = chlog.parents(rev)
        revnum = chlog.rev(rev)

        if update:
            self.check_localchanges(repo, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev, wlock=wlock)
            repo.dirstate.write()

        # save is a list of all the branches we are truncating away
        # that we actually want to keep.  changegroup will be used
        # to preserve them and add them back after the truncate
        saveheads = []
        savebases = {}

        heads = limitheads(chlog, rev)
        seen = {}

        # search through all the heads, finding those where the revision
        # we want to strip away is an ancestor.  Also look for merges
        # that might be turned into new heads by the strip.
        while heads:
            h = heads.pop()
            n = h
            while True:
                seen[n] = 1
                pp = chlog.parents(n)
                if pp[1] != revlog.nullid:
                    for p in pp:
                        if chlog.rev(p) > revnum and p not in seen:
                            heads.append(p)
                if pp[0] == revlog.nullid:
                    break
                if chlog.rev(pp[0]) < revnum:
                    break
                n = pp[0]
                if n == rev:
                    break
            r = chlog.reachable(h, rev)
            if rev not in r:
                # h does not descend from rev: preserve that branch
                saveheads.append(h)
                for x in r:
                    if chlog.rev(x) > revnum:
                        savebases[x] = 1

        # create a changegroup for all the branches we need to keep
        if backup == "all":
            backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
            bundle(backupch)
        if saveheads:
            backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
            chgrpfile = bundle(backupch)

        stripall(revnum)

        change = chlog.read(rev)
        chlog.strip(revnum, revnum)
        repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
        if saveheads:
            # restore the preserved branches from the bundle
            self.ui.status("adding branch\n")
            commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
                              update=False)
            if backup != "strip":
                os.unlink(chgrpfile)
718 718
719 719 def isapplied(self, patch):
720 720 """returns (index, rev, patch)"""
721 721 for i in xrange(len(self.applied)):
722 722 a = self.applied[i]
723 723 if a.name == patch:
724 724 return (i, a.rev, a.name)
725 725 return None
726 726
    # if the exact patch name does not exist, we try a few
    # variations.  If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch reference to a series name.
        Raises util.Abort when nothing matches."""
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, unique substring, or the qtip/qbase aliases;
            # None when ambiguous or unknown
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None
        if patch == None:
            return None

        # we don't want to return a partial match until we make
        # sure the file name passed in does not exist (checked below)
        res = partial_name(patch)
        if res and res == patch:
            return res

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                # a plain number is an index into the series file
                if sno < len(self.series):
                    return self.series[sno]
            if not strict:
                # return any partial match made above
                if res:
                    return res
                # name-num: num patches before the named one
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # name+num: num patches after the named one
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
799 799
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, wlock=None):
        """qpush: apply patches up to and including patch (the next
        unapplied patch when no name is given).  Returns the error
        status from apply/mergepatch."""
        if not wlock:
            wlock = repo.wlock()
        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top' patch is B.
        # qpush C should be performed (moving forward)
        # qpush B is a NOP (no change)
        # qpush A is an error (can't go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(_("cannot push to a previous patch: %s") %
                                     patch)
                if info[0] < len(self.series) - 1:
                    self.ui.warn(_('qpush: %s is already at the top\n') % patch)
                else:
                    self.ui.warn(_('all patches are currently applied\n'))
                return

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent qpush without
        # an argument is an error (nothing to apply).  This allows a loop
        # of "...while hg qpush..." to work as it detects an error when done
        if self.series_end() == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        start = self.series_end()
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1
        s = self.series[start:end]
        if mergeq:
            ret = self.mergepatch(repo, mergeq, s, wlock)
        else:
            ret = self.apply(repo, s, list, wlock=wlock)
        top = self.applied[-1].name
        if ret[0]:
            self.ui.write("Errors during apply, please fix and refresh %s\n" %
                          top)
        else:
            self.ui.write("Now at: %s\n" % top)
        return ret[0]
852 852
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            wlock=None):
        """qpop: unapply patches until patch is the new top (all of them
        with all=True, just the topmost one by default), stripping their
        revisions and restoring the working directory."""
        def getfile(f, rev):
            # restore f's content from revision rev
            t = repo.file(f).read(rev)
            repo.wfile(f, "w").write(t)

        if not wlock:
            wlock = repo.wlock()
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if not update:
            # refuse to leave the dirstate pointing at a stripped rev
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        end = len(self.applied)
        if not patch:
            if all:
                popi = 0
            else:
                popi = len(self.applied) - 1
        else:
            popi = info[0] + 1
            if popi >= end:
                self.ui.warn("qpop: %s is already at the top\n" % patch)
                return
        # info now describes the first (bottom-most) patch to remove
        info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

        start = info[0]
        rev = revlog.bin(info[1])

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            top = self.check_toppatch(repo)
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d, u = repo.status(qp, top)[:5]
            if d:
                raise util.Abort("deletions found between repo revs")
            for f in m:
                getfile(f, mmap[f])
            for f in r:
                getfile(f, mmap[f])
                util.set_exec(repo.wjoin(f), mmap.execf(f))
            repo.dirstate.update(m + r, 'n')
            for f in a:
                # files added by the popped patches disappear entirely
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
            if a:
                repo.dirstate.forget(a)
            repo.dirstate.setparents(qp, revlog.nullid)
        self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
        del self.applied[start:end]
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].name)
        else:
            self.ui.write("Patch queue now empty\n")
937 937
    def diff(self, repo, pats, opts):
        """qdiff: show the changes of the topmost applied patch."""
        top = self.check_toppatch(repo)
        if not top:
            self.ui.write("No patches applied\n")
            return
        qp = self.qparents(repo, top)
        if opts.get('git'):
            self.diffopts().git = True
        self.printdiff(repo, qp, files=pats, opts=opts)
947 947
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the topmost applied patch from the working directory.

        Regenerates the patch file (comment header plus diff) for the top
        applied patch and re-commits it.  If file patterns are given in
        *pats*, only matching changes are folded into the patch; the rest
        stay in the working directory.  opts['msg'] replaces the patch
        message, opts['git'] enables git-style diffs, opts['short'] limits
        the status scan to files already in the patch.

        Returns 1 (without touching anything) when no patch is applied.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        self.check_toppatch(repo)
        (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
        top = revlog.bin(top)
        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)
        message, comments, user, date, patchfound = self.readheaders(patchfn)

        patchf = self.opener(patchfn, "w")
        msg = opts.get('msg', '').rstrip()
        if msg:
            if comments:
                # Remove existing message.
                ci = 0
                for mi in xrange(len(message)):
                    while message[mi] != comments[ci]:
                        ci += 1
                    del comments[ci]
            comments.append(msg)
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)

        if opts.get('git'):
            self.diffopts().git = True
        fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
        tip = repo.changelog.tip()
        if top == tip:
            # if the top of our patch queue is also the tip, there is an
            # optimization here.  We update the dirstate in place and strip
            # off the tip commit.  Then just commit the current directory
            # tree.  We can also send repo.commit the list of files
            # changed to speed up the diff
            #
            # in short mode, we only diff the files included in the
            # patch already
            #
            # this should really read:
            #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            #
            mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
            changes = repo.changelog.read(tip)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            if opts.get('short'):
                filelist = mm + aa + dd
            else:
                filelist = None
            m, a, r, d, u = repo.status(files=filelist)[:5]

            # we might end up with files that were added between tip and
            # the dirstate parent, but then changed in the local dirstate.
            # in this case, we want them to only show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch.  In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            m = util.unique(mm)
            r = util.unique(dd)
            a = util.unique(aa)
            # filter every status column through the user's patterns so the
            # regenerated diff and the commit agree on the file set
            c = [filter(matchfn, l) for l in (m, a, r, [], u)]
            filelist = util.unique(c[0] + c[1] + c[2])
            patch.diff(repo, patchparent, files=filelist, match=matchfn,
                       fp=patchf, changes=c, opts=self.diffopts())
            patchf.close()

            repo.dirstate.setparents(*cparents)
            copies = {}
            for dst in a:
                src = repo.dirstate.copied(dst)
                if src is None:
                    continue
                copies.setdefault(src, []).append(dst)
            repo.dirstate.update(a, 'a')
            # remember the copies between patchparent and tip
            # this may be slow, so don't do it if we're not tracking copies
            if self.diffopts().git:
                for dst in aaa:
                    f = repo.file(dst)
                    src = f.renamed(man[dst])
                    if src:
                        copies[src[0]] = copies.get(dst, [])
                        if dst in a:
                            copies[src[0]].append(dst)
                    # we can't copy a file created by the patch itself
                    if dst in copies:
                        del copies[dst]
            for src, dsts in copies.iteritems():
                for dst in dsts:
                    repo.dirstate.copy(src, dst)
            repo.dirstate.update(r, 'r')
            # if the patch excludes a modified file, mark that file with mtime=0
            # so status can see it.
            mm = []
            for i in xrange(len(m)-1, -1, -1):
                if not matchfn(m[i]):
                    mm.append(m[i])
                    del m[i]
            repo.dirstate.update(m, 'n')
            repo.dirstate.update(mm, 'n', st_mtime=0)
            repo.dirstate.forget(forget)

            if not msg:
                if not message:
                    message = "patch queue: %s\n" % patchfn
                else:
                    message = "\n".join(message)
            else:
                message = msg

            self.strip(repo, top, update=False, backup='strip', wlock=wlock)
            n = repo.commit(filelist, message, changes[1], match=matchfn,
                            force=1, wlock=wlock)
            self.applied[-1] = statusentry(revlog.hex(n), patchfn)
            self.applied_dirty = 1
        else:
            # slow path: the top patch is not the repository tip, so we
            # cannot edit it in place -- regenerate the patch file, undo
            # local additions, then pop and re-push the whole stack
            self.printdiff(repo, patchparent, fp=patchf)
            patchf.close()
            added = repo.status()[1]
            for a in added:
                f = repo.wjoin(a)
                try:
                    os.unlink(f)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(f))
                except: pass
            # forget the file copies in the dirstate
            # push should readd the files later on
            repo.dirstate.forget(added)
            self.pop(repo, force=True, wlock=wlock)
            self.push(repo, force=True, wlock=wlock)
1106 1107
1107 1108 def init(self, repo, create=False):
1108 1109 if not create and os.path.isdir(self.path):
1109 1110 raise util.Abort(_("patch queue directory already exists"))
1110 1111 try:
1111 1112 os.mkdir(self.path)
1112 1113 except OSError, inst:
1113 1114 if inst.errno != errno.EEXIST or not create:
1114 1115 raise
1115 1116 if create:
1116 1117 return self.qrepo(create=True)
1117 1118
    def unapplied(self, repo, patch=None):
        """Return [(series_index, name), ...] for pushable patches that are
        not yet applied.

        With *patch*, start listing just after that series entry; otherwise
        start at the first unapplied position.  Guarded (unpushable)
        patches are skipped, with an explanation printed in verbose mode.
        Aborts if *patch* is not in the series file.
        """
        if patch and patch not in self.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        if not patch:
            start = self.series_end()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explain_pushable(i)
        return unapplied
1132 1133
    def qseries(self, repo, missing=None, start=0, length=0, status=None,
                summary=False):
        """Print the patch series (or, with *missing*, patch files present
        in the queue directory but absent from the series file).

        start/length select a slice of the list; length of 0 means "to the
        end".  status == 'A' lists applied patches by applied order instead
        of series order.  With *summary*, append the first line of each
        patch's message.  In verbose mode each line is prefixed with the
        series index and a state letter: A(pplied), U(napplied), G(uarded),
        or D for missing files.
        """
        def displayname(patchname):
            # patch name, optionally followed by ": first message line"
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        def pname(i):
            if status == 'A':
                return self.applied[i].name
            else:
                return self.series[i]

        applied = dict.fromkeys([p.name for p in self.applied])
        if not length:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                pfx = ''
                patch = pname(i)
                if self.ui.verbose:
                    if patch in applied:
                        stat = 'A'
                    elif self.pushable(i)[0]:
                        stat = 'U'
                    else:
                        stat = 'G'
                    pfx = '%d %s ' % (i, stat)
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the queue directory for patch files not tracked by the
            # series file, skipping mq's own bookkeeping files
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1179 1180
1180 1181 def issaveline(self, l):
1181 1182 if l.name == '.hg.patches.save.line':
1182 1183 return True
1183 1184
1184 1185 def qrepo(self, create=False):
1185 1186 if create or os.path.isdir(self.join(".hg")):
1186 1187 return hg.repository(self.ui, path=self.path, create=create)
1187 1188
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state from the save changeset *rev* (made by qsave).

        The saved state is parsed out of the changeset description:
        a 'Dirstate:' line holds the queue repository parents, and
        everything after 'Patch Data:' lists the applied entries
        ("rev:name") and series entries (":name").  With *delete*, strip
        the save changeset afterwards (only if it is a head); with
        *qupdate*, update the queue repository to the saved parent.

        Returns 1 on malformed save data or a missing queue repository.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart != None:
                l = lines[i].rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart == None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                # stripping a non-head would take descendants with it
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                print "queue directory updating"
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                hg.clean(r, qpp[0])
1243 1244
    def save(self, repo, msg=None):
        """Commit the current queue state as a save changeset.

        The commit message encodes the queue repository parents
        ('Dirstate:' line, when a queue repository exists) and, after
        'Patch Data:', the applied entries followed by the full series
        (each series line prefixed with ':').  A sentinel statusentry
        named '.hg.patches.save.line' is appended to the applied stack so
        restore() can find the state.  Returns 1 when nothing is applied,
        the state is already saved, or the commit fails.
        """
        if len(self.applied) == 0:
            self.ui.warn("save: no patches applied, exiting\n")
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn("status is already saved\n")
            return 1

        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = "hg patches saved state"
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn("repo commit failed\n")
            return 1
        self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
1270 1271
1271 1272 def full_series_end(self):
1272 1273 if len(self.applied) > 0:
1273 1274 p = self.applied[-1].name
1274 1275 end = self.find_series(p)
1275 1276 if end == None:
1276 1277 return len(self.full_series)
1277 1278 return end + 1
1278 1279 return 0
1279 1280
    def series_end(self, all_patches=False):
        """Return the series index of the next patch that would be pushed.

        Starting just past the last applied patch (or at 0 when nothing
        is applied), skip over guarded patches -- explaining each skip --
        until a pushable one is found.  With all_patches=True, do not
        skip guarded patches.  Returns 0 when the top applied patch is
        not in the series file.
        """
        end = 0
        def next(start):
            if all_patches:
                return start
            i = start
            while i < len(self.series):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explain_pushable(i)
                i += 1
            return i
        if len(self.applied) > 0:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return next(end + 1)
        return next(end)
1301 1302
1302 1303 def appliedname(self, index):
1303 1304 pname = self.applied[index].name
1304 1305 if not self.ui.verbose:
1305 1306 p = pname
1306 1307 else:
1307 1308 p = str(self.series.index(pname)) + " " + pname
1308 1309 return p
1309 1310
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        With *rev*, place existing changesets under mq control: the
        revisions must form a linear run ending at the queue parent (when
        patches are applied) or at a head, and each gets exported as a
        patch pushed onto the bottom of the applied stack.  Otherwise
        import patch *files* ('-' reads stdin; *existing* registers a
        file already in the queue directory).  *patchname* overrides the
        name for a single import; *force* allows overwriting; *git*
        selects git-style diffs for exported revisions.  Aborts on any
        name clash or non-linear revision set.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            # process newest-first so each insert lands at stack bottom
            rev.sort(lambda x, y: cmp(y, x))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = revlog.hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [revlog.bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != revlog.nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                checkfile(patchname)
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(revlog.hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = file(filename).read()
                except IOError:
                    raise util.Abort(_("unable to read %s") % patchname)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            checkseries(patchname)
            # insert right after the applied portion of the series
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn("adding %s to series file\n" % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1422 1423
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    # persist the updated series/status files
    mq.save_dirty()
    return 0
1437 1438
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        end = len(q.applied)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # list everything up to and including the named patch
        end = q.series.index(patch) + 1
    if not end:
        return
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1451 1452
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        start = q.series_end()
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # list everything after the named patch
        start = q.series.index(patch) + 1
    q.qseries(repo, start=start, summary=opts.get('summary'))
1462 1463
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
               existing=opts['existing'], force=opts['force'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1485 1486
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        return 0
    # seed the nested queue repository with an ignore file and an
    # empty series file, and schedule both for commit
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1508 1509
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied.  If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination.  If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default.  Use -p <url> to change.
    '''
    commands.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase is the first mq-managed changeset; everything from it
            # up must be excluded from (or stripped out of) the clone
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # remote destination: cannot strip afterwards, so clone
                # only the non-mq heads plus the parent of qbase
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr, dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # local destination: strip the applied patches after the fact
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1550 1555
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
1557 1562
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1562 1567
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    count = len(q.applied)
    if not count:
        ui.write("No patches applied\n")
        return 1
    # the topmost applied patch is the last applied entry
    return q.qseries(repo, start=count - 1, length=1, status='A',
                     summary=opts.get('summary'))
1573 1578
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1582 1587
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if not count:
        ui.write("No patches applied\n")
        return 1
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    # second-from-top applied patch
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1595 1600
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        # let the user edit the message interactively
        msg = ui.edit(msg, ui.username())
    q.new(repo, patch, msg=msg, force=opts['force'])
    q.save_dirty()
    return 0
1614 1619
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        top = q.applied[-1].name
        headers = q.readheaders(top)
        user = headers[2]
        msg = ui.edit('\n'.join(headers[0]), user or ui.username())
    ret = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return ret
1636 1641
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1641 1646
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = commands.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each patch's message to build the combined header
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        # NOTE: this rebinds `files` to the list of files the patch touched
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # `user` is only bound here; the -e path below cannot be reached
        # with an explicit message, so it is always available when needed
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1700 1705
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
    hg qguard -- -foo

    To set guards on another patch:
    hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "name: guard guard ..." for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # if the first argument looks like a guard (+/- prefix) or there are
    # no arguments at all, operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # remaining args are the guards to set (--none clears them)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
1748 1753
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # default to the top applied patch
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    message = repo.mq.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1763 1768
def lastsavename(path):
    """Find the most recent saved queue for *path*.

    Saved queues are named "<path>.N" with a decimal suffix N (see
    savename).  Returns (fullpath, N) for the highest N found in the
    directory, or (None, None) when no saved queue exists.
    """
    (directory, base) = os.path.split(path)
    # Escape `base` and anchor the index at the end of the name.  The old
    # pattern "%s.([0-9]+)" treated '.' (and any regex metacharacter in
    # `base`) as a wildcard and accepted trailing garbage such as
    # "base.12junk", miscounting unrelated files as saved queues.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in os.listdir(directory):
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1780 1785
def savename(path):
    """Return the next unused save name for *path*: "<path>.N" with N one
    greater than the highest existing saved-queue index (or 1)."""
    last, index = lastsavename(path)
    if last is None:
        index = 0
    return "%s.%d" % (path, index + 1)
1787 1792
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all pushes through to the last patch of the series
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        if opts['name']:
            mergepath = opts['name']
        else:
            # fall back to the most recently saved queue
            mergepath = lastsavename(q.path)[0]
        if not mergepath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), mergepath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
                 mergeq=mergeq)
    q.save_dirty()
    return ret
1812 1817
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    name = opts['name']
    if name:
        # pop from a named (saved) queue without touching the working dir
        q = queue(ui, repo.join(""), repo.join(name))
        ui.warn('using patch queue: %s\n' % q.path)
        update = False
    else:
        q = repo.mq
        update = True
    ret = q.pop(repo, patch, force=opts['force'], update=update,
                all=opts['all'])
    q.save_dirty()
    return ret
1826 1831
1827 1832 def rename(ui, repo, patch, name=None, **opts):
1828 1833 """rename a patch
1829 1834
1830 1835 With one argument, renames the current patch to PATCH1.
1831 1836 With two arguments, renames PATCH1 to PATCH2."""
1832 1837
1833 1838 q = repo.mq
1834 1839
1835 1840 if not name:
1836 1841 name = patch
1837 1842 patch = None
1838 1843
1839 1844 if patch:
1840 1845 patch = q.lookup(patch)
1841 1846 else:
1842 1847 if not q.applied:
1843 1848 ui.write(_('No patches applied\n'))
1844 1849 return
1845 1850 patch = q.lookup('qtip')
1846 1851 absdest = q.join(name)
1847 1852 if os.path.isdir(absdest):
1848 1853 name = normname(os.path.join(name, os.path.basename(patch)))
1849 1854 absdest = q.join(name)
1850 1855 if os.path.exists(absdest):
1851 1856 raise util.Abort(_('%s already exists') % absdest)
1852 1857
1853 1858 if name in q.series:
1854 1859 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1855 1860
1856 1861 if ui.verbose:
1857 1862 ui.write('Renaming %s to %s\n' % (patch, name))
1858 1863 i = q.find_series(patch)
1859 1864 guards = q.guard_re.findall(q.full_series[i])
1860 1865 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1861 1866 q.parse_series()
1862 1867 q.series_dirty = 1
1863 1868
1864 1869 info = q.isapplied(patch)
1865 1870 if info:
1866 1871 q.applied[info[0]] = statusentry(info[1], name)
1867 1872 q.applied_dirty = 1
1868 1873
1869 1874 util.rename(q.join(patch), absdest)
1870 1875 r = q.qrepo()
1871 1876 if r:
1872 1877 wlock = r.wlock()
1873 1878 if r.dirstate.state(name) == 'r':
1874 1879 r.undelete([name], wlock)
1875 1880 r.copy(patch, name, wlock)
1876 1881 r.remove([patch], False, wlock)
1877 1882
1878 1883 q.save_dirty()
1879 1884
1880 1885 def restore(ui, repo, rev, **opts):
1881 1886 """restore the queue state saved by a rev"""
1882 1887 rev = repo.lookup(rev)
1883 1888 q = repo.mq
1884 1889 q.restore(repo, rev, delete=opts['delete'],
1885 1890 qupdate=opts['update'])
1886 1891 q.save_dirty()
1887 1892 return 0
1888 1893
1889 1894 def save(ui, repo, **opts):
1890 1895 """save current queue state"""
1891 1896 q = repo.mq
1892 1897 message = commands.logmessage(opts)
1893 1898 ret = q.save(repo, msg=message)
1894 1899 if ret:
1895 1900 return ret
1896 1901 q.save_dirty()
1897 1902 if opts['copy']:
1898 1903 path = q.path
1899 1904 if opts['name']:
1900 1905 newpath = os.path.join(q.basepath, opts['name'])
1901 1906 if os.path.exists(newpath):
1902 1907 if not os.path.isdir(newpath):
1903 1908 raise util.Abort(_('destination %s exists and is not '
1904 1909 'a directory') % newpath)
1905 1910 if not opts['force']:
1906 1911 raise util.Abort(_('destination %s exists, '
1907 1912 'use -f to force') % newpath)
1908 1913 else:
1909 1914 newpath = savename(path)
1910 1915 ui.warn("copy %s to %s\n" % (path, newpath))
1911 1916 util.copyfiles(path, newpath)
1912 1917 if opts['empty']:
1913 1918 try:
1914 1919 os.unlink(q.join(q.status_path))
1915 1920 except:
1916 1921 pass
1917 1922 return 0
1918 1923
1919 1924 def strip(ui, repo, rev, **opts):
1920 1925 """strip a revision and all later revs on the same branch"""
1921 1926 rev = repo.lookup(rev)
1922 1927 backup = 'all'
1923 1928 if opts['backup']:
1924 1929 backup = 'strip'
1925 1930 elif opts['nobackup']:
1926 1931 backup = 'none'
1927 1932 update = repo.dirstate.parents()[0] != revlog.nullid
1928 1933 repo.mq.strip(repo, rev, backup=backup, update=update)
1929 1934 return 0
1930 1935
1931 1936 def select(ui, repo, *args, **opts):
1932 1937 '''set or print guarded patches to push
1933 1938
1934 1939 Use the qguard command to set or print guards on patch, then use
1935 1940 qselect to tell mq which guards to use. A patch will be pushed if it
1936 1941 has no guards or any positive guards match the currently selected guard,
1937 1942 but will not be pushed if any negative guards match the current guard.
1938 1943 For example:
1939 1944
1940 1945 qguard foo.patch -stable (negative guard)
1941 1946 qguard bar.patch +stable (positive guard)
1942 1947 qselect stable
1943 1948
1944 1949 This activates the "stable" guard. mq will skip foo.patch (because
1945 1950 it has a negative match) but push bar.patch (because it
1946 1951 has a positive match).
1947 1952
1948 1953 With no arguments, prints the currently active guards.
1949 1954 With one argument, sets the active guard.
1950 1955
1951 1956 Use -n/--none to deactivate guards (no other arguments needed).
1952 1957 When no guards are active, patches with positive guards are skipped
1953 1958 and patches with negative guards are pushed.
1954 1959
1955 1960 qselect can change the guards on applied patches. It does not pop
1956 1961 guarded patches by default. Use --pop to pop back to the last applied
1957 1962 patch that is not guarded. Use --reapply (which implies --pop) to push
1958 1963 back to the current patch afterwards, but skip guarded patches.
1959 1964
1960 1965 Use -s/--series to print a list of all guards in the series file (no
1961 1966 other arguments needed). Use -v for more information.'''
1962 1967
1963 1968 q = repo.mq
1964 1969 guards = q.active()
1965 1970 if args or opts['none']:
1966 1971 old_unapplied = q.unapplied(repo)
1967 1972 old_guarded = [i for i in xrange(len(q.applied)) if
1968 1973 not q.pushable(i)[0]]
1969 1974 q.set_active(args)
1970 1975 q.save_dirty()
1971 1976 if not args:
1972 1977 ui.status(_('guards deactivated\n'))
1973 1978 if not opts['pop'] and not opts['reapply']:
1974 1979 unapplied = q.unapplied(repo)
1975 1980 guarded = [i for i in xrange(len(q.applied))
1976 1981 if not q.pushable(i)[0]]
1977 1982 if len(unapplied) != len(old_unapplied):
1978 1983 ui.status(_('number of unguarded, unapplied patches has '
1979 1984 'changed from %d to %d\n') %
1980 1985 (len(old_unapplied), len(unapplied)))
1981 1986 if len(guarded) != len(old_guarded):
1982 1987 ui.status(_('number of guarded, applied patches has changed '
1983 1988 'from %d to %d\n') %
1984 1989 (len(old_guarded), len(guarded)))
1985 1990 elif opts['series']:
1986 1991 guards = {}
1987 1992 noguards = 0
1988 1993 for gs in q.series_guards:
1989 1994 if not gs:
1990 1995 noguards += 1
1991 1996 for g in gs:
1992 1997 guards.setdefault(g, 0)
1993 1998 guards[g] += 1
1994 1999 if ui.verbose:
1995 2000 guards['NONE'] = noguards
1996 2001 guards = guards.items()
1997 2002 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
1998 2003 if guards:
1999 2004 ui.note(_('guards in series file:\n'))
2000 2005 for guard, count in guards:
2001 2006 ui.note('%2d ' % count)
2002 2007 ui.write(guard, '\n')
2003 2008 else:
2004 2009 ui.note(_('no guards in series file\n'))
2005 2010 else:
2006 2011 if guards:
2007 2012 ui.note(_('active guards:\n'))
2008 2013 for g in guards:
2009 2014 ui.write(g, '\n')
2010 2015 else:
2011 2016 ui.write(_('no active guards\n'))
2012 2017 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2013 2018 popped = False
2014 2019 if opts['pop'] or opts['reapply']:
2015 2020 for i in xrange(len(q.applied)):
2016 2021 pushable, reason = q.pushable(i)
2017 2022 if not pushable:
2018 2023 ui.status(_('popping guarded patches\n'))
2019 2024 popped = True
2020 2025 if i == 0:
2021 2026 q.pop(repo, all=True)
2022 2027 else:
2023 2028 q.pop(repo, i-1)
2024 2029 break
2025 2030 if popped:
2026 2031 try:
2027 2032 if reapply:
2028 2033 ui.status(_('reapplying unguarded patches\n'))
2029 2034 q.push(repo, reapply)
2030 2035 finally:
2031 2036 q.save_dirty()
2032 2037
2033 2038 def reposetup(ui, repo):
2034 2039 class mqrepo(repo.__class__):
2035 2040 def abort_if_wdir_patched(self, errmsg, force=False):
2036 2041 if self.mq.applied and not force:
2037 2042 parent = revlog.hex(self.dirstate.parents()[0])
2038 2043 if parent in [s.rev for s in self.mq.applied]:
2039 2044 raise util.Abort(errmsg)
2040 2045
2041 2046 def commit(self, *args, **opts):
2042 2047 if len(args) >= 6:
2043 2048 force = args[5]
2044 2049 else:
2045 2050 force = opts.get('force')
2046 2051 self.abort_if_wdir_patched(
2047 2052 _('cannot commit over an applied mq patch'),
2048 2053 force)
2049 2054
2050 2055 return super(mqrepo, self).commit(*args, **opts)
2051 2056
2052 2057 def push(self, remote, force=False, revs=None):
2053 2058 if self.mq.applied and not force and not revs:
2054 2059 raise util.Abort(_('source has mq patches applied'))
2055 2060 return super(mqrepo, self).push(remote, force, revs)
2056 2061
2057 2062 def tags(self):
2058 2063 if self.tagscache:
2059 2064 return self.tagscache
2060 2065
2061 2066 tagscache = super(mqrepo, self).tags()
2062 2067
2063 2068 q = self.mq
2064 2069 if not q.applied:
2065 2070 return tagscache
2066 2071
2067 2072 mqtags = [(patch.rev, patch.name) for patch in q.applied]
2068 2073 mqtags.append((mqtags[-1][0], 'qtip'))
2069 2074 mqtags.append((mqtags[0][0], 'qbase'))
2070 2075 for patch in mqtags:
2071 2076 if patch[1] in tagscache:
2072 2077 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2073 2078 else:
2074 2079 tagscache[patch[1]] = revlog.bin(patch[0])
2075 2080
2076 2081 return tagscache
2077 2082
2078 2083 def _branchtags(self):
2079 2084 q = self.mq
2080 2085 if not q.applied:
2081 2086 return super(mqrepo, self)._branchtags()
2082 2087
2083 2088 self.branchcache = {} # avoid recursion in changectx
2084 2089 cl = self.changelog
2085 2090 partial, last, lrev = self._readbranchcache()
2086 2091
2087 2092 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2088 2093 start = lrev + 1
2089 2094 if start < qbase:
2090 2095 # update the cache (excluding the patches) and save it
2091 2096 self._updatebranchcache(partial, lrev+1, qbase)
2092 2097 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2093 2098 start = qbase
2094 2099 # if start = qbase, the cache is as updated as it should be.
2095 2100 # if start > qbase, the cache includes (part of) the patches.
2096 2101 # we might as well use it, but we won't save it.
2097 2102
2098 2103 # update the cache up to the tip
2099 2104 self._updatebranchcache(partial, start, cl.count())
2100 2105
2101 2106 return partial
2102 2107
2103 2108 if repo.local():
2104 2109 repo.__class__ = mqrepo
2105 2110 repo.mq = queue(ui, repo.join(""))
2106 2111
2107 2112 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2108 2113
2109 2114 cmdtable = {
2110 2115 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2111 2116 "qclone": (clone,
2112 2117 [('', 'pull', None, _('use pull protocol to copy metadata')),
2113 2118 ('U', 'noupdate', None, _('do not update the new working directories')),
2114 2119 ('', 'uncompressed', None,
2115 2120 _('use uncompressed transfer (fast over LAN)')),
2116 2121 ('e', 'ssh', '', _('specify ssh command to use')),
2117 2122 ('p', 'patches', '', _('location of source patch repo')),
2118 2123 ('', 'remotecmd', '',
2119 2124 _('specify hg command to run on the remote side'))],
2120 2125 'hg qclone [OPTION]... SOURCE [DEST]'),
2121 2126 "qcommit|qci":
2122 2127 (commit,
2123 2128 commands.table["^commit|ci"][1],
2124 2129 'hg qcommit [OPTION]... [FILE]...'),
2125 2130 "^qdiff": (diff,
2126 2131 [('g', 'git', None, _('use git extended diff format')),
2127 2132 ('I', 'include', [], _('include names matching the given patterns')),
2128 2133 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2129 2134 'hg qdiff [-I] [-X] [FILE]...'),
2130 2135 "qdelete|qremove|qrm":
2131 2136 (delete,
2132 2137 [('k', 'keep', None, _('keep patch file')),
2133 2138 ('r', 'rev', [], _('stop managing a revision'))],
2134 2139 'hg qdelete [-k] [-r REV]... PATCH...'),
2135 2140 'qfold':
2136 2141 (fold,
2137 2142 [('e', 'edit', None, _('edit patch header')),
2138 2143 ('k', 'keep', None, _('keep folded patch files'))
2139 2144 ] + commands.commitopts,
2140 2145 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2141 2146 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2142 2147 ('n', 'none', None, _('drop all guards'))],
2143 2148 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2144 2149 'qheader': (header, [],
2145 2150 _('hg qheader [PATCH]')),
2146 2151 "^qimport":
2147 2152 (qimport,
2148 2153 [('e', 'existing', None, 'import file in patch dir'),
2149 2154 ('n', 'name', '', 'patch file name'),
2150 2155 ('f', 'force', None, 'overwrite existing files'),
2151 2156 ('r', 'rev', [], 'place existing revisions under mq control'),
2152 2157 ('g', 'git', None, _('use git extended diff format'))],
2153 2158 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2154 2159 "^qinit":
2155 2160 (init,
2156 2161 [('c', 'create-repo', None, 'create queue repository')],
2157 2162 'hg qinit [-c]'),
2158 2163 "qnew":
2159 2164 (new,
2160 2165 [('e', 'edit', None, _('edit commit message')),
2161 2166 ('f', 'force', None, _('import uncommitted changes into patch'))
2162 2167 ] + commands.commitopts,
2163 2168 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2164 2169 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2165 2170 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2166 2171 "^qpop":
2167 2172 (pop,
2168 2173 [('a', 'all', None, 'pop all patches'),
2169 2174 ('n', 'name', '', 'queue name to pop'),
2170 2175 ('f', 'force', None, 'forget any local changes')],
2171 2176 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2172 2177 "^qpush":
2173 2178 (push,
2174 2179 [('f', 'force', None, 'apply if the patch has rejects'),
2175 2180 ('l', 'list', None, 'list patch name in commit text'),
2176 2181 ('a', 'all', None, 'apply all patches'),
2177 2182 ('m', 'merge', None, 'merge from another queue'),
2178 2183 ('n', 'name', '', 'merge queue name')],
2179 2184 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2180 2185 "^qrefresh":
2181 2186 (refresh,
2182 2187 [('e', 'edit', None, _('edit commit message')),
2183 2188 ('g', 'git', None, _('use git extended diff format')),
2184 2189 ('s', 'short', None, 'refresh only files already in the patch'),
2185 2190 ('I', 'include', [], _('include names matching the given patterns')),
2186 2191 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2187 2192 ] + commands.commitopts,
2188 2193 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2189 2194 'qrename|qmv':
2190 2195 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2191 2196 "qrestore":
2192 2197 (restore,
2193 2198 [('d', 'delete', None, 'delete save entry'),
2194 2199 ('u', 'update', None, 'update queue working dir')],
2195 2200 'hg qrestore [-d] [-u] REV'),
2196 2201 "qsave":
2197 2202 (save,
2198 2203 [('c', 'copy', None, 'copy patch directory'),
2199 2204 ('n', 'name', '', 'copy directory name'),
2200 2205 ('e', 'empty', None, 'clear queue status file'),
2201 2206 ('f', 'force', None, 'force copy')] + commands.commitopts,
2202 2207 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2203 2208 "qselect": (select,
2204 2209 [('n', 'none', None, _('disable all guards')),
2205 2210 ('s', 'series', None, _('list all guards in series file')),
2206 2211 ('', 'pop', None,
2207 2212 _('pop to before first guarded applied patch')),
2208 2213 ('', 'reapply', None, _('pop, then reapply patches'))],
2209 2214 'hg qselect [OPTION...] [GUARD...]'),
2210 2215 "qseries":
2211 2216 (series,
2212 2217 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2213 2218 'hg qseries [-ms]'),
2214 2219 "^strip":
2215 2220 (strip,
2216 2221 [('f', 'force', None, 'force multi-head removal'),
2217 2222 ('b', 'backup', None, 'bundle unrelated changesets'),
2218 2223 ('n', 'nobackup', None, 'no backups')],
2219 2224 'hg strip [-f] [-b] [-n] REV'),
2220 2225 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2221 2226 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2222 2227 }
@@ -1,534 +1,534 b''
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import *
11 11 from i18n import _
12 12 import struct, os, time, bisect, stat, strutil, util, re, errno
13 13
14 14 class dirstate(object):
15 15 format = ">cllll"
16 16
17 17 def __init__(self, opener, ui, root):
18 18 self.opener = opener
19 19 self.root = root
20 20 self.dirty = 0
21 21 self.ui = ui
22 22 self.map = None
23 23 self.pl = None
24 24 self.dirs = None
25 25 self.copymap = {}
26 26 self.ignorefunc = None
27 27
28 28 def wjoin(self, f):
29 29 return os.path.join(self.root, f)
30 30
31 31 def getcwd(self):
32 32 cwd = os.getcwd()
33 33 if cwd == self.root: return ''
34 34 # self.root ends with a path separator if self.root is '/' or 'C:\'
35 35 common_prefix_len = len(self.root)
36 36 if not self.root.endswith(os.sep):
37 37 common_prefix_len += 1
38 38 return cwd[common_prefix_len:]
39 39
40 40 def hgignore(self):
41 41 '''return the contents of .hgignore files as a list of patterns.
42 42
43 43 the files parsed for patterns include:
44 44 .hgignore in the repository root
45 45 any additional files specified in the [ui] section of ~/.hgrc
46 46
47 47 trailing white space is dropped.
48 48 the escape character is backslash.
49 49 comments start with #.
50 50 empty lines are skipped.
51 51
52 52 lines can be of the following formats:
53 53
54 54 syntax: regexp # defaults following lines to non-rooted regexps
55 55 syntax: glob # defaults following lines to non-rooted globs
56 56 re:pattern # non-rooted regular expression
57 57 glob:pattern # non-rooted glob
58 58 pattern # pattern of the current default type'''
59 59 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
60 60 def parselines(fp):
61 61 for line in fp:
62 62 escape = False
63 63 for i in xrange(len(line)):
64 64 if escape: escape = False
65 65 elif line[i] == '\\': escape = True
66 66 elif line[i] == '#': break
67 67 line = line[:i].rstrip()
68 68 if line: yield line
69 69 repoignore = self.wjoin('.hgignore')
70 70 files = [repoignore]
71 71 files.extend(self.ui.hgignorefiles())
72 72 pats = {}
73 73 for f in files:
74 74 try:
75 75 pats[f] = []
76 76 fp = open(f)
77 77 syntax = 'relre:'
78 78 for line in parselines(fp):
79 79 if line.startswith('syntax:'):
80 80 s = line[7:].strip()
81 81 try:
82 82 syntax = syntaxes[s]
83 83 except KeyError:
84 84 self.ui.warn(_("%s: ignoring invalid "
85 85 "syntax '%s'\n") % (f, s))
86 86 continue
87 87 pat = syntax + line
88 88 for s in syntaxes.values():
89 89 if line.startswith(s):
90 90 pat = line
91 91 break
92 92 pats[f].append(pat)
93 93 except IOError, inst:
94 94 if f != repoignore:
95 95 self.ui.warn(_("skipping unreadable ignore file"
96 96 " '%s': %s\n") % (f, inst.strerror))
97 97 return pats
98 98
99 99 def ignore(self, fn):
100 100 '''default match function used by dirstate and
101 101 localrepository. this honours the repository .hgignore file
102 102 and any other files specified in the [ui] section of .hgrc.'''
103 103 if not self.ignorefunc:
104 104 ignore = self.hgignore()
105 105 allpats = []
106 106 [allpats.extend(patlist) for patlist in ignore.values()]
107 107 if allpats:
108 108 try:
109 109 files, self.ignorefunc, anypats = (
110 110 util.matcher(self.root, inc=allpats, src='.hgignore'))
111 111 except util.Abort:
112 112 # Re-raise an exception where the src is the right file
113 113 for f, patlist in ignore.items():
114 114 files, self.ignorefunc, anypats = (
115 115 util.matcher(self.root, inc=patlist, src=f))
116 116 else:
117 117 self.ignorefunc = util.never
118 118 return self.ignorefunc(fn)
119 119
120 120 def __del__(self):
121 121 if self.dirty:
122 122 self.write()
123 123
124 124 def __getitem__(self, key):
125 125 try:
126 126 return self.map[key]
127 127 except TypeError:
128 128 self.lazyread()
129 129 return self[key]
130 130
131 131 def __contains__(self, key):
132 132 self.lazyread()
133 133 return key in self.map
134 134
135 135 def parents(self):
136 136 self.lazyread()
137 137 return self.pl
138 138
139 139 def markdirty(self):
140 140 if not self.dirty:
141 141 self.dirty = 1
142 142
143 143 def setparents(self, p1, p2=nullid):
144 144 self.lazyread()
145 145 self.markdirty()
146 146 self.pl = p1, p2
147 147
148 148 def state(self, key):
149 149 try:
150 150 return self[key][0]
151 151 except KeyError:
152 152 return "?"
153 153
154 154 def lazyread(self):
155 155 if self.map is None:
156 156 self.read()
157 157
158 158 def parse(self, st):
159 159 self.pl = [st[:20], st[20: 40]]
160 160
161 161 # deref fields so they will be local in loop
162 162 map = self.map
163 163 copymap = self.copymap
164 164 format = self.format
165 165 unpack = struct.unpack
166 166
167 167 pos = 40
168 168 e_size = struct.calcsize(format)
169 169
170 170 while pos < len(st):
171 171 newpos = pos + e_size
172 172 e = unpack(format, st[pos:newpos])
173 173 l = e[4]
174 174 pos = newpos
175 175 newpos = pos + l
176 176 f = st[pos:newpos]
177 177 if '\0' in f:
178 178 f, c = f.split('\0')
179 179 copymap[f] = c
180 180 map[f] = e[:4]
181 181 pos = newpos
182 182
183 183 def read(self):
184 184 self.map = {}
185 185 self.pl = [nullid, nullid]
186 186 try:
187 187 st = self.opener("dirstate").read()
188 188 if st:
189 189 self.parse(st)
190 190 except IOError, err:
191 191 if err.errno != errno.ENOENT: raise
192 192
193 193 def copy(self, source, dest):
194 194 self.lazyread()
195 195 self.markdirty()
196 196 self.copymap[dest] = source
197 197
198 198 def copied(self, file):
199 199 return self.copymap.get(file, None)
200 200
201 201 def copies(self):
202 202 return self.copymap
203 203
204 204 def initdirs(self):
205 205 if self.dirs is None:
206 206 self.dirs = {}
207 207 for f in self.map:
208 208 self.updatedirs(f, 1)
209 209
210 210 def updatedirs(self, path, delta):
211 211 if self.dirs is not None:
212 212 for c in strutil.findall(path, '/'):
213 213 pc = path[:c]
214 214 self.dirs.setdefault(pc, 0)
215 215 self.dirs[pc] += delta
216 216
217 217 def checkinterfering(self, files):
218 218 def prefixes(f):
219 219 for c in strutil.rfindall(f, '/'):
220 220 yield f[:c]
221 221 self.lazyread()
222 222 self.initdirs()
223 223 seendirs = {}
224 224 for f in files:
225 225 # shadows
226 226 if self.dirs.get(f):
227 227 raise util.Abort(_('directory named %r already in dirstate') %
228 228 f)
229 229 for d in prefixes(f):
230 230 if d in seendirs:
231 231 break
232 232 if d in self.map:
233 233 raise util.Abort(_('file named %r already in dirstate') %
234 234 d)
235 235 seendirs[d] = True
236 236 # disallowed
237 237 if '\r' in f or '\n' in f:
238 238 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
239 239
240 240 def update(self, files, state, **kw):
241 241 ''' current states:
242 242 n normal
243 243 m needs merging
244 244 r marked for removal
245 245 a marked for addition'''
246 246
247 247 if not files: return
248 248 self.lazyread()
249 249 self.markdirty()
250 250 if state == "a":
251 251 self.initdirs()
252 252 self.checkinterfering(files)
253 253 for f in files:
254 254 if state == "r":
255 255 self.map[f] = ('r', 0, 0, 0)
256 256 self.updatedirs(f, -1)
257 257 else:
258 258 if state == "a":
259 259 self.updatedirs(f, 1)
260 260 s = os.lstat(self.wjoin(f))
261 261 st_size = kw.get('st_size', s.st_size)
262 262 st_mtime = kw.get('st_mtime', s.st_mtime)
263 263 self.map[f] = (state, s.st_mode, st_size, st_mtime)
264 264 if self.copymap.has_key(f):
265 265 del self.copymap[f]
266 266
267 267 def forget(self, files):
268 268 if not files: return
269 269 self.lazyread()
270 270 self.markdirty()
271 271 self.initdirs()
272 272 for f in files:
273 273 try:
274 274 del self.map[f]
275 275 self.updatedirs(f, -1)
276 276 except KeyError:
277 277 self.ui.warn(_("not in dirstate: %s!\n") % f)
278 278 pass
279 279
280 280 def clear(self):
281 281 self.map = {}
282 282 self.copymap = {}
283 283 self.dirs = None
284 284 self.markdirty()
285 285
286 286 def rebuild(self, parent, files):
287 287 self.clear()
288 288 for f in files:
289 289 if files.execf(f):
290 290 self.map[f] = ('n', 0777, -1, 0)
291 291 else:
292 292 self.map[f] = ('n', 0666, -1, 0)
293 293 self.pl = (parent, nullid)
294 294 self.markdirty()
295 295
296 296 def write(self):
297 297 if not self.dirty:
298 298 return
299 299 st = self.opener("dirstate", "w", atomic=True)
300 300 st.write("".join(self.pl))
301 301 for f, e in self.map.items():
302 302 c = self.copied(f)
303 303 if c:
304 304 f = f + "\0" + c
305 305 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
306 306 st.write(e + f)
307 307 self.dirty = 0
308 308
309 309 def filterfiles(self, files):
310 310 ret = {}
311 311 unknown = []
312 312
313 313 for x in files:
314 314 if x == '.':
315 315 return self.map.copy()
316 316 if x not in self.map:
317 317 unknown.append(x)
318 318 else:
319 319 ret[x] = self.map[x]
320 320
321 321 if not unknown:
322 322 return ret
323 323
324 324 b = self.map.keys()
325 325 b.sort()
326 326 blen = len(b)
327 327
328 328 for x in unknown:
329 329 bs = bisect.bisect(b, "%s%s" % (x, '/'))
330 330 while bs < blen:
331 331 s = b[bs]
332 332 if len(s) > len(x) and s.startswith(x):
333 333 ret[s] = self.map[s]
334 334 else:
335 335 break
336 336 bs += 1
337 337 return ret
338 338
339 339 def supported_type(self, f, st, verbose=False):
340 340 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
341 341 return True
342 342 if verbose:
343 343 kind = 'unknown'
344 344 if stat.S_ISCHR(st.st_mode): kind = _('character device')
345 345 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
346 346 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
347 347 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
348 348 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
349 349 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
350 350 util.pathto(self.getcwd(), f),
351 351 kind))
352 352 return False
353 353
354 354 def walk(self, files=None, match=util.always, badmatch=None):
355 355 # filter out the stat
356 356 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
357 357 yield src, f
358 358
359 359 def statwalk(self, files=None, match=util.always, ignored=False,
360 360 badmatch=None, directories=False):
361 361 '''
362 362 walk recursively through the directory tree, finding all files
363 363 matched by the match function
364 364
365 365 results are yielded in a tuple (src, filename, st), where src
366 366 is one of:
367 367 'f' the file was found in the directory tree
368 368 'd' the file is a directory of the tree
369 369 'm' the file was only in the dirstate and not in the tree
370 370 'b' file was not found and matched badmatch
371 371
372 372 and st is the stat result if the file was found in the directory.
373 373 '''
374 374 self.lazyread()
375 375
376 376 # walk all files by default
377 377 if not files:
378 files = [self.root]
378 files = ['.']
379 379 dc = self.map.copy()
380 380 else:
381 381 files = util.unique(files)
382 382 dc = self.filterfiles(files)
383 383
384 384 def imatch(file_):
385 385 if file_ not in dc and self.ignore(file_):
386 386 return False
387 387 return match(file_)
388 388
389 389 if ignored: imatch = match
390 390
391 391 # self.root may end with a path separator when self.root == '/'
392 392 common_prefix_len = len(self.root)
393 393 if not self.root.endswith(os.sep):
394 394 common_prefix_len += 1
395 395 # recursion free walker, faster than os.walk.
396 396 def findfiles(s):
397 397 work = [s]
398 398 if directories:
399 399 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
400 400 while work:
401 401 top = work.pop()
402 402 names = os.listdir(top)
403 403 names.sort()
404 404 # nd is the top of the repository dir tree
405 405 nd = util.normpath(top[common_prefix_len:])
406 406 if nd == '.':
407 407 nd = ''
408 408 else:
409 409 # do not recurse into a repo contained in this
410 410 # one. use bisect to find .hg directory so speed
411 411 # is good on big directory.
412 412 hg = bisect.bisect_left(names, '.hg')
413 413 if hg < len(names) and names[hg] == '.hg':
414 414 if os.path.isdir(os.path.join(top, '.hg')):
415 415 continue
416 416 for f in names:
417 417 np = util.pconvert(os.path.join(nd, f))
418 418 if seen(np):
419 419 continue
420 420 p = os.path.join(top, f)
421 421 # don't trip over symlinks
422 422 st = os.lstat(p)
423 423 if stat.S_ISDIR(st.st_mode):
424 424 ds = util.pconvert(os.path.join(nd, f +'/'))
425 425 if imatch(ds):
426 426 work.append(p)
427 427 if directories:
428 428 yield 'd', np, st
429 429 if imatch(np) and np in dc:
430 430 yield 'm', np, st
431 431 elif imatch(np):
432 432 if self.supported_type(np, st):
433 433 yield 'f', np, st
434 434 elif np in dc:
435 435 yield 'm', np, st
436 436
437 437 known = {'.hg': 1}
438 438 def seen(fn):
439 439 if fn in known: return True
440 440 known[fn] = 1
441 441
442 442 # step one, find all files that match our criteria
443 443 files.sort()
444 444 for ff in files:
445 445 nf = util.normpath(ff)
446 446 f = self.wjoin(ff)
447 447 try:
448 448 st = os.lstat(f)
449 449 except OSError, inst:
450 450 found = False
451 451 for fn in dc:
452 452 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
453 453 found = True
454 454 break
455 455 if not found:
456 456 if inst.errno != errno.ENOENT or not badmatch:
457 457 self.ui.warn('%s: %s\n' % (
458 458 util.pathto(self.getcwd(), ff),
459 459 inst.strerror))
460 460 elif badmatch and badmatch(ff) and imatch(nf):
461 461 yield 'b', ff, None
462 462 continue
463 463 if stat.S_ISDIR(st.st_mode):
464 464 cmp1 = (lambda x, y: cmp(x[1], y[1]))
465 465 sorted_ = [ x for x in findfiles(f) ]
466 466 sorted_.sort(cmp1)
467 467 for e in sorted_:
468 468 yield e
469 469 else:
470 470 if not seen(nf) and match(nf):
471 471 if self.supported_type(ff, st, verbose=True):
472 472 yield 'f', nf, st
473 473 elif ff in dc:
474 474 yield 'm', nf, st
475 475
476 476 # step two run through anything left in the dc hash and yield
477 477 # if we haven't already seen it
478 478 ks = dc.keys()
479 479 ks.sort()
480 480 for k in ks:
481 481 if not seen(k) and imatch(k):
482 482 yield 'm', k, None
483 483
484 484 def status(self, files=None, match=util.always, list_ignored=False,
485 485 list_clean=False):
486 486 lookup, modified, added, unknown, ignored = [], [], [], [], []
487 487 removed, deleted, clean = [], [], []
488 488
489 489 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
490 490 try:
491 491 type_, mode, size, time = self[fn]
492 492 except KeyError:
493 493 if list_ignored and self.ignore(fn):
494 494 ignored.append(fn)
495 495 else:
496 496 unknown.append(fn)
497 497 continue
498 498 if src == 'm':
499 499 nonexistent = True
500 500 if not st:
501 501 try:
502 502 st = os.lstat(self.wjoin(fn))
503 503 except OSError, inst:
504 504 if inst.errno != errno.ENOENT:
505 505 raise
506 506 st = None
507 507 # We need to re-check that it is a valid file
508 508 if st and self.supported_type(fn, st):
509 509 nonexistent = False
510 510 # XXX: what to do with file no longer present in the fs
511 511 # who are not removed in the dirstate ?
512 512 if nonexistent and type_ in "nm":
513 513 deleted.append(fn)
514 514 continue
515 515 # check the common case first
516 516 if type_ == 'n':
517 517 if not st:
518 518 st = os.lstat(self.wjoin(fn))
519 519 if size >= 0 and (size != st.st_size
520 520 or (mode ^ st.st_mode) & 0100):
521 521 modified.append(fn)
522 522 elif time != int(st.st_mtime):
523 523 lookup.append(fn)
524 524 elif list_clean:
525 525 clean.append(fn)
526 526 elif type_ == 'm':
527 527 modified.append(fn)
528 528 elif type_ == 'a':
529 529 added.append(fn)
530 530 elif type_ == 'r':
531 531 removed.append(fn)
532 532
533 533 return (lookup, modified, added, removed, deleted, unknown, ignored,
534 534 clean)
@@ -1,1949 +1,1949 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
class localrepository(repo.repository):
    # wire-protocol capabilities advertised to clients
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk layout requirements this class can read (see __init__)
    supported = ('revlogv1', 'store')
    # feature names understood by the branches.cache file format
    branchcache_features = ('unnamed',)
19 19
    def __del__(self):
        # Drop the reference to any pending transaction so it can be
        # garbage-collected (and thereby aborted/cleaned up) with the repo.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) a local repository.

        parentui: ui object the repo-level ui is derived from.
        path: repository root; when None, search upward from the cwd for
              a directory containing '.hg'.
        create: when true, make a fresh repository at path; raises
              RepoError if one already exists there.
        Raises repo.RepoError when no repo is found, the repo already
        exists (create), or a layout requirement is unsupported.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the cwd until a .hg directory is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # old repos have no requires file; assume defaults
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, revlogs live under
        # .hg/store with encoded filenames; otherwise directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # revlog format/flags configured in .hgrc
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-filled caches (see tags()/branchtags()/nodetags())
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink predicate; a no-op on filesystems without symlinks
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
132 132
133 133 def url(self):
134 134 return 'file:' + self.root
135 135
    def hook(self, name, throw=False, **args):
        """Run all configured hooks matching *name*.

        Hooks come from the [hooks] config section; entries whose key (up
        to the first '.') equals *name* run in sorted key order.  A value
        that is callable or starts with 'python:' runs in-process, anything
        else runs as a shell command with HG_* environment variables built
        from **args.  Returns True if any hook "failed"; with throw=True a
        failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # resolve "module.attr.attr" to a callable
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # external hook: run cmd with hook args exported as HG_* vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
220 220
    # characters that may never appear in a tag name (checked by _tag)
    tag_disallowed = ':\r\n'
222 222
    def _tag(self, name, node, message, local, user, date, parent=None):
        """Record a tag, either locally or by committing to .hgtags.

        parent: changeset whose .hgtags to extend; None means use the
        working directory (and its dirstate).  Fires the 'pretag' hook
        (which may veto) before and the 'tag' hook after.  Returns the
        tagging changeset's node, or None for a local tag.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            # base the new .hgtags on the given parent's copy
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            # .hgtags did not exist before: start tracking it
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
253 253
254 254 def tag(self, name, node, message, local, user, date):
255 255 '''tag a revision with a symbolic name.
256 256
257 257 if local is True, the tag is stored in a per-repository file.
258 258 otherwise, it is stored in the .hgtags file, and a new
259 259 changeset is committed with the change.
260 260
261 261 keyword arguments:
262 262
263 263 local: whether to store tag in non-version-controlled file
264 264 (default False)
265 265
266 266 message: commit message to use if committing
267 267
268 268 user: name of user to use if committing
269 269
270 270 date: date tuple to use if committing'''
271 271
272 272 for x in self.status()[:5]:
273 273 if '.hgtags' in x:
274 274 raise util.Abort(_('working copy of .hgtags is changed '
275 275 '(please commit .hgtags manually)'))
276 276
277 277
278 278 self._tag(name, node, message, local, user, date)
279 279
280 280 def tags(self):
281 281 '''return a mapping of tag to node'''
282 282 if not self.tagscache:
283 283 self.tagscache = {}
284 284
285 285 def parsetag(line, context):
286 286 if not line:
287 287 return
288 288 s = l.split(" ", 1)
289 289 if len(s) != 2:
290 290 self.ui.warn(_("%s: cannot parse entry\n") % context)
291 291 return
292 292 node, key = s
293 293 key = util.tolocal(key.strip()) # stored in UTF-8
294 294 try:
295 295 bin_n = bin(node)
296 296 except TypeError:
297 297 self.ui.warn(_("%s: node '%s' is not well formed\n") %
298 298 (context, node))
299 299 return
300 300 if bin_n not in self.changelog.nodemap:
301 301 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
302 302 (context, key))
303 303 return
304 304 self.tagscache[key] = bin_n
305 305
306 306 # read the tags file from each head, ending with the tip,
307 307 # and add each tag found to the map, with "newer" ones
308 308 # taking precedence
309 309 f = None
310 310 for rev, node, fnode in self._hgtagsnodes():
311 311 f = (f and f.filectx(fnode) or
312 312 self.filectx('.hgtags', fileid=fnode))
313 313 count = 0
314 314 for l in f.data().splitlines():
315 315 count += 1
316 316 parsetag(l, _("%s, line %d") % (str(f), count))
317 317
318 318 try:
319 319 f = self.opener("localtags")
320 320 count = 0
321 321 for l in f:
322 322 # localtags are stored in the local character set
323 323 # while the internal tag table is stored in UTF-8
324 324 l = util.fromlocal(l)
325 325 count += 1
326 326 parsetag(l, _("localtags, line %d") % count)
327 327 except IOError:
328 328 pass
329 329
330 330 self.tagscache['tip'] = self.changelog.tip()
331 331
332 332 return self.tagscache
333 333
    def _hgtagsnodes(self):
        """Yield (rev, node, .hgtags-filenode) for heads that have .hgtags.

        Heads are returned oldest-first (reversed head list) so callers can
        apply entries with newer heads taking precedence.  When several
        heads share the same .hgtags filenode, only the last occurrence is
        kept (earlier slots are nulled out and filtered at the end).
        """
        heads = self.heads()
        heads.reverse()
        last = {}   # filenode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # duplicate .hgtags content: drop the earlier entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
351 351
352 352 def tagslist(self):
353 353 '''return a list of tags ordered by revision'''
354 354 l = []
355 355 for t, n in self.tags().items():
356 356 try:
357 357 r = self.changelog.rev(n)
358 358 except:
359 359 r = -2 # sort to the beginning of the list if unknown
360 360 l.append((r, t, n))
361 361 l.sort()
362 362 return [(t, n) for r, t, n in l]
363 363
364 364 def nodetags(self, node):
365 365 '''return the tags associated with a node'''
366 366 if not self.nodetagscache:
367 367 self.nodetagscache = {}
368 368 for t, n in self.tags().items():
369 369 self.nodetagscache.setdefault(n, []).append(t)
370 370 return self.nodetagscache.get(node, [])
371 371
372 372 def _branchtags(self):
373 373 partial, last, lrev = self._readbranchcache()
374 374
375 375 tiprev = self.changelog.count() - 1
376 376 if lrev != tiprev:
377 377 self._updatebranchcache(partial, lrev+1, tiprev+1)
378 378 self._writebranchcache(partial, self.changelog.tip(), tiprev)
379 379
380 380 return partial
381 381
382 382 def branchtags(self):
383 383 if self.branchcache is not None:
384 384 return self.branchcache
385 385
386 386 self.branchcache = {} # avoid recursion in changectx
387 387 partial = self._branchtags()
388 388
389 389 # the branch cache is stored on disk as UTF-8, but in the local
390 390 # charset internally
391 391 for k, v in partial.items():
392 392 self.branchcache[util.tolocal(k)] = v
393 393 return self.branchcache
394 394
    def _readbranchcache(self):
        """Read .hg/branches.cache.

        Returns (partial, last, lrev): the branch->node map, and the tip
        node/rev the cache was valid for.  Any parse problem, unknown or
        missing feature, or tip mismatch invalidates the whole cache and
        returns ({}, nullid, nullrev) so callers rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line declares the format features this file uses
            features = lines.pop(0).strip()
            if not features.startswith('features: '):
                raise ValueError(_('branch cache: no features specified'))
            features = features.split(' ', 1)[1].split()
            missing_features = []
            for feature in self.branchcache_features:
                try:
                    features.remove(feature)
                except ValueError, inst:
                    missing_features.append(feature)
            if missing_features:
                raise ValueError(_('branch cache: missing features: %s')
                                 % ', '.join(missing_features))
            if features:
                raise ValueError(_('branch cache: unknown features: %s')
                                 % ', '.join(features))
            # second line records the tip the cache was computed against
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<hex node> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any failure means "no usable cache"; report only when debugging
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
434 434
435 435 def _writebranchcache(self, branches, tip, tiprev):
436 436 try:
437 437 f = self.opener("branches.cache", "w")
438 438 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
439 439 f.write("%s %s\n" % (hex(tip), tiprev))
440 440 for label, node in branches.iteritems():
441 441 f.write("%s %s\n" % (hex(node), label))
442 442 except IOError:
443 443 pass
444 444
445 445 def _updatebranchcache(self, partial, start, end):
446 446 for r in xrange(start, end):
447 447 c = self.changectx(r)
448 448 b = c.branch()
449 449 partial[b] = c.node()
450 450
451 451 def lookup(self, key):
452 452 if key == '.':
453 453 key = self.dirstate.parents()[0]
454 454 if key == nullid:
455 455 raise repo.RepoError(_("no revision checked out"))
456 456 elif key == 'null':
457 457 return nullid
458 458 n = self.changelog._match(key)
459 459 if n:
460 460 return n
461 461 if key in self.tags():
462 462 return self.tags()[key]
463 463 if key in self.branchtags():
464 464 return self.branchtags()[key]
465 465 n = self.changelog._partialmatch(key)
466 466 if n:
467 467 return n
468 468 raise repo.RepoError(_("unknown revision '%s'") % key)
469 469
470 470 def dev(self):
471 471 return os.lstat(self.path).st_dev
472 472
    def local(self):
        # this class is always a directly-accessible on-disk repository,
        # as opposed to a remote (http/ssh) proxy
        return True
475 475
476 476 def join(self, f):
477 477 return os.path.join(self.path, f)
478 478
479 479 def sjoin(self, f):
480 480 f = self.encodefn(f)
481 481 return os.path.join(self.spath, f)
482 482
483 483 def wjoin(self, f):
484 484 return os.path.join(self.root, f)
485 485
486 486 def file(self, f):
487 487 if f[0] == '/':
488 488 f = f[1:]
489 489 return filelog.filelog(self.sopener, f, self.revlogversion)
490 490
    def changectx(self, changeid=None):
        # context object for the given changeset (None = working parent,
        # per context.changectx's handling of changeid)
        return context.changectx(self, changeid)
493 493
    def workingctx(self):
        # context object representing the working directory state
        return context.workingctx(self)
496 496
497 497 def parents(self, changeid=None):
498 498 '''
499 499 get list of changectxs for parents of changeid or working directory
500 500 '''
501 501 if changeid is None:
502 502 pl = self.dirstate.parents()
503 503 else:
504 504 n = self.changelog.lookup(changeid)
505 505 pl = self.changelog.parents(n)
506 506 if pl[1] == nullid:
507 507 return [self.changectx(pl[0])]
508 508 return [self.changectx(pl[0]), self.changectx(pl[1])]
509 509
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
514 514
    def getcwd(self):
        # current working directory relative to the repo root,
        # as tracked by the dirstate
        return self.dirstate.getcwd()
517 517
    def wfile(self, f, mode='r'):
        # open file f relative to the working directory
        return self.wopener(f, mode)
520 520
521 521 def _filter(self, filter, filename, data):
522 522 if filter not in self.filterpats:
523 523 l = []
524 524 for pat, cmd in self.ui.configitems(filter):
525 525 mf = util.matcher(self.root, "", [pat], [], [])[1]
526 526 l.append((mf, cmd))
527 527 self.filterpats[filter] = l
528 528
529 529 for mf, cmd in self.filterpats[filter]:
530 530 if mf(filename):
531 531 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
532 532 data = util.filter(data, cmd)
533 533 break
534 534
535 535 return data
536 536
537 537 def wread(self, filename):
538 538 if self._link(filename):
539 539 data = os.readlink(self.wjoin(filename))
540 540 else:
541 541 data = self.wopener(filename, 'r').read()
542 542 return self._filter("encode", filename, data)
543 543
    def wwrite(self, filename, data, flags):
        """Write data to a working-directory file, applying [decode] filters.

        flags: manifest flag string; 'l' creates a symlink whose target is
        *data*, 'x' sets the executable bit.  An existing entry of the
        other kind (file vs symlink) is removed first.
        """
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            # replace whatever is there with a fresh symlink
            try:
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            # if a symlink currently occupies the name, remove it so the
            # write creates a regular file instead of following the link
            try:
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
564 564
    def wwritedata(self, filename, data):
        # apply [decode] filters only, without touching the filesystem
        return self._filter("decode", filename, data)
567 567
    def transaction(self):
        """Start (or nest into) a repository transaction.

        If a transaction is already running, return a nested handle on it.
        Otherwise snapshot the dirstate to journal.dirstate and create a
        store transaction journal; on close, journal files are renamed to
        their undo.* counterparts so rollback() can restore them.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repo)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # journal lives in the store; the dirstate snapshot next to .hg
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
587 587
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.  The store lock is held (via the local `l` reference)
        for the duration of the rollback.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # discard any in-memory state made stale by the rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
598 598
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo.* files.

        Takes the working-dir lock (unless one is passed in) and the store
        lock; both are held via local references until the method returns.
        Restores the pre-transaction dirstate as well.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # refresh in-memory state from the restored files
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
611 611
    def wreload(self):
        # re-read the dirstate from disk (working-directory state)
        self.dirstate.read()
614 614
    def reload(self):
        # re-read store data and drop caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
620 620
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire a lock file, optionally waiting for a holder to release.

        First tries a non-blocking acquire; if held and wait is true,
        warns and retries with the configured ui.timeout (default 600s).
        acquirefn, when given, runs after the lock is obtained (used to
        refresh state that another process may have changed).  Returns the
        lock object; releasing it is the caller's responsibility.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
636 636
    def lock(self, wait=1):
        # store lock; reload() on acquire picks up other processes' commits
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
640 640
    def wlock(self, wait=1):
        # working-dir lock; writes the dirstate on release, re-reads it
        # on acquire
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
645 645
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn: name of the file being committed
        manifest1/manifest2: manifests of the commit's two parents
        linkrev: changelog revision this file revision will link to
        changelist: list to which fn is appended if a new file revision
            is actually created
        Returns the new (or reused) filelog node for fn.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
705 705
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit files without consulting the dirstate for status.

        When p1 is omitted, the dirstate parents are used.  Delegates to
        commit() with explicit parents, which switches it to rawcommit
        behavior.  NOTE(review): the mutable default extra={} is benign
        here -- it is only passed through, and commit() copies it.
        """
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
711 711
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset from the working directory (or, with
        explicit p1/p2, in rawcommit mode from the given parents).

        files: restrict the commit to these files; None/empty commits all
            changes reported by status() (dirstate mode only).
        text: commit message; empty or force_editor launches the editor.
        Fires precommit / pretxncommit / commit hooks; precommit and
        pretxncommit may abort.  Returns the new node, or None when there
        is nothing to commit or the message ends up empty.
        """

        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()        # never mutate the caller's dict

        if use_dirstate:
            if files:
                # explicit file list: classify each by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate if it was already sitting on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the name is legal UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # an empty commit is only allowed with --force, on a merge,
            # or when it records a branch name change
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates unreadable files: treat as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the editor template with HG: helper lines
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # strip trailing whitespace and leading blank lines; an empty
        # message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # keep the in-memory branch cache in step with the new head
        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
860 860
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was not found in the manifest
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # no node: defer to the dirstate's working-directory walk
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
894 894
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.  unknown/ignored/clean
        are only meaningful when comparing against the working directory
        (and, for ignored/clean, when the corresponding list_* flag is
        set).
        """

        def fcmp(fn, getnode):
            # true when the working copy of fn differs from its filelog
            # revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # the wlock (if obtainable) lets us update dirstate mtimes for
            # files proven clean below; without it we just skip that step
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the fresh stat data so future
                                # status calls skip the content compare
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    # present on both sides: modified when flags differ or
                    # nodes differ (with a content check for the ""
                    # placeholder entries of the pseudo-manifest)
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was only on the first side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
997 997
998 998 def add(self, list, wlock=None):
999 999 if not wlock:
1000 1000 wlock = self.wlock()
1001 1001 for f in list:
1002 1002 p = self.wjoin(f)
1003 1003 islink = os.path.islink(p)
1004 1004 if not islink and not os.path.exists(p):
1005 1005 self.ui.warn(_("%s does not exist!\n") % f)
1006 1006 elif not islink and not os.path.isfile(p):
1007 1007 self.ui.warn(_("%s not added: only files and symlinks "
1008 1008 "supported currently\n") % f)
1009 1009 elif self.dirstate.state(f) in 'an':
1010 1010 self.ui.warn(_("%s already tracked!\n") % f)
1011 1011 else:
1012 1012 self.dirstate.update([f], "a")
1013 1013
1014 1014 def forget(self, list, wlock=None):
1015 1015 if not wlock:
1016 1016 wlock = self.wlock()
1017 1017 for f in list:
1018 1018 if self.dirstate.state(f) not in 'ai':
1019 1019 self.ui.warn(_("%s not added!\n") % f)
1020 1020 else:
1021 1021 self.dirstate.forget([f])
1022 1022
1023 1023 def remove(self, list, unlink=False, wlock=None):
1024 1024 if unlink:
1025 1025 for f in list:
1026 1026 try:
1027 1027 util.unlink(self.wjoin(f))
1028 1028 except OSError, inst:
1029 1029 if inst.errno != errno.ENOENT:
1030 1030 raise
1031 1031 if not wlock:
1032 1032 wlock = self.wlock()
1033 1033 for f in list:
1034 1034 p = self.wjoin(f)
1035 1035 if os.path.exists(p):
1036 1036 self.ui.warn(_("%s still exists!\n") % f)
1037 1037 elif self.dirstate.state(f) == 'a':
1038 1038 self.dirstate.forget([f])
1039 1039 elif f not in self.dirstate:
1040 1040 self.ui.warn(_("%s not tracked!\n") % f)
1041 1041 else:
1042 1042 self.dirstate.update([f], "r")
1043 1043
1044 1044 def undelete(self, list, wlock=None):
1045 1045 p = self.dirstate.parents()[0]
1046 1046 mn = self.changelog.read(p)[0]
1047 1047 m = self.manifest.read(mn)
1048 1048 if not wlock:
1049 1049 wlock = self.wlock()
1050 1050 for f in list:
1051 1051 if self.dirstate.state(f) not in "r":
1052 1052 self.ui.warn("%s not removed!\n" % f)
1053 1053 else:
1054 1054 t = self.file(f).read(m[f])
1055 1055 self.wwrite(f, t, m.flags(f))
1056 1056 self.dirstate.update([f], "n")
1057 1057
1058 1058 def copy(self, source, dest, wlock=None):
1059 1059 p = self.wjoin(dest)
1060 1060 if not os.path.exists(p):
1061 1061 self.ui.warn(_("%s does not exist!\n") % dest)
1062 1062 elif not os.path.isfile(p):
1063 1063 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1064 1064 else:
1065 1065 if not wlock:
1066 1066 wlock = self.wlock()
1067 1067 if self.dirstate.state(dest) == '?':
1068 1068 self.dirstate.update([dest], "a")
1069 1069 self.dirstate.copy(source, dest)
1070 1070
1071 1071 def heads(self, start=None):
1072 1072 heads = self.changelog.heads(start)
1073 1073 # sort the output in rev descending order
1074 1074 heads = [(-self.changelog.rev(h), h) for h in heads]
1075 1075 heads.sort()
1076 1076 return [n for (r, n) in heads]
1077 1077
1078 1078 def branches(self, nodes):
1079 1079 if not nodes:
1080 1080 nodes = [self.changelog.tip()]
1081 1081 b = []
1082 1082 for n in nodes:
1083 1083 t = n
1084 1084 while 1:
1085 1085 p = self.changelog.parents(n)
1086 1086 if p[1] != nullid or p[0] == nullid:
1087 1087 b.append((t, n, p[0], p[1]))
1088 1088 break
1089 1089 n = p[0]
1090 1090 return b
1091 1091
1092 1092 def between(self, pairs):
1093 1093 r = []
1094 1094
1095 1095 for top, bottom in pairs:
1096 1096 n, l, i = top, [], 0
1097 1097 f = 1
1098 1098
1099 1099 while n != bottom:
1100 1100 p = self.changelog.parents(n)[0]
1101 1101 if i == f:
1102 1102 l.append(n)
1103 1103 f = f * 2
1104 1104 n = p
1105 1105 i += 1
1106 1106
1107 1107 r.append(l)
1108 1108
1109 1109 return r
1110 1110
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)

        Raises repo.RepoError if a supposedly-missing node turns out to be
        known, and util.Abort when the two repositories share no history
        (unless force is true, which downgrades that to a warning).
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next round of
                    # remote.branches requests
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the requests ten branches at a time
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1251 1251
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node information supplied: compute it ourselves
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every local node and prune away what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1299 1299
1300 1300 def pull(self, remote, heads=None, force=False, lock=None):
1301 1301 mylock = False
1302 1302 if not lock:
1303 1303 lock = self.lock()
1304 1304 mylock = True
1305 1305
1306 1306 try:
1307 1307 fetch = self.findincoming(remote, force=force)
1308 1308 if fetch == [nullid]:
1309 1309 self.ui.status(_("requesting all changes\n"))
1310 1310
1311 1311 if not fetch:
1312 1312 self.ui.status(_("no changes found\n"))
1313 1313 return 0
1314 1314
1315 1315 if heads is None:
1316 1316 cg = remote.changegroup(fetch, 'pull')
1317 1317 else:
1318 1318 if 'changegroupsubset' not in remote.capabilities:
1319 1319 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1320 1320 cg = remote.changegroupsubset(fetch, heads, 'pull')
1321 1321 return self.addchangegroup(cg, 'pull', remote.url())
1322 1322 finally:
1323 1323 if mylock:
1324 1324 lock.release()
1325 1325
1326 1326 def push(self, remote, force=False, revs=None):
1327 1327 # there are two ways to push to remote repo:
1328 1328 #
1329 1329 # addchangegroup assumes local user can lock remote
1330 1330 # repo (local filesystem, old ssh servers).
1331 1331 #
1332 1332 # unbundle assumes local user cannot lock remote repo (new ssh
1333 1333 # servers, http servers).
1334 1334
1335 1335 if remote.capable('unbundle'):
1336 1336 return self.push_unbundle(remote, force, revs)
1337 1337 return self.push_addchangegroup(remote, force, revs)
1338 1338
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) when there is something to
        send, or (None, exitcode) when there is nothing to push or when
        the push would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: any push is safe
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head
                        # only if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1394 1394
1395 1395 def push_addchangegroup(self, remote, force, revs):
1396 1396 lock = remote.lock()
1397 1397
1398 1398 ret = self.prepush(remote, force, revs)
1399 1399 if ret[0] is not None:
1400 1400 cg, remote_heads = ret
1401 1401 return remote.addchangegroup(cg, 'push', self.url())
1402 1402 return ret[1]
1403 1403
1404 1404 def push_unbundle(self, remote, force, revs):
1405 1405 # local repo finds heads on server, finds out what revs it
1406 1406 # must push. once revs transferred, if server finds it has
1407 1407 # different heads (someone else won commit/push race), server
1408 1408 # aborts.
1409 1409
1410 1410 ret = self.prepush(remote, force, revs)
1411 1411 if ret[0] is not None:
1412 1412 cg, remote_heads = ret
1413 1413 if force: remote_heads = ['force']
1414 1414 return remote.unbundle(cg, remote_heads, 'push')
1415 1415 return ret[1]
1416 1416
1417 1417 def changegroupinfo(self, nodes):
1418 1418 self.ui.note(_("%d changesets found\n") % len(nodes))
1419 1419 if self.ui.debugflag:
1420 1420 self.ui.debug(_("List of changesets:\n"))
1421 1421 for node in nodes:
1422 1422 self.ui.debug("%s\n" % hex(node))
1423 1423
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer streaming the changelog, manifest and
        file groups in wire format.  The 'preoutgoing' hook runs before
        anything is computed; 'outgoing' runs once the changeset list is
        known (if non-empty).
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1695 1695
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer streaming the changelog, manifest and
        file groups in wire format.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of revision numbers being sent, for fast membership tests
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # changesets are looked up by themselves
        def identity(x):
            return x

        # yield the nodes of revlog whose linked changeset is outgoing
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a revlog node back to its owning changeset node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1762 1762
1763 1763 def addchangegroup(self, source, srctype, url):
1764 1764 """add changegroup to repo.
1765 1765
1766 1766 return values:
1767 1767 - nothing changed or no source: 0
1768 1768 - more heads than before: 1+added heads (2..n)
1769 1769 - less heads than before: -1-removed heads (-2..-n)
1770 1770 - number of heads stays the same: 1
1771 1771 """
1772 1772 def csmap(x):
1773 1773 self.ui.debug(_("add changeset %s\n") % short(x))
1774 1774 return cl.count()
1775 1775
1776 1776 def revmap(x):
1777 1777 return cl.rev(x)
1778 1778
1779 1779 if not source:
1780 1780 return 0
1781 1781
1782 1782 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1783 1783
1784 1784 changesets = files = revisions = 0
1785 1785
1786 1786 tr = self.transaction()
1787 1787
1788 1788 # write changelog data to temp files so concurrent readers will not see
1789 1789 # inconsistent view
1790 1790 cl = None
1791 1791 try:
1792 1792 cl = appendfile.appendchangelog(self.sopener,
1793 1793 self.changelog.version)
1794 1794
1795 1795 oldheads = len(cl.heads())
1796 1796
1797 1797 # pull off the changeset group
1798 1798 self.ui.status(_("adding changesets\n"))
1799 1799 cor = cl.count() - 1
1800 1800 chunkiter = changegroup.chunkiter(source)
1801 1801 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1802 1802 raise util.Abort(_("received changelog group is empty"))
1803 1803 cnr = cl.count() - 1
1804 1804 changesets = cnr - cor
1805 1805
1806 1806 # pull off the manifest group
1807 1807 self.ui.status(_("adding manifests\n"))
1808 1808 chunkiter = changegroup.chunkiter(source)
1809 1809 # no need to check for empty manifest group here:
1810 1810 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1811 1811 # no new manifest will be created and the manifest group will
1812 1812 # be empty during the pull
1813 1813 self.manifest.addgroup(chunkiter, revmap, tr)
1814 1814
1815 1815 # process the files
1816 1816 self.ui.status(_("adding file changes\n"))
1817 1817 while 1:
1818 1818 f = changegroup.getchunk(source)
1819 1819 if not f:
1820 1820 break
1821 1821 self.ui.debug(_("adding %s revisions\n") % f)
1822 1822 fl = self.file(f)
1823 1823 o = fl.count()
1824 1824 chunkiter = changegroup.chunkiter(source)
1825 1825 if fl.addgroup(chunkiter, revmap, tr) is None:
1826 1826 raise util.Abort(_("received file revlog group is empty"))
1827 1827 revisions += fl.count() - o
1828 1828 files += 1
1829 1829
1830 1830 cl.writedata()
1831 1831 finally:
1832 1832 if cl:
1833 1833 cl.cleanup()
1834 1834
1835 1835 # make changelog see real files again
1836 1836 self.changelog = changelog.changelog(self.sopener,
1837 1837 self.changelog.version)
1838 1838 self.changelog.checkinlinesize(tr)
1839 1839
1840 1840 newheads = len(self.changelog.heads())
1841 1841 heads = ""
1842 1842 if oldheads and newheads != oldheads:
1843 1843 heads = _(" (%+d heads)") % (newheads - oldheads)
1844 1844
1845 1845 self.ui.status(_("added %d changesets"
1846 1846 " with %d changes to %d files%s\n")
1847 1847 % (changesets, revisions, files, heads))
1848 1848
1849 1849 if changesets > 0:
1850 1850 self.hook('pretxnchangegroup', throw=True,
1851 1851 node=hex(self.changelog.node(cor+1)), source=srctype,
1852 1852 url=url)
1853 1853
1854 1854 tr.close()
1855 1855
1856 1856 if changesets > 0:
1857 1857 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1858 1858 source=srctype, url=url)
1859 1859
1860 1860 for i in xrange(cor + 1, cnr + 1):
1861 1861 self.hook("incoming", node=hex(self.changelog.node(i)),
1862 1862 source=srctype, url=url)
1863 1863
1864 1864 # never return 0 here:
1865 1865 if newheads < oldheads:
1866 1866 return newheads - oldheads - 1
1867 1867 else:
1868 1868 return newheads - oldheads + 1
1869 1869
1870 1870
1871 1871 def stream_in(self, remote):
1872 1872 fp = remote.stream_out()
1873 1873 l = fp.readline()
1874 1874 try:
1875 1875 resp = int(l)
1876 1876 except ValueError:
1877 1877 raise util.UnexpectedOutput(
1878 1878 _('Unexpected response from remote server:'), l)
1879 1879 if resp == 1:
1880 1880 raise util.Abort(_('operation forbidden by server'))
1881 1881 elif resp == 2:
1882 1882 raise util.Abort(_('locking the remote repository failed'))
1883 1883 elif resp != 0:
1884 1884 raise util.Abort(_('the server sent an unknown error code'))
1885 1885 self.ui.status(_('streaming all changes\n'))
1886 1886 l = fp.readline()
1887 1887 try:
1888 1888 total_files, total_bytes = map(int, l.split(' ', 1))
1889 1889 except ValueError, TypeError:
1890 1890 raise util.UnexpectedOutput(
1891 1891 _('Unexpected response from remote server:'), l)
1892 1892 self.ui.status(_('%d files to transfer, %s of data\n') %
1893 1893 (total_files, util.bytecount(total_bytes)))
1894 1894 start = time.time()
1895 1895 for i in xrange(total_files):
1896 1896 # XXX doesn't support '\n' or '\r' in filenames
1897 1897 l = fp.readline()
1898 1898 try:
1899 1899 name, size = l.split('\0', 1)
1900 1900 size = int(size)
1901 1901 except ValueError, TypeError:
1902 1902 raise util.UnexpectedOutput(
1903 1903 _('Unexpected response from remote server:'), l)
1904 1904 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1905 1905 ofp = self.sopener(name, 'w')
1906 1906 for chunk in util.filechunkiter(fp, limit=size):
1907 1907 ofp.write(chunk)
1908 1908 ofp.close()
1909 1909 elapsed = time.time() - start
1910 1910 if elapsed <= 0:
1911 1911 elapsed = 0.001
1912 1912 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1913 1913 (util.bytecount(total_bytes), elapsed,
1914 1914 util.bytecount(total_bytes / elapsed)))
1915 1915 self.reload()
1916 1916 return len(self.heads()) + 1
1917 1917
1918 1918 def clone(self, remote, heads=[], stream=False):
1919 1919 '''clone remote repository.
1920 1920
1921 1921 keyword arguments:
1922 1922 heads: list of revs to clone (forces use of pull)
1923 1923 stream: use streaming clone if possible'''
1924 1924
1925 1925 # now, all clients that can request uncompressed clones can
1926 1926 # read repo formats supported by all servers that can serve
1927 1927 # them.
1928 1928
1929 1929 # if revlog format changes, client will have to check version
1930 1930 # and format flags on "stream" capability, and use
1931 1931 # uncompressed only if compatible.
1932 1932
1933 1933 if stream and not heads and remote.capable('stream'):
1934 1934 return self.stream_in(remote)
1935 1935 return self.pull(remote, heads)
1936 1936
1937 1937 # used to avoid circular references so destructors work
1938 1938 def aftertrans(files):
1939 1939 renamefiles = [tuple(t) for t in files]
1940 1940 def a():
1941 1941 for src, dest in renamefiles:
1942 1942 util.rename(src, dest)
1943 1943 return a
1944 1944
1945 1945 def instance(ui, path, create):
1946 1946 return localrepository(ui, util.drop_scheme('file', path), create)
1947 1947
1948 1948 def islocal(path):
1949 1949 return True
@@ -1,501 +1,501 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import errno, util, os, tempfile
11 11
12 12 def filemerge(repo, fw, fo, wctx, mctx):
13 13 """perform a 3-way merge in the working directory
14 14
15 15 fw = filename in the working directory
16 16 fo = filename in other parent
17 17 wctx, mctx = working and merge changecontexts
18 18 """
19 19
20 20 def temp(prefix, ctx):
21 21 pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
22 22 (fd, name) = tempfile.mkstemp(prefix=pre)
23 23 data = repo.wwritedata(ctx.path(), ctx.data())
24 24 f = os.fdopen(fd, "wb")
25 25 f.write(data)
26 26 f.close()
27 27 return name
28 28
29 29 fcm = wctx.filectx(fw)
30 30 fco = mctx.filectx(fo)
31 31
32 32 if not fco.cmp(fcm.data()): # files identical?
33 33 return None
34 34
35 35 fca = fcm.ancestor(fco)
36 36 if not fca:
37 37 fca = repo.filectx(fw, fileid=nullrev)
38 38 a = repo.wjoin(fw)
39 39 b = temp("base", fca)
40 40 c = temp("other", fco)
41 41
42 42 if fw != fo:
43 43 repo.ui.status(_("merging %s and %s\n") % (fw, fo))
44 44 else:
45 45 repo.ui.status(_("merging %s\n") % fw)
46 46
47 47 repo.ui.debug(_("my %s other %s ancestor %s\n") % (fcm, fco, fca))
48 48
49 49 cmd = (os.environ.get("HGMERGE") or repo.ui.config("ui", "merge")
50 50 or "hgmerge")
51 51 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=repo.root,
52 52 environ={'HG_FILE': fw,
53 53 'HG_MY_NODE': str(wctx.parents()[0]),
54 54 'HG_OTHER_NODE': str(mctx)})
55 55 if r:
56 56 repo.ui.warn(_("merging %s failed!\n") % fw)
57 57
58 58 os.unlink(b)
59 59 os.unlink(c)
60 60 return r
61 61
62 62 def checkunknown(wctx, mctx):
63 63 "check for collisions between unknown files and files in mctx"
64 64 man = mctx.manifest()
65 65 for f in wctx.unknown():
66 66 if f in man:
67 67 if mctx.filectx(f).cmp(wctx.filectx(f).data()):
68 68 raise util.Abort(_("untracked local file '%s' differs"\
69 69 " from remote version") % f)
70 70
71 71 def checkcollision(mctx):
72 72 "check for case folding collisions in the destination context"
73 73 folded = {}
74 74 for fn in mctx.manifest():
75 75 fold = fn.lower()
76 76 if fold in folded:
77 77 raise util.Abort(_("case-folding collision between %s and %s")
78 78 % (fn, folded[fold]))
79 79 folded[fold] = fn
80 80
81 81 def forgetremoved(wctx, mctx):
82 82 """
83 83 Forget removed files
84 84
85 85 If we're jumping between revisions (as opposed to merging), and if
86 86 neither the working directory nor the target rev has the file,
87 87 then we need to remove it from the dirstate, to prevent the
88 88 dirstate from listing the file when it is no longer in the
89 89 manifest.
90 90 """
91 91
92 92 action = []
93 93 man = mctx.manifest()
94 94 for f in wctx.deleted() + wctx.removed():
95 95 if f not in man:
96 96 action.append((f, "f"))
97 97
98 98 return action
99 99
100 100 def findcopies(repo, m1, m2, ma, limit):
101 101 """
102 102 Find moves and copies between m1 and m2 back to limit linkrev
103 103 """
104 104
105 105 def findold(fctx):
106 106 "find files that path was copied from, back to linkrev limit"
107 107 old = {}
108 108 orig = fctx.path()
109 109 visit = [fctx]
110 110 while visit:
111 111 fc = visit.pop()
112 112 if fc.path() != orig and fc.path() not in old:
113 113 old[fc.path()] = 1
114 114 if fc.rev() < limit:
115 115 continue
116 116 visit += fc.parents()
117 117
118 118 old = old.keys()
119 119 old.sort()
120 120 return old
121 121
122 122 def nonoverlap(d1, d2, d3):
123 123 "Return list of elements in d1 not in d2 or d3"
124 124 l = [d for d in d1 if d not in d3 and d not in d2]
125 125 l.sort()
126 126 return l
127 127
128 128 def checkcopies(c, man):
129 129 '''check possible copies for filectx c'''
130 130 for of in findold(c):
131 131 if of not in man:
132 132 return
133 133 c2 = ctx(of, man[of])
134 134 ca = c.ancestor(c2)
135 135 if not ca: # unrelated
136 136 return
137 137 if ca.path() == c.path() or ca.path() == c2.path():
138 138 fullcopy[c.path()] = of
139 139 if c == ca or c2 == ca: # no merge needed, ignore copy
140 140 return
141 141 copy[c.path()] = of
142 142
143 143 def dirs(files):
144 144 d = {}
145 145 for f in files:
146 146 d[os.path.dirname(f)] = True
147 147 return d
148 148
149 149 if not repo.ui.configbool("merge", "followcopies", True):
150 150 return {}
151 151
152 152 # avoid silly behavior for update from empty dir
153 153 if not m1 or not m2 or not ma:
154 154 return {}
155 155
156 156 dcopies = repo.dirstate.copies()
157 157 copy = {}
158 158 fullcopy = {}
159 159 u1 = nonoverlap(m1, m2, ma)
160 160 u2 = nonoverlap(m2, m1, ma)
161 161 ctx = util.cachefunc(lambda f, n: repo.filectx(f, fileid=n[:20]))
162 162
163 163 for f in u1:
164 164 checkcopies(ctx(dcopies.get(f, f), m1[f]), m2)
165 165
166 166 for f in u2:
167 167 checkcopies(ctx(f, m2[f]), m1)
168 168
169 169 if not fullcopy or not repo.ui.configbool("merge", "followdirs", True):
170 170 return copy
171 171
172 172 # generate a directory move map
173 173 d1, d2 = dirs(m1), dirs(m2)
174 174 invalid = {}
175 175 dirmove = {}
176 176
177 177 for dst, src in fullcopy.items():
178 178 dsrc, ddst = os.path.dirname(src), os.path.dirname(dst)
179 179 if dsrc in invalid:
180 180 continue
181 181 elif (dsrc in d1 and ddst in d1) or (dsrc in d2 and ddst in d2):
182 182 invalid[dsrc] = True
183 183 elif dsrc in dirmove and dirmove[dsrc] != ddst:
184 184 invalid[dsrc] = True
185 185 del dirmove[dsrc]
186 186 else:
187 187 dirmove[dsrc + "/"] = ddst + "/"
188 188
189 189 del d1, d2, invalid
190 190
191 191 if not dirmove:
192 192 return copy
193 193
194 194 # check unaccounted nonoverlapping files
195 195 for f in u1 + u2:
196 196 if f not in fullcopy:
197 197 for d in dirmove:
198 198 if f.startswith(d):
199 199 copy[f] = dirmove[d] + f[len(d):]
200 200 break
201 201
202 202 return copy
203 203
204 204 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
205 205 """
206 206 Merge p1 and p2 with ancestor ma and generate merge action list
207 207
208 208 overwrite = whether we clobber working files
209 209 partial = function to filter file lists
210 210 """
211 211
212 212 repo.ui.note(_("resolving manifests\n"))
213 213 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
214 214 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
215 215
216 216 m1 = p1.manifest()
217 217 m2 = p2.manifest()
218 218 ma = pa.manifest()
219 219 backwards = (pa == p2)
220 220 action = []
221 221 copy = {}
222 222
223 223 def fmerge(f, f2=None, fa=None):
224 224 """merge flags"""
225 225 if not f2:
226 226 f2 = f
227 227 fa = f
228 228 a, b, c = ma.execf(fa), m1.execf(f), m2.execf(f2)
229 229 if ((a^b) | (a^c)) ^ a:
230 230 return 'x'
231 231 a, b, c = ma.linkf(fa), m1.linkf(f), m2.linkf(f2)
232 232 if ((a^b) | (a^c)) ^ a:
233 233 return 'l'
234 234 return ''
235 235
236 236 def act(msg, m, f, *args):
237 237 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
238 238 action.append((f, m) + args)
239 239
240 240 if not (backwards or overwrite):
241 241 copy = findcopies(repo, m1, m2, ma, pa.rev())
242 242 copied = dict.fromkeys(copy.values())
243 243
244 244 # Compare manifests
245 245 for f, n in m1.iteritems():
246 246 if partial and not partial(f):
247 247 continue
248 248 if f in m2:
249 249 # are files different?
250 250 if n != m2[f]:
251 251 a = ma.get(f, nullid)
252 252 # are both different from the ancestor?
253 253 if not overwrite and n != a and m2[f] != a:
254 254 act("versions differ", "m", f, f, f, fmerge(f), False)
255 255 # are we clobbering?
256 256 # is remote's version newer?
257 257 # or are we going back in time and clean?
258 258 elif overwrite or m2[f] != a or (backwards and not n[20:]):
259 259 act("remote is newer", "g", f, m2.flags(f))
260 260 # local is newer, not overwrite, check mode bits
261 261 elif fmerge(f) != m1.flags(f):
262 262 act("update permissions", "e", f, m2.flags(f))
263 263 # contents same, check mode bits
264 264 elif m1.flags(f) != m2.flags(f):
265 265 if overwrite or fmerge(f) != m1.flags(f):
266 266 act("update permissions", "e", f, m2.flags(f))
267 267 elif f in copied:
268 268 continue
269 269 elif f in copy:
270 270 f2 = copy[f]
271 271 if f2 not in m2: # directory rename
272 272 act("remote renamed directory to " + f2, "d",
273 273 f, None, f2, m1.flags(f))
274 274 elif f2 in m1: # case 2 A,B/B/B
275 275 act("local copied to " + f2, "m",
276 276 f, f2, f, fmerge(f, f2, f2), False)
277 277 else: # case 4,21 A/B/B
278 278 act("local moved to " + f2, "m",
279 279 f, f2, f, fmerge(f, f2, f2), False)
280 280 elif f in ma:
281 281 if n != ma[f] and not overwrite:
282 282 if repo.ui.prompt(
283 283 (_(" local changed %s which remote deleted\n") % f) +
284 284 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("d"):
285 285 act("prompt delete", "r", f)
286 286 else:
287 287 act("other deleted", "r", f)
288 288 else:
289 289 # file is created on branch or in working directory
290 290 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
291 291 act("remote deleted", "r", f)
292 292
293 293 for f, n in m2.iteritems():
294 294 if partial and not partial(f):
295 295 continue
296 296 if f in m1:
297 297 continue
298 298 if f in copied:
299 299 continue
300 300 if f in copy:
301 301 f2 = copy[f]
302 302 if f2 not in m1: # directory rename
303 303 act("local renamed directory to " + f2, "d",
304 304 None, f, f2, m2.flags(f))
305 305 elif f2 in m2: # rename case 1, A/A,B/A
306 306 act("remote copied to " + f, "m",
307 307 f2, f, f, fmerge(f2, f, f2), False)
308 308 else: # case 3,20 A/B/A
309 309 act("remote moved to " + f, "m",
310 310 f2, f, f, fmerge(f2, f, f2), True)
311 311 elif f in ma:
312 312 if overwrite or backwards:
313 313 act("recreating", "g", f, m2.flags(f))
314 314 elif n != ma[f]:
315 315 if repo.ui.prompt(
316 316 (_("remote changed %s which local deleted\n") % f) +
317 317 _("(k)eep or (d)elete?"), _("[kd]"), _("k")) == _("k"):
318 318 act("prompt recreating", "g", f, m2.flags(f))
319 319 else:
320 320 act("remote created", "g", f, m2.flags(f))
321 321
322 322 return action
323 323
324 324 def applyupdates(repo, action, wctx, mctx):
325 325 "apply the merge action list to the working directory"
326 326
327 327 updated, merged, removed, unresolved = 0, 0, 0, 0
328 328 action.sort()
329 329 for a in action:
330 330 f, m = a[:2]
331 331 if f and f[0] == "/":
332 332 continue
333 333 if m == "r": # remove
334 334 repo.ui.note(_("removing %s\n") % f)
335 335 util.audit_path(f)
336 336 try:
337 337 util.unlink(repo.wjoin(f))
338 338 except OSError, inst:
339 339 if inst.errno != errno.ENOENT:
340 340 repo.ui.warn(_("update failed to remove %s: %s!\n") %
341 341 (f, inst.strerror))
342 342 removed += 1
343 343 elif m == "m": # merge
344 344 f2, fd, flags, move = a[2:]
345 345 r = filemerge(repo, f, f2, wctx, mctx)
346 346 if r > 0:
347 347 unresolved += 1
348 348 else:
349 349 if r is None:
350 350 updated += 1
351 351 else:
352 352 merged += 1
353 353 if f != fd:
354 354 repo.ui.debug(_("copying %s to %s\n") % (f, fd))
355 355 repo.wwrite(fd, repo.wread(f), flags)
356 356 if move:
357 357 repo.ui.debug(_("removing %s\n") % f)
358 358 os.unlink(repo.wjoin(f))
359 359 util.set_exec(repo.wjoin(fd), "x" in flags)
360 360 elif m == "g": # get
361 361 flags = a[2]
362 362 repo.ui.note(_("getting %s\n") % f)
363 363 t = mctx.filectx(f).data()
364 364 repo.wwrite(f, t, flags)
365 365 updated += 1
366 366 elif m == "d": # directory rename
367 367 f2, fd, flags = a[2:]
368 368 if f:
369 369 repo.ui.note(_("moving %s to %s\n") % (f, fd))
370 370 t = wctx.filectx(f).data()
371 371 repo.wwrite(fd, t, flags)
372 372 util.unlink(repo.wjoin(f))
373 373 if f2:
374 374 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
375 375 t = mctx.filectx(f2).data()
376 376 repo.wwrite(fd, t, flags)
377 377 updated += 1
378 378 elif m == "e": # exec
379 379 flags = a[2]
380 380 util.set_exec(repo.wjoin(f), flags)
381 381
382 382 return updated, merged, removed, unresolved
383 383
384 384 def recordupdates(repo, action, branchmerge):
385 385 "record merge actions to the dirstate"
386 386
387 387 for a in action:
388 388 f, m = a[:2]
389 389 if m == "r": # remove
390 390 if branchmerge:
391 391 repo.dirstate.update([f], 'r')
392 392 else:
393 393 repo.dirstate.forget([f])
394 394 elif m == "f": # forget
395 395 repo.dirstate.forget([f])
396 396 elif m == "g": # get
397 397 if branchmerge:
398 398 repo.dirstate.update([f], 'n', st_mtime=-1)
399 399 else:
400 400 repo.dirstate.update([f], 'n')
401 401 elif m == "m": # merge
402 402 f2, fd, flag, move = a[2:]
403 403 if branchmerge:
404 404 # We've done a branch merge, mark this file as merged
405 405 # so that we properly record the merger later
406 406 repo.dirstate.update([fd], 'm')
407 407 if f != f2: # copy/rename
408 408 if move:
409 409 repo.dirstate.update([f], 'r')
410 410 if f != fd:
411 411 repo.dirstate.copy(f, fd)
412 412 else:
413 413 repo.dirstate.copy(f2, fd)
414 414 else:
415 415 # We've update-merged a locally modified file, so
416 416 # we set the dirstate to emulate a normal checkout
417 417 # of that file some time in the past. Thus our
418 418 # merge will appear as a normal local file
419 419 # modification.
420 420 repo.dirstate.update([fd], 'n', st_size=-1, st_mtime=-1)
421 421 if move:
422 422 repo.dirstate.forget([f])
423 423 elif m == "d": # directory rename
424 424 f2, fd, flag = a[2:]
425 425 if branchmerge:
426 426 repo.dirstate.update([fd], 'a')
427 427 if f:
428 428 repo.dirstate.update([f], 'r')
429 429 repo.dirstate.copy(f, fd)
430 430 if f2:
431 431 repo.dirstate.copy(f2, fd)
432 432 else:
433 433 repo.dirstate.update([fd], 'n')
434 434 if f:
435 435 repo.dirstate.forget([f])
436 436
437 437 def update(repo, node, branchmerge, force, partial, wlock):
438 438 """
439 439 Perform a merge between the working directory and the given node
440 440
441 441 branchmerge = whether to merge between branches
442 442 force = whether to force branch merging or file overwriting
443 443 partial = a function to filter file lists (dirstate not updated)
444 444 wlock = working dir lock, if already held
445 445 """
446 446
447 if node is None:
448 node = "tip"
449
450 447 if not wlock:
451 448 wlock = repo.wlock()
452 449
450 wc = repo.workingctx()
451 if node is None:
452 # tip of current branch
453 node = repo.branchtags()[wc.branch()]
453 454 overwrite = force and not branchmerge
454 455 forcemerge = force and branchmerge
455 wc = repo.workingctx()
456 456 pl = wc.parents()
457 457 p1, p2 = pl[0], repo.changectx(node)
458 458 pa = p1.ancestor(p2)
459 459 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
460 460
461 461 ### check phase
462 462 if not overwrite and len(pl) > 1:
463 463 raise util.Abort(_("outstanding uncommitted merges"))
464 464 if pa == p1 or pa == p2: # is there a linear path from p1 to p2?
465 465 if branchmerge:
466 466 raise util.Abort(_("there is nothing to merge, just use "
467 467 "'hg update' or look at 'hg heads'"))
468 468 elif not (overwrite or branchmerge):
469 469 raise util.Abort(_("update spans branches, use 'hg merge' "
470 470 "or 'hg update -C' to lose changes"))
471 471 if branchmerge and not forcemerge:
472 472 if wc.files():
473 473 raise util.Abort(_("outstanding uncommitted changes"))
474 474
475 475 ### calculate phase
476 476 action = []
477 477 if not force:
478 478 checkunknown(wc, p2)
479 479 if not util.checkfolding(repo.path):
480 480 checkcollision(p2)
481 481 if not branchmerge:
482 482 action += forgetremoved(wc, p2)
483 483 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
484 484
485 485 ### apply phase
486 486 if not branchmerge: # just jump to the new rev
487 487 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
488 488 if not partial:
489 489 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
490 490
491 491 stats = applyupdates(repo, action, wc, p2)
492 492
493 493 if not partial:
494 494 recordupdates(repo, action, branchmerge)
495 495 repo.dirstate.setparents(fp1, fp2)
496 496 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
497 497 if not branchmerge:
498 498 repo.opener("branch", "w").write(p2.branch() + "\n")
499 499
500 500 return stats
501 501
@@ -1,379 +1,391 b''
1 1 #!/bin/sh
2 2
3 3 echo "[extensions]" >> $HGRCPATH
4 4 echo "mq=" >> $HGRCPATH
5 5
6 6 echo % help
7 7 hg help mq
8 8
9 9 hg init a
10 10 cd a
11 11 echo a > a
12 12 hg ci -Ama
13 13
14 14 hg clone . ../k
15 15
16 16 mkdir b
17 17 echo z > b/z
18 18 hg ci -Ama
19 19
20 20 echo % qinit
21 21
22 22 hg qinit
23 23
24 24 cd ..
25 25 hg init b
26 26
27 27 echo % -R qinit
28 28
29 29 hg -R b qinit
30 30
31 31 hg init c
32 32
33 33 echo % qinit -c
34 34
35 35 hg --cwd c qinit -c
36 36 hg -R c/.hg/patches st
37 37
38 38 echo % qnew implies add
39 39
40 40 hg -R c qnew test.patch
41 41 hg -R c/.hg/patches st
42 42
43 43 echo '% qinit; qinit -c'
44 44 hg init d
45 45 cd d
46 46 hg qinit
47 47 hg qinit -c
48 48 # qinit -c should create both files if they don't exist
49 49 echo ' .hgignore:'
50 50 cat .hg/patches/.hgignore
51 51 echo ' series:'
52 52 cat .hg/patches/series
53 53 hg qinit -c 2>&1 | sed -e 's/repository.*already/repository already/'
54 54 cd ..
55 55
56 56 echo '% qinit; <stuff>; qinit -c'
57 57 hg init e
58 58 cd e
59 59 hg qnew A
60 60 echo foo > foo
61 61 hg add foo
62 62 hg qrefresh
63 63 hg qnew B
64 64 echo >> foo
65 65 hg qrefresh
66 66 echo status >> .hg/patches/.hgignore
67 67 echo bleh >> .hg/patches/.hgignore
68 68 hg qinit -c
69 69 hg -R .hg/patches status
70 70 # qinit -c shouldn't touch these files if they already exist
71 71 echo ' .hgignore:'
72 72 cat .hg/patches/.hgignore
73 73 echo ' series:'
74 74 cat .hg/patches/series
75 75 cd ..
76 76
77 77 cd a
78 78
79 79 echo % qnew -m
80 80
81 81 hg qnew -m 'foo bar' test.patch
82 82 cat .hg/patches/test.patch
83 83
84 84 echo % qrefresh
85 85
86 86 echo a >> a
87 87 hg qrefresh
88 88 sed -e "s/^\(diff -r \)\([a-f0-9]* \)/\1 x/" \
89 89 -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
90 90 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/test.patch
91 91
92 echo % empty qrefresh
93
94 hg qrefresh -X a
95 echo 'revision:'
96 hg diff -r -2 -r -1
97 echo 'patch:'
98 cat .hg/patches/test.patch
99 echo 'working dir diff:'
100 hg diff --nodates -q
101 # restore things
102 hg qrefresh
103
92 104 echo % qpop
93 105
94 106 hg qpop
95 107
96 108 echo % qpush
97 109
98 110 hg qpush
99 111
100 112 cd ..
101 113
102 114 echo % pop/push outside repo
103 115
104 116 hg -R a qpop
105 117 hg -R a qpush
106 118
107 119 cd a
108 120 hg qnew test2.patch
109 121
110 122 echo % qrefresh in subdir
111 123
112 124 cd b
113 125 echo a > a
114 126 hg add a
115 127 hg qrefresh
116 128
117 129 echo % pop/push -a in subdir
118 130
119 131 hg qpop -a
120 132 hg --traceback qpush -a
121 133
122 134 echo % qseries
123 135 hg qseries
124 136 hg qpop
125 137 hg qseries -vs
126 138 hg qpush
127 139
128 140 echo % qapplied
129 141 hg qapplied
130 142
131 143 echo % qtop
132 144 hg qtop
133 145
134 146 echo % qprev
135 147 hg qprev
136 148
137 149 echo % qnext
138 150 hg qnext
139 151
140 152 echo % pop, qnext, qprev, qapplied
141 153 hg qpop
142 154 hg qnext
143 155 hg qprev
144 156 hg qapplied
145 157
146 158 echo % commit should fail
147 159 hg commit
148 160
149 161 echo % push should fail
150 162 hg push ../../k
151 163
152 164 echo % qunapplied
153 165 hg qunapplied
154 166
155 167 echo % qpush/qpop with index
156 168 hg qnew test1b.patch
157 169 echo 1b > 1b
158 170 hg add 1b
159 171 hg qrefresh
160 172 hg qpush 2
161 173 hg qpop 0
162 174 hg qpush test.patch+1
163 175 hg qpush test.patch+2
164 176 hg qpop test2.patch-1
165 177 hg qpop test2.patch-2
166 178 hg qpush test1b.patch+1
167 179
168 180 echo % push should succeed
169 181 hg qpop -a
170 182 hg push ../../k
171 183
172 184 echo % qpush/qpop error codes
173 185 errorcode()
174 186 {
175 187 hg "$@" && echo " $@ succeeds" || echo " $@ fails"
176 188 }
177 189
178 190 # we want to start with some patches applied
179 191 hg qpush -a
180 192 echo " % pops all patches and succeeds"
181 193 errorcode qpop -a
182 194 echo " % does nothing and succeeds"
183 195 errorcode qpop -a
184 196 echo " % fails - nothing else to pop"
185 197 errorcode qpop
186 198 echo " % pushes a patch and succeeds"
187 199 errorcode qpush
188 200 echo " % pops a patch and succeeds"
189 201 errorcode qpop
190 202 echo " % pushes up to test1b.patch and succeeds"
191 203 errorcode qpush test1b.patch
192 204 echo " % does nothing and succeeds"
193 205 errorcode qpush test1b.patch
194 206 echo " % does nothing and succeeds"
195 207 errorcode qpop test1b.patch
196 208 echo " % fails - can't push to this patch"
197 209 errorcode qpush test.patch
198 210 echo " % fails - can't pop to this patch"
199 211 errorcode qpop test2.patch
200 212 echo " % pops up to test.patch and succeeds"
201 213 errorcode qpop test.patch
202 214 echo " % pushes all patches and succeeds"
203 215 errorcode qpush -a
204 216 echo " % does nothing and succeeds"
205 217 errorcode qpush -a
206 218 echo " % fails - nothing else to push"
207 219 errorcode qpush
208 220 echo " % does nothing and succeeds"
209 221 errorcode qpush test2.patch
210 222
211 223
212 224 echo % strip
213 225 cd ../../b
214 226 echo x>x
215 227 hg ci -Ama
216 228 hg strip tip 2>&1 | sed 's/\(saving bundle to \).*/\1/'
217 229 hg unbundle .hg/strip-backup/*
218 230
219 231 echo '% cd b; hg qrefresh'
220 232 hg init refresh
221 233 cd refresh
222 234 echo a > a
223 235 hg ci -Ama -d'0 0'
224 236 hg qnew -mfoo foo
225 237 echo a >> a
226 238 hg qrefresh
227 239 mkdir b
228 240 cd b
229 241 echo f > f
230 242 hg add f
231 243 hg qrefresh
232 244 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
233 245 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
234 246 echo % hg qrefresh .
235 247 hg qrefresh .
236 248 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
237 249 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" ../.hg/patches/foo
238 250 hg status
239 251
240 252 echo % qpush failure
241 253 cd ..
242 254 hg qrefresh
243 255 hg qnew -mbar bar
244 256 echo foo > foo
245 257 echo bar > bar
246 258 hg add foo bar
247 259 hg qrefresh
248 260 hg qpop -a
249 261 echo bar > foo
250 262 hg qpush -a
251 263 hg st
252 264
253 265 cat >>$HGRCPATH <<EOF
254 266 [diff]
255 267 git = True
256 268 EOF
257 269 cd ..
258 270 hg init git
259 271 cd git
260 272 hg qinit
261 273
262 274 hg qnew -m'new file' new
263 275 echo foo > new
264 276 chmod +x new
265 277 hg add new
266 278 hg qrefresh
267 279 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
268 280 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/new
269 281
270 282 hg qnew -m'copy file' copy
271 283 hg cp new copy
272 284 hg qrefresh
273 285 sed -e "s/\(+++ [a-zA-Z0-9_/.-]*\).*/\1/" \
274 286 -e "s/\(--- [a-zA-Z0-9_/.-]*\).*/\1/" .hg/patches/copy
275 287
276 288 hg qpop
277 289 hg qpush
278 290 hg qdiff
279 291 cat >>$HGRCPATH <<EOF
280 292 [diff]
281 293 git = False
282 294 EOF
283 295 hg qdiff --git
284 296
285 297 cd ..
286 298 hg init slow
287 299 cd slow
288 300 hg qinit
289 301 echo foo > foo
290 302 hg add foo
291 303 hg ci -m 'add foo'
292 304 hg qnew bar
293 305 echo bar > bar
294 306 hg add bar
295 307 hg mv foo baz
296 308 hg qrefresh --git
297 309 hg up -C 0
298 310 echo >> foo
299 311 hg ci -m 'change foo'
300 312 hg up -C 1
301 313 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
302 314 cat .hg/patches/bar
303 315 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
304 316 hg qrefresh --git
305 317 cat .hg/patches/bar
306 318 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
307 319
308 320 echo
309 321 hg up -C 1
310 322 echo >> foo
311 323 hg ci -m 'change foo again'
312 324 hg up -C 2
313 325 hg mv bar quux
314 326 hg mv baz bleh
315 327 hg qrefresh --git 2>&1 | grep -v 'saving bundle'
316 328 cat .hg/patches/bar
317 329 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
318 330 hg mv quux fred
319 331 hg mv bleh barney
320 332 hg qrefresh --git
321 333 cat .hg/patches/bar
322 334 hg log -vC --template '{rev} {file_copies%filecopy}\n' -r .
323 335
324 336 echo '% strip again'
325 337 cd ..
326 338 hg init strip
327 339 cd strip
328 340 touch foo
329 341 hg add foo
330 342 hg ci -m 'add foo' -d '0 0'
331 343 echo >> foo
332 344 hg ci -m 'change foo 1' -d '0 0'
333 345 hg up -C 0
334 346 echo 1 >> foo
335 347 hg ci -m 'change foo 2' -d '0 0'
336 348 HGMERGE=true hg merge
337 349 hg ci -m merge -d '0 0'
338 350 hg log
339 351 hg strip 1 2>&1 | sed 's/\(saving bundle to \).*/\1/'
340 352 hg log
341 353 cd ..
342 354
343 355 echo '% qclone'
344 356 qlog()
345 357 {
346 358 echo 'main repo:'
347 359 hg log --template ' rev {rev}: {desc}\n'
348 360 echo 'patch repo:'
349 361 hg -R .hg/patches log --template ' rev {rev}: {desc}\n'
350 362 }
351 363 hg init qclonesource
352 364 cd qclonesource
353 365 echo foo > foo
354 366 hg add foo
355 367 hg ci -m 'add foo'
356 368 hg qinit -c
357 369 hg qnew patch1
358 370 echo bar >> foo
359 371 hg qrefresh -m 'change foo'
360 372 hg qci -m checkpoint
361 373 qlog
362 374 cd ..
363 375
364 376 # repo with patches applied
365 377 hg qclone qclonesource qclonedest
366 378 cd qclonedest
367 379 qlog
368 380 cd ..
369 381
370 382 # repo with patches unapplied
371 383 cd qclonesource
372 384 hg qpop -a
373 385 qlog
374 386 cd ..
375 387 hg qclone qclonesource qclonedest2
376 388 cd qclonedest2
377 389 qlog
378 390 cd ..
379 391
@@ -1,413 +1,424 b''
1 1 % help
2 2 mq extension - patch management and development
3 3
4 4 This extension lets you work with a stack of patches in a Mercurial
5 5 repository. It manages two stacks of patches - all known patches, and
6 6 applied patches (subset of known patches).
7 7
8 8 Known patches are represented as patch files in the .hg/patches
9 9 directory. Applied patches are both patch files and changesets.
10 10
11 11 Common tasks (use "hg help command" for more details):
12 12
13 13 prepare repository to work with patches qinit
14 14 create new patch qnew
15 15 import existing patch qimport
16 16
17 17 print patch series qseries
18 18 print applied patches qapplied
19 19 print name of top applied patch qtop
20 20
21 21 add known patch to applied stack qpush
22 22 remove patch from applied stack qpop
23 23 refresh contents of top applied patch qrefresh
24 24
25 25 list of commands (use "hg help -v mq" to show aliases and global options):
26 26
27 27 qapplied print the patches already applied
28 28 qclone clone main and patch repository at same time
29 29 qcommit commit changes in the queue repository
30 30 qdelete remove patches from queue
31 31 qdiff diff of the current patch
32 32 qfold fold the named patches into the current patch
33 33 qguard set or print guards for a patch
34 34 qheader Print the header of the topmost or specified patch
35 35 qimport import a patch
36 36 qinit init a new queue repository
37 37 qnew create a new patch
38 38 qnext print the name of the next patch
39 39 qpop pop the current patch off the stack
40 40 qprev print the name of the previous patch
41 41 qpush push the next patch onto the stack
42 42 qrefresh update the current patch
43 43 qrename rename a patch
44 44 qrestore restore the queue state saved by a rev
45 45 qsave save current queue state
46 46 qselect set or print guarded patches to push
47 47 qseries print the entire series file
48 48 qtop print the name of the current patch
49 49 qunapplied print the patches not yet applied
50 50 strip strip a revision and all later revs on the same branch
51 51 adding a
52 52 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
53 53 adding b/z
54 54 % qinit
55 55 % -R qinit
56 56 % qinit -c
57 57 A .hgignore
58 58 A series
59 59 % qnew implies add
60 60 A .hgignore
61 61 A series
62 62 A test.patch
63 63 % qinit; qinit -c
64 64 .hgignore:
65 65 syntax: glob
66 66 status
67 67 guards
68 68 series:
69 69 abort: repository already exists!
70 70 % qinit; <stuff>; qinit -c
71 71 adding A
72 72 adding B
73 73 A .hgignore
74 74 A A
75 75 A B
76 76 A series
77 77 .hgignore:
78 78 status
79 79 bleh
80 80 series:
81 81 A
82 82 B
83 83 % qnew -m
84 84 foo bar
85 85 % qrefresh
86 86 foo bar
87 87
88 88 diff -r xa
89 89 --- a/a
90 90 +++ b/a
91 91 @@ -1,1 +1,2 @@ a
92 92 a
93 93 +a
94 % empty qrefresh
95 revision:
96 patch:
97 foo bar
98
99 working dir diff:
100 --- a/a
101 +++ b/a
102 @@ -1,1 +1,2 @@ a
103 a
104 +a
94 105 % qpop
95 106 Patch queue now empty
96 107 % qpush
97 108 applying test.patch
98 109 Now at: test.patch
99 110 % pop/push outside repo
100 111 Patch queue now empty
101 112 applying test.patch
102 113 Now at: test.patch
103 114 % qrefresh in subdir
104 115 % pop/push -a in subdir
105 116 Patch queue now empty
106 117 applying test.patch
107 118 applying test2.patch
108 119 Now at: test2.patch
109 120 % qseries
110 121 test.patch
111 122 test2.patch
112 123 Now at: test.patch
113 124 0 A test.patch: foo bar
114 125 1 U test2.patch:
115 126 applying test2.patch
116 127 Now at: test2.patch
117 128 % qapplied
118 129 test.patch
119 130 test2.patch
120 131 % qtop
121 132 test2.patch
122 133 % qprev
123 134 test.patch
124 135 % qnext
125 136 All patches applied
126 137 % pop, qnext, qprev, qapplied
127 138 Now at: test.patch
128 139 test2.patch
129 140 Only one patch applied
130 141 test.patch
131 142 % commit should fail
132 143 abort: cannot commit over an applied mq patch
133 144 % push should fail
134 145 pushing to ../../k
135 146 abort: source has mq patches applied
136 147 % qunapplied
137 148 test2.patch
138 149 % qpush/qpop with index
139 150 applying test2.patch
140 151 Now at: test2.patch
141 152 Now at: test.patch
142 153 applying test1b.patch
143 154 Now at: test1b.patch
144 155 applying test2.patch
145 156 Now at: test2.patch
146 157 Now at: test1b.patch
147 158 Now at: test.patch
148 159 applying test1b.patch
149 160 applying test2.patch
150 161 Now at: test2.patch
151 162 % push should succeed
152 163 Patch queue now empty
153 164 pushing to ../../k
154 165 searching for changes
155 166 adding changesets
156 167 adding manifests
157 168 adding file changes
158 169 added 1 changesets with 1 changes to 1 files
159 170 % qpush/qpop error codes
160 171 applying test.patch
161 172 applying test1b.patch
162 173 applying test2.patch
163 174 Now at: test2.patch
164 175 % pops all patches and succeeds
165 176 Patch queue now empty
166 177 qpop -a succeeds
167 178 % does nothing and succeeds
168 179 no patches applied
169 180 qpop -a succeeds
170 181 % fails - nothing else to pop
171 182 no patches applied
172 183 qpop fails
173 184 % pushes a patch and succeeds
174 185 applying test.patch
175 186 Now at: test.patch
176 187 qpush succeeds
177 188 % pops a patch and succeeds
178 189 Patch queue now empty
179 190 qpop succeeds
180 191 % pushes up to test1b.patch and succeeds
181 192 applying test.patch
182 193 applying test1b.patch
183 194 Now at: test1b.patch
184 195 qpush test1b.patch succeeds
185 196 % does nothing and succeeds
186 197 qpush: test1b.patch is already at the top
187 198 qpush test1b.patch succeeds
188 199 % does nothing and succeeds
189 200 qpop: test1b.patch is already at the top
190 201 qpop test1b.patch succeeds
191 202 % fails - can't push to this patch
192 203 abort: cannot push to a previous patch: test.patch
193 204 qpush test.patch fails
194 205 % fails - can't pop to this patch
195 206 abort: patch test2.patch is not applied
196 207 qpop test2.patch fails
197 208 % pops up to test.patch and succeeds
198 209 Now at: test.patch
199 210 qpop test.patch succeeds
200 211 % pushes all patches and succeeds
201 212 applying test1b.patch
202 213 applying test2.patch
203 214 Now at: test2.patch
204 215 qpush -a succeeds
205 216 % does nothing and succeeds
206 217 all patches are currently applied
207 218 qpush -a succeeds
208 219 % fails - nothing else to push
209 220 patch series already fully applied
210 221 qpush fails
211 222 % does nothing and succeeds
212 223 all patches are currently applied
213 224 qpush test2.patch succeeds
214 225 % strip
215 226 adding x
216 227 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
217 228 saving bundle to
218 229 adding changesets
219 230 adding manifests
220 231 adding file changes
221 232 added 1 changesets with 1 changes to 1 files
222 233 (run 'hg update' to get a working copy)
223 234 % cd b; hg qrefresh
224 235 adding a
225 236 foo
226 237
227 238 diff -r cb9a9f314b8b a
228 239 --- a/a
229 240 +++ b/a
230 241 @@ -1,1 +1,2 @@ a
231 242 a
232 243 +a
233 244 diff -r cb9a9f314b8b b/f
234 245 --- /dev/null
235 246 +++ b/b/f
236 247 @@ -0,0 +1,1 @@
237 248 +f
238 249 % hg qrefresh .
239 250 foo
240 251
241 252 diff -r cb9a9f314b8b b/f
242 253 --- /dev/null
243 254 +++ b/b/f
244 255 @@ -0,0 +1,1 @@
245 256 +f
246 257 M a
247 258 % qpush failure
248 259 Patch queue now empty
249 260 applying foo
250 261 applying bar
251 262 1 out of 1 hunk ignored -- saving rejects to file foo.rej
252 263 patch failed, unable to continue (try -v)
253 264 patch failed, rejects left in working dir
254 265 Errors during apply, please fix and refresh bar
255 266 ? foo
256 267 ? foo.rej
257 268 new file
258 269
259 270 diff --git a/new b/new
260 271 new file mode 100755
261 272 --- /dev/null
262 273 +++ b/new
263 274 @@ -0,0 +1,1 @@
264 275 +foo
265 276 copy file
266 277
267 278 diff --git a/new b/copy
268 279 copy from new
269 280 copy to copy
270 281 Now at: new
271 282 applying copy
272 283 Now at: copy
273 284 diff --git a/new b/copy
274 285 copy from new
275 286 copy to copy
276 287 diff --git a/new b/copy
277 288 copy from new
278 289 copy to copy
279 290 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
280 291 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
281 292 adding branch
282 293 adding changesets
283 294 adding manifests
284 295 adding file changes
285 296 added 1 changesets with 1 changes to 1 files
286 297 (run 'hg update' to get a working copy)
287 298 Patch queue now empty
288 299 applying bar
289 300 Now at: bar
290 301 diff --git a/bar b/bar
291 302 new file mode 100644
292 303 --- /dev/null
293 304 +++ b/bar
294 305 @@ -0,0 +1,1 @@
295 306 +bar
296 307 diff --git a/foo b/baz
297 308 rename from foo
298 309 rename to baz
299 310 2 baz (foo)
300 311 diff --git a/bar b/bar
301 312 new file mode 100644
302 313 --- /dev/null
303 314 +++ b/bar
304 315 @@ -0,0 +1,1 @@
305 316 +bar
306 317 diff --git a/foo b/baz
307 318 rename from foo
308 319 rename to baz
309 320 2 baz (foo)
310 321
311 322 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
312 323 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
313 324 adding branch
314 325 adding changesets
315 326 adding manifests
316 327 adding file changes
317 328 added 1 changesets with 1 changes to 1 files
318 329 (run 'hg update' to get a working copy)
319 330 Patch queue now empty
320 331 applying bar
321 332 Now at: bar
322 333 diff --git a/foo b/bleh
323 334 rename from foo
324 335 rename to bleh
325 336 diff --git a/quux b/quux
326 337 new file mode 100644
327 338 --- /dev/null
328 339 +++ b/quux
329 340 @@ -0,0 +1,1 @@
330 341 +bar
331 342 3 bleh (foo)
332 343 diff --git a/foo b/barney
333 344 rename from foo
334 345 rename to barney
335 346 diff --git a/fred b/fred
336 347 new file mode 100644
337 348 --- /dev/null
338 349 +++ b/fred
339 350 @@ -0,0 +1,1 @@
340 351 +bar
341 352 3 barney (foo)
342 353 % strip again
343 354 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
344 355 merging foo
345 356 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
346 357 (branch merge, don't forget to commit)
347 358 changeset: 3:99615015637b
348 359 tag: tip
349 360 parent: 2:20cbbe65cff7
350 361 parent: 1:d2871fc282d4
351 362 user: test
352 363 date: Thu Jan 01 00:00:00 1970 +0000
353 364 summary: merge
354 365
355 366 changeset: 2:20cbbe65cff7
356 367 parent: 0:53245c60e682
357 368 user: test
358 369 date: Thu Jan 01 00:00:00 1970 +0000
359 370 summary: change foo 2
360 371
361 372 changeset: 1:d2871fc282d4
362 373 user: test
363 374 date: Thu Jan 01 00:00:00 1970 +0000
364 375 summary: change foo 1
365 376
366 377 changeset: 0:53245c60e682
367 378 user: test
368 379 date: Thu Jan 01 00:00:00 1970 +0000
369 380 summary: add foo
370 381
371 382 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
372 383 saving bundle to
373 384 saving bundle to
374 385 adding branch
375 386 adding changesets
376 387 adding manifests
377 388 adding file changes
378 389 added 1 changesets with 1 changes to 1 files
379 390 (run 'hg update' to get a working copy)
380 391 changeset: 1:20cbbe65cff7
381 392 tag: tip
382 393 user: test
383 394 date: Thu Jan 01 00:00:00 1970 +0000
384 395 summary: change foo 2
385 396
386 397 changeset: 0:53245c60e682
387 398 user: test
388 399 date: Thu Jan 01 00:00:00 1970 +0000
389 400 summary: add foo
390 401
391 402 % qclone
392 403 main repo:
393 404 rev 1: change foo
394 405 rev 0: add foo
395 406 patch repo:
396 407 rev 0: checkpoint
397 408 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
398 409 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
399 410 main repo:
400 411 rev 0: add foo
401 412 patch repo:
402 413 rev 0: checkpoint
403 414 Patch queue now empty
404 415 main repo:
405 416 rev 0: add foo
406 417 patch repo:
407 418 rev 0: checkpoint
408 419 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
409 420 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
410 421 main repo:
411 422 rev 0: add foo
412 423 patch repo:
413 424 rev 0: checkpoint
@@ -1,65 +1,73 b''
1 1 #!/bin/sh
2 2
3 3 hg init t
4 4 cd t
5 5 hg branches
6 6
7 7 echo foo > a
8 8 hg add a
9 9 hg ci -m "initial" -d "1000000 0"
10 10 hg branch foo
11 11 hg branch
12 12 hg ci -m "add branch name" -d "1000000 0"
13 13 hg branch bar
14 14 hg ci -m "change branch name" -d "1000000 0"
15 15 hg branch ""
16 16 hg ci -m "clear branch name" -d "1000000 0"
17 17
18 18 hg co foo
19 19 hg branch
20 20 echo bleah > a
21 21 hg ci -m "modify a branch" -d "1000000 0"
22 22
23 23 hg merge
24 24 hg branch
25 25 hg ci -m "merge" -d "1000000 0"
26 26 hg log
27 27
28 28 hg branches
29 29 hg branches -q
30 30
31 31 echo % test for invalid branch cache
32 32 hg rollback
33 33 cp .hg/branches.cache .hg/bc-invalid
34 34 hg log -r foo
35 35 cp .hg/bc-invalid .hg/branches.cache
36 36 hg --debug log -r foo
37 37 rm .hg/branches.cache
38 38 echo corrupted > .hg/branches.cache
39 39 hg log -qr foo
40 40 cat .hg/branches.cache
41 41
42 42 echo % test for different branch cache features
43 43 echo '4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4' > .hg/branches.cache
44 44 hg branches --debug
45 45 echo ' features: unnamed dummy foo bar' > .hg/branches.cache
46 46 hg branches --debug
47 47 echo ' features: dummy' > .hg/branches.cache
48 48 hg branches --debug
49 49
50 50 echo % test old hg reading branch cache with feature list
51 51 python << EOF
52 52 import binascii
53 53 f = file('.hg/branches.cache')
54 54 lines = f.read().split('\n')
55 55 f.close()
56 56 firstline = lines[0]
57 57 last, lrev = lines.pop(0).rstrip().split(" ", 1)
58 58 try:
59 59 last, lrev = binascii.unhexlify(last), int(lrev)
60 60 except ValueError, inst:
61 61 if str(inst) == "invalid literal for int():%s" % firstline:
62 62 print "ValueError raised correctly, good."
63 63 else:
64 64 print "ValueError: %s" % inst
65 65 EOF
66
67 echo % update with no arguments: tipmost revision of the current branch
68 hg up -q -C 0
69 hg up -q
70 hg id
71 hg up -q 1
72 hg up -q
73 hg id
@@ -1,96 +1,99 b''
1 1 foo
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 foo
4 4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 5 (branch merge, don't forget to commit)
6 6 foo
7 7 changeset: 5:5f8fb06e083e
8 8 branch: foo
9 9 tag: tip
10 10 parent: 4:4909a3732169
11 11 parent: 3:bf1bc2f45e83
12 12 user: test
13 13 date: Mon Jan 12 13:46:40 1970 +0000
14 14 summary: merge
15 15
16 16 changeset: 4:4909a3732169
17 17 branch: foo
18 18 parent: 1:b699b1cec9c2
19 19 user: test
20 20 date: Mon Jan 12 13:46:40 1970 +0000
21 21 summary: modify a branch
22 22
23 23 changeset: 3:bf1bc2f45e83
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: clear branch name
27 27
28 28 changeset: 2:67ec16bde7f1
29 29 branch: bar
30 30 user: test
31 31 date: Mon Jan 12 13:46:40 1970 +0000
32 32 summary: change branch name
33 33
34 34 changeset: 1:b699b1cec9c2
35 35 branch: foo
36 36 user: test
37 37 date: Mon Jan 12 13:46:40 1970 +0000
38 38 summary: add branch name
39 39
40 40 changeset: 0:be8523e69bf8
41 41 user: test
42 42 date: Mon Jan 12 13:46:40 1970 +0000
43 43 summary: initial
44 44
45 45 foo 5:5f8fb06e083e
46 46 3:bf1bc2f45e83
47 47 bar 2:67ec16bde7f1
48 48 foo
49 49
50 50 bar
51 51 % test for invalid branch cache
52 52 rolling back last transaction
53 53 changeset: 4:4909a3732169
54 54 branch: foo
55 55 tag: tip
56 56 parent: 1:b699b1cec9c2
57 57 user: test
58 58 date: Mon Jan 12 13:46:40 1970 +0000
59 59 summary: modify a branch
60 60
61 61 Invalid branch cache: unknown tip
62 62 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
63 63 branch: foo
64 64 tag: tip
65 65 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
66 66 parent: -1:0000000000000000000000000000000000000000
67 67 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
68 68 user: test
69 69 date: Mon Jan 12 13:46:40 1970 +0000
70 70 files: a
71 71 extra: branch=foo
72 72 description:
73 73 modify a branch
74 74
75 75
76 76 4:4909a3732169
77 77 features: unnamed
78 78 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
79 79 bf1bc2f45e834c75404d0ddab57d53beab56e2f8
80 80 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
81 81 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
82 82 % test for different branch cache features
83 83 branch cache: no features specified
84 84 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
85 85 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
86 86 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
87 87 branch cache: unknown features: dummy, foo, bar
88 88 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
89 89 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
90 90 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
91 91 branch cache: missing features: unnamed
92 92 foo 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
93 93 3:bf1bc2f45e834c75404d0ddab57d53beab56e2f8
94 94 bar 2:67ec16bde7f1575d523313b9bca000f6a6f12dca
95 95 % test old hg reading branch cache with feature list
96 96 ValueError raised correctly, good.
97 % update with no arguments: tipmost revision of the current branch
98 bf1bc2f45e83
99 4909a3732169 (foo) tip
General Comments 0
You need to be logged in to leave comments. Login now