##// END OF EJS Templates
merge with crew
Thomas Arendsen Hein -
r6126:11a09d57 merge default
parent child Browse files
Show More
@@ -1,2350 +1,2347
1 1 # queue.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial import commands, cmdutil, hg, patch, revlog, util
34 34 from mercurial import repair
35 35 import os, sys, re, errno
36 36
# register qclone in the list of commands that run without a local repo
commands.norepo += " qclone"

# Patch names look like unix file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
42 42
class statusentry:
    """One entry of the status file: an applied patch.

    An entry pairs a changeset hash ('rev') with a patch name and is
    serialized as 'rev:name'.  It can be built either from such a
    pre-formatted line or from an explicit (rev, name) pair; a line
    without a ':' separator yields an unusable (None, None) entry.
    """
    def __init__(self, rev, name=None):
        if name:
            self.rev, self.name = rev, name
        else:
            parts = rev.split(':', 1)
            if len(parts) == 2:
                self.rev, self.name = parts
            else:
                # malformed status line: neither field is recoverable
                self.rev = self.name = None

    def __str__(self):
        return self.rev + ':' + self.name
56 56
57 57 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Initialize the patch queue rooted at 'path'.

        ui: Mercurial ui object used for all output.
        path: the repository's .hg directory; the queue lives in
        path/patches unless 'patchdir' overrides it.
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []           # list of statusentry, bottom to top
        self.full_series = []       # raw series file lines (incl. comments)
        self.applied_dirty = 0      # status file needs rewriting
        self.series_dirty = 0       # series file needs rewriting
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None   # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None       # lazily built by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
81 81
82 82 def diffopts(self):
83 83 if self._diffopts is None:
84 84 self._diffopts = patch.diffopts(self.ui)
85 85 return self._diffopts
86 86
87 87 def join(self, *p):
88 88 return os.path.join(self.path, *p)
89 89
90 90 def find_series(self, patch):
91 91 pre = re.compile("(\s*)([^#]+)")
92 92 index = 0
93 93 for l in self.full_series:
94 94 m = pre.match(l)
95 95 if m:
96 96 s = m.group(2)
97 97 s = s.rstrip()
98 98 if s == patch:
99 99 return index
100 100 index += 1
101 101 return None
102 102
    # a guard annotation in a series comment: '#' followed by '+' or '-'
    # and a guard name (no '#', no whitespace, no leading +/-)
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parse_series(self):
        """Rebuild self.series and self.series_guards from full_series.

        series: patch names in file order, comment-only lines skipped.
        series_guards: per-patch list of guard strings from the comment.
        Raises util.Abort when a patch name appears twice.
        """
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                # no comment on this line
                patch = l
                comment = ''
            elif h == 0:
                # comment-only line
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
125 125
126 126 def check_guard(self, guard):
127 127 bad_chars = '# \t\r\n\f'
128 128 first = guard[0]
129 129 for c in '-+':
130 130 if first == c:
131 131 return (_('guard %r starts with invalid character: %r') %
132 132 (guard, c))
133 133 for c in bad_chars:
134 134 if c in guard:
135 135 return _('invalid character in guard %r: %r') % (guard, c)
136 136
137 137 def set_active(self, guards):
138 138 for guard in guards:
139 139 bad = self.check_guard(guard)
140 140 if bad:
141 141 raise util.Abort(bad)
142 142 guards = dict.fromkeys(guards).keys()
143 143 guards.sort()
144 144 self.ui.debug('active guards: %s\n' % ' '.join(guards))
145 145 self.active_guards = guards
146 146 self.guards_dirty = True
147 147
    def active(self):
        """Return the list of active guards, loading it lazily from disk.

        Invalid guard names found in the guards file are reported on
        stderr and skipped rather than aborting.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file just means no guards are active
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
164 164
    def set_guards(self, idx, guards):
        """Replace the guards of series entry 'idx' with 'guards'.

        Each guard must be '+name' or '-name' with a valid name;
        raises util.Abort otherwise.
        """
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the existing guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
178 178
179 179 def pushable(self, idx):
180 180 if isinstance(idx, str):
181 181 idx = self.series.index(idx)
182 182 patchguards = self.series_guards[idx]
183 183 if not patchguards:
184 184 return True, None
185 185 default = False
186 186 guards = self.active()
187 187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
188 188 if exactneg:
189 189 return False, exactneg[0]
190 190 pos = [g for g in patchguards if g[0] == '+']
191 191 exactpos = [g for g in pos if g[1:] in guards]
192 192 if pos:
193 193 if exactpos:
194 194 return True, exactpos[0]
195 195 return False, pos
196 196 return True, ''
197 197
    def explain_pushable(self, idx, all_patches=False):
        """Report why series entry 'idx' was allowed or skipped.

        Only prints when all_patches is set or the ui is verbose;
        'allowing' messages go to stdout in all_patches mode, skips to
        stderr otherwise.
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
222 222
223 223 def save_dirty(self):
224 224 def write_list(items, path):
225 225 fp = self.opener(path, 'w')
226 226 for i in items:
227 227 fp.write("%s\n" % i)
228 228 fp.close()
229 229 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
230 230 if self.series_dirty: write_list(self.full_series, self.series_path)
231 231 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
232 232
    def readheaders(self, patch):
        """Parse the header of patch file 'patch'.

        Returns (message, comments, user, date, diffstart > 1):
        message is the commit message lines, comments everything up to
        the diff (message included), and the last element tells whether
        real diff content ('+++ ' or 'diff --git') was found.
        """
        def eatdiff(lines):
            # drop trailing diff-start marker lines from the header
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None      # current header dialect: hgpatch / tag / tagdone
        subject = None
        diffstart = 0      # 0: none, 1: saw '--- ', 2: confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # '--- ' only starts a diff when '+++ ' follows directly
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
309 309
    def removeundo(self, repo):
        """Delete the repository's undo file if present.

        Failures are reported but not fatal.  NOTE(review): presumably
        this prevents 'hg rollback' from crossing an mq operation —
        confirm against callers.
        """
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
318 318
    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        """Write the diff between node1 and node2 (or the working dir)
        to fp (default: stdout), narrowed by 'files'/'opts' patterns.

        NOTE(review): 'opts={}' is a shared mutable default; it looks
        read-only here, but confirm cmdutil.matchpats never mutates it.
        """
        fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)

        patch.diff(repo, node1, node2, fns, match=matchfn,
                   fp=fp, changes=changes, opts=self.diffopts())
325 325
    def mergeone(self, repo, mergeq, head, patch, rev):
        """Apply one patch during qpush --merge, merging on conflict.

        First tries a strict apply of 'patch' on top of 'head'; on
        failure, strips the partial result, merges with 'rev' (the
        patch's revision in 'mergeq') and regenerates the patch file
        from the merged result.  Returns (err, newhead).
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo.changectx(rev)
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(None, ctx.description(), ctx.user(), force=1)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        try:
            message, comments, user, date, patchfound = mergeq.readheaders(patch)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file to match the merged result
        patchf = self.opener(patch, "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
363 363
    def qparents(self, repo, rev=None):
        """Return the parent revision the queue logically sits on.

        With rev=None: the first dirstate parent, or — when the working
        dir is a merge — the top applied patch (None if none applied).
        With a rev: the parent of 'rev' that is an applied patch,
        preferring an applied parent on merges, else the first parent.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
382 382
    def mergepatch(self, repo, mergeq, series):
        """qpush --merge: merge each patch of 'series' from 'mergeq'.

        Returns (err, head) like apply(); stops at the first patch
        that is missing or not applied in 'mergeq'.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
421 421
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (ok, files, fuzz): whether the apply succeeded, the
        dict of touched files, and the fuzz patch.patch() reported.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
436 436
437 437 def apply(self, repo, series, list=False, update_status=True,
438 438 strict=False, patchdir=None, merge=None, all_files={}):
439 439 wlock = lock = tr = None
440 440 try:
441 441 wlock = repo.wlock()
442 442 lock = repo.lock()
443 443 tr = repo.transaction()
444 444 try:
445 445 ret = self._apply(repo, series, list, update_status,
446 446 strict, patchdir, merge, all_files=all_files)
447 447 tr.close()
448 448 self.save_dirty()
449 449 return ret
450 450 except:
451 451 try:
452 452 tr.abort()
453 453 finally:
454 454 repo.invalidate()
455 455 repo.dirstate.invalidate()
456 456 raise
457 457 finally:
458 458 del tr, lock, wlock
459 459 self.removeundo(repo)
460 460
461 461 def _apply(self, repo, series, list=False, update_status=True,
462 462 strict=False, patchdir=None, merge=None, all_files={}):
463 463 # TODO unify with commands.py
464 464 if not patchdir:
465 465 patchdir = self.path
466 466 err = 0
467 467 n = None
468 468 for patchname in series:
469 469 pushable, reason = self.pushable(patchname)
470 470 if not pushable:
471 471 self.explain_pushable(patchname, all_patches=True)
472 472 continue
473 473 self.ui.warn("applying %s\n" % patchname)
474 474 pf = os.path.join(patchdir, patchname)
475 475
476 476 try:
477 477 message, comments, user, date, patchfound = self.readheaders(patchname)
478 478 except:
479 479 self.ui.warn("Unable to read %s\n" % patchname)
480 480 err = 1
481 481 break
482 482
483 483 if not message:
484 484 message = "imported patch %s\n" % patchname
485 485 else:
486 486 if list:
487 487 message.append("\nimported patch %s" % patchname)
488 488 message = '\n'.join(message)
489 489
490 490 (patcherr, files, fuzz) = self.patch(repo, pf)
491 491 all_files.update(files)
492 492 patcherr = not patcherr
493 493
494 494 if merge and files:
495 495 # Mark as removed/merged and update dirstate parent info
496 496 removed = []
497 497 merged = []
498 498 for f in files:
499 499 if os.path.exists(repo.wjoin(f)):
500 500 merged.append(f)
501 501 else:
502 502 removed.append(f)
503 503 for f in removed:
504 504 repo.dirstate.remove(f)
505 505 for f in merged:
506 506 repo.dirstate.merge(f)
507 507 p1, p2 = repo.dirstate.parents()
508 508 repo.dirstate.setparents(p1, merge)
509 509 files = patch.updatedir(self.ui, repo, files)
510 510 n = repo.commit(files, message, user, date, force=1)
511 511
512 512 if n == None:
513 513 raise util.Abort(_("repo commit failed"))
514 514
515 515 if update_status:
516 516 self.applied.append(statusentry(revlog.hex(n), patchname))
517 517
518 518 if patcherr:
519 519 if not patchfound:
520 520 self.ui.warn("patch %s is empty\n" % patchname)
521 521 err = 0
522 522 else:
523 523 self.ui.warn("patch failed, rejects left in working dir\n")
524 524 err = 1
525 525 break
526 526
527 527 if fuzz and strict:
528 528 self.ui.warn("fuzz found when applying patch, stopping\n")
529 529 err = 1
530 530 break
531 531 return (err, n)
532 532
    def delete(self, repo, patches, opts):
        """qdelete: forget patches from the series.

        Named patches must not be applied.  With --rev, applied
        revisions are accepted bottom-up from the stack and dropped
        from the applied list.  Without --keep the patch files are
        removed as well.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # revisions may only be removed bottom-up from the stack
                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        # delete from the end so earlier indices stay valid
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
584 584
585 585 def check_toppatch(self, repo):
586 586 if len(self.applied) > 0:
587 587 top = revlog.bin(self.applied[-1].rev)
588 588 pp = repo.dirstate.parents()
589 589 if top not in pp:
590 590 raise util.Abort(_("working directory revision is not qtip"))
591 591 return top
592 592 return None
593 593 def check_localchanges(self, repo, force=False, refresh=True):
594 594 m, a, r, d = repo.status()[:4]
595 595 if m or a or r or d:
596 596 if not force:
597 597 if refresh:
598 598 raise util.Abort(_("local changes found, refresh first"))
599 599 else:
600 600 raise util.Abort(_("local changes found"))
601 601 return m, a, r, d
602 602
    # names that would clash with the queue's own control files
    _reserved = ('series', 'status', 'guards')
    def check_reserved_name(self, name):
        """Abort if 'name' is reserved or shadows .hg/.mq paths."""
        if (name in self._reserved or name.startswith('.hg')
            or name.startswith('.mq')):
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)
609 609
    def new(self, repo, patch, *pats, **opts):
        """qnew: create a new patch 'patch' on top of the stack.

        Commits the matched outstanding changes (if any) as the new
        patch, writes the patch file with an optional user/date header,
        and records it in the series and status files.
        """
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        self.check_reserved_name(patch)
        if os.path.exists(self.join(patch)):
            raise util.Abort(_('patch "%s" already exists') % patch)
        if opts.get('include') or opts.get('exclude') or pats:
            fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
            m, a, r, d = repo.status(files=fns, match=match)[:4]
        else:
            # without patterns, any local change must be forced in
            m, a, r, d = self.check_localchanges(repo, force)
            fns, match, anypats = cmdutil.matchpats(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        wlock = repo.wlock()
        try:
            insert = self.full_series_end()
            commitmsg = msg and msg or ("[mq]: %s" % patch)
            n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
            if n == None:
                raise util.Abort(_("repo commit failed"))
            self.full_series[insert:insert] = [patch]
            self.applied.append(statusentry(revlog.hex(n), patch))
            self.parse_series()
            self.series_dirty = 1
            self.applied_dirty = 1
            p = self.opener(patch, "w")
            if date:
                # a date forces the full hg changeset patch header
                p.write("# HG changeset patch\n")
                if user:
                    p.write("# User " + user + "\n")
                p.write("# Date " + date + "\n")
                p.write("\n")
            elif user:
                p.write("From: " + user + "\n")
                p.write("\n")
            if msg:
                msg = msg + "\n"
                p.write(msg)
            p.close()
            # release the lock before operating on the queue repo
            wlock = None
            r = self.qrepo()
            if r: r.add([patch])
            if commitfiles:
                self.refresh(repo, short=True, git=opts.get('git'))
            self.removeundo(repo)
        finally:
            del wlock
660 660
    def strip(self, repo, rev, update=True, backup="all"):
        """Remove revision 'rev' and its descendants from the repo.

        With update=True the working directory is first cleaned to the
        queue parent of 'rev'.  'backup' is passed to repair.strip.
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            del lock, wlock
680 680
681 681 def isapplied(self, patch):
682 682 """returns (index, rev, patch)"""
683 683 for i in xrange(len(self.applied)):
684 684 a = self.applied[i]
685 685 if a.name == patch:
686 686 return (i, a.rev, a.name)
687 687 return None
688 688
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch identifier to a series name.

        Raises util.Abort when nothing matches; returns None for a
        None/empty argument.
        """
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, unique substring, or the qtip/qbase aliases
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None
        if patch == None:
            return None

        # we don't want to return a partial match until we make
        # sure the file name passed in does not exist (checked below)
        res = partial_name(patch)
        if res and res == patch:
            return res

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if sno < len(self.series):
                    return self.series[sno]
            if not strict:
                # return any partial match made above
                if res:
                    return res
                # trailing '-N': the patch N entries before the named one
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # trailing '+N': the patch N entries after the named one
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
761 761
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None):
        """qpush: apply patches up to (and including) 'patch'.

        Without 'patch', applies the next unapplied patch.  Returns
        the error status from apply/mergepatch; if patching raises,
        the working directory is reverted and created files removed
        before re-raising.
        """
        wlock = repo.wlock()
        try:
            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    if info[0] < len(self.series) - 1:
                        self.ui.warn(
                            _('qpush: %s is already at the top\n') % patch)
                    else:
                        self.ui.warn(_('all patches are currently applied\n'))
                    return

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            if self.series_end() == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            start = self.series_end()
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                # clean up, then re-raise whatever went wrong
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status()[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise
            top = self.applied[-1].name
            if ret[0]:
                self.ui.write(
                    "Errors during apply, please fix and refresh %s\n" % top)
            else:
                self.ui.write("Now at: %s\n" % top)
            return ret[0]
        finally:
            del wlock
832 832
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """qpop: unapply patches until 'patch' is the new top.

        Without 'patch', pops the top patch (or everything with
        all=True).  The working directory is updated by a simplified
        form of hg.update, then the popped revisions are stripped.
        """
        def getfile(f, rev, flags):
            # restore file f to its content at 'rev'
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if not update:
                # popping a revision the dirstate sits on forces an update
                parents = repo.dirstate.parents()
                rr = [ revlog.bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn("qpop: forcing dirstate update\n")
                        update = True

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            end = len(self.applied)
            if not patch:
                if all:
                    popi = 0
                else:
                    popi = len(self.applied) - 1
            else:
                popi = info[0] + 1
                if popi >= end:
                    self.ui.warn("qpop: %s is already at the top\n" % patch)
                    return
            info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

            start = info[0]
            rev = revlog.bin(info[1])

            if update:
                top = self.check_toppatch(repo)

            if repo.changelog.heads(rev) != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort("popping would remove a revision not "
                                 "managed by this patch queue")

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d, u = repo.status(qp, top)[:5]
                if d:
                    raise util.Abort("deletions found between repo revs")
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    # best effort: prune now-empty directories
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, revlog.nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write("Now at: %s\n" % self.applied[-1].name)
            else:
                self.ui.write("Patch queue now empty\n")
        finally:
            del wlock
923 923
924 924 def diff(self, repo, pats, opts):
925 925 top = self.check_toppatch(repo)
926 926 if not top:
927 927 self.ui.write("No patches applied\n")
928 928 return
929 929 qp = self.qparents(repo, top)
930 930 if opts.get('git'):
931 931 self.diffopts().git = True
932 932 self.printdiff(repo, qp, files=pats, opts=opts)
933 933
    def refresh(self, repo, pats=None, **opts):
        """Regenerate the topmost applied patch from the working directory.

        Rewrites the patch file (header and diff), then strips and
        recommits the top changeset so the repository matches the new
        patch contents.  Returns 1 when no patch is applied.
        Raises util.Abort if the top patch revision has children.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = revlog.bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort("cannot refresh a revision with children")
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            message, comments, user, date, patchfound = self.readheaders(patchfn)

            patchf = self.opener(patchfn, 'r+')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            msg = opts.get('msg', '').rstrip()
            if msg and comments:
                # Remove existing message, keeping the rest of the comments
                # fields.
                # If comments contains 'subject: ', message will prepend
                # the field and a blank line.
                if message:
                    subj = 'subject: ' + message[0].lower()
                    for i in xrange(len(comments)):
                        if subj == comments[i].lower():
                            del comments[i]
                            message = message[2:]
                            break
                ci = 0
                # delete each remaining message line from the comments list
                for mi in xrange(len(message)):
                    while message[mi] != comments[ci]:
                        ci += 1
                    del comments[ci]

            def setheaderfield(comments, prefixes, new):
                # Update all references to a field in the patch header.
                # If none found, add it email style.
                res = False
                for prefix in prefixes:
                    for i in xrange(len(comments)):
                        if comments[i].startswith(prefix):
                            comments[i] = prefix + new
                            res = True
                            break
                return res

            newuser = opts.get('user')
            if newuser:
                if not setheaderfield(comments, ['From: ', '# User '], newuser):
                    try:
                        patchheaderat = comments.index('# HG changeset patch')
                        comments.insert(patchheaderat + 1,'# User ' + newuser)
                    except ValueError:
                        comments = ['From: ' + newuser, ''] + comments
                user = newuser

            newdate = opts.get('date')
            if newdate:
                if setheaderfield(comments, ['# Date '], newdate):
                    date = newdate

            if msg:
                comments.append(msg)

            # rewrite the patch file from scratch: header first, diff below
            patchf.seek(0)
            patchf.truncate()

            if comments:
                comments = "\n".join(comments) + '\n\n'
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here.  We update the dirstate in place and strip
                # off the tip commit.  Then just commit the current directory
                # tree.  We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already
                #
                # this should really read:
                #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                if opts.get('short'):
                    filelist = mm + aa + dd
                    match = dict.fromkeys(filelist).__contains__
                else:
                    filelist = None
                    match = util.always
                m, a, r, d, u = repo.status(files=filelist, match=match)[:5]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch.  In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                    dd.append(x)

                m = util.unique(mm)
                r = util.unique(dd)
                a = util.unique(aa)
                c = [filter(matchfn, l) for l in (m, a, r, [], u)]
                filelist = util.unique(c[0] + c[1] + c[2])
                patch.diff(repo, patchparent, files=filelist, match=matchfn,
                           fp=patchf, changes=c, opts=self.diffopts())
                patchf.close()

                # update the dirstate in place so the strip + recommit
                # below sees the refreshed file states
                repo.dirstate.setparents(*cparents)
                copies = {}
                for dst in a:
                    src = repo.dirstate.copied(dst)
                    if src is not None:
                        copies.setdefault(src, []).append(dst)
                    repo.dirstate.add(dst)
                # remember the copies between patchparent and tip
                # this may be slow, so don't do it if we're not tracking copies
                if self.diffopts().git:
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies[src[0]] = copies.get(dst, [])
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                for src, dsts in copies.iteritems():
                    for dst in dsts:
                        repo.dirstate.copy(src, dst)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(message)
                else:
                    message = msg

                if not user:
                    user = changes[1]

                # replace the old top changeset with a fresh commit of the
                # refreshed tree
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, top, update=False,
                           backup='strip')
                n = repo.commit(filelist, message, user, date, match=matchfn,
                                force=1)
                self.applied.append(statusentry(revlog.hex(n), patchfn))
                self.removeundo(repo)
            else:
                # slow path: top patch is not tip; regenerate the diff,
                # then pop and re-push the patch to rebuild the changeset
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.close()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            del wlock
1159 1159
1160 1160 def init(self, repo, create=False):
1161 1161 if not create and os.path.isdir(self.path):
1162 1162 raise util.Abort(_("patch queue directory already exists"))
1163 1163 try:
1164 1164 os.mkdir(self.path)
1165 1165 except OSError, inst:
1166 1166 if inst.errno != errno.EEXIST or not create:
1167 1167 raise
1168 1168 if create:
1169 1169 return self.qrepo(create=True)
1170 1170
1171 1171 def unapplied(self, repo, patch=None):
1172 1172 if patch and patch not in self.series:
1173 1173 raise util.Abort(_("patch %s is not in series file") % patch)
1174 1174 if not patch:
1175 1175 start = self.series_end()
1176 1176 else:
1177 1177 start = self.series.index(patch) + 1
1178 1178 unapplied = []
1179 1179 for i in xrange(start, len(self.series)):
1180 1180 pushable, reason = self.pushable(i)
1181 1181 if pushable:
1182 1182 unapplied.append((i, self.series[i]))
1183 1183 self.explain_pushable(i)
1184 1184 return unapplied
1185 1185
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print patches from the series file.

        Without `missing`, prints `length` series entries beginning at
        `start`, flagged A(pplied)/U(npushable-free)/G(uarded); with
        `status` only entries matching that flag are shown (unless
        verbose).  With `missing`, prints files in the patch directory
        that are not listed in the series file.  `summary` appends the
        first line of each patch's message.
        """
        def displayname(patchname):
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = dict.fromkeys([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files not tracked in the
            # series file (skipping mq's own bookkeeping files)
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1229 1229
1230 1230 def issaveline(self, l):
1231 1231 if l.name == '.hg.patches.save.line':
1232 1232 return True
1233 1233
1234 1234 def qrepo(self, create=False):
1235 1235 if create or os.path.isdir(self.join(".hg")):
1236 1236 return hg.repository(self.ui, path=self.path, create=create)
1237 1237
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state saved by qsave in changeset `rev`.

        Parses the saved series/applied data out of the changeset
        description.  With `delete`, also strips the save changeset
        (only if it is a head); with `qupdate`, updates the versioned
        queue repository to the saved parents.  Returns 1 on failure.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                # saved queue repository parents, space separated hex nodes
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart != None:
                # lines after 'Patch Data:' are status entries: applied
                # patches carry a revision, bare names are unapplied
                l = lines[i].rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart == None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                # only update the working dir if it sat on the save entry
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                self.ui.status(_("queue directory updating\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                hg.clean(r, qpp[0])
1293 1293
    def save(self, repo, msg=None):
        """Commit the current queue state as a qsave checkpoint.

        Serializes applied/series data into a changeset description and
        commits it, recording a '.hg.patches.save.line' marker entry.
        Returns 1 if nothing is applied, the state is already saved, or
        the commit fails.
        """
        if len(self.applied) == 0:
            self.ui.warn("save: no patches applied, exiting\n")
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn("status is already saved\n")
            return 1

        # unapplied series entries are stored with an empty rev prefix
        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = "hg patches saved state"
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn("repo commit failed\n")
            return 1
        self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1321 1321
1322 1322 def full_series_end(self):
1323 1323 if len(self.applied) > 0:
1324 1324 p = self.applied[-1].name
1325 1325 end = self.find_series(p)
1326 1326 if end == None:
1327 1327 return len(self.full_series)
1328 1328 return end + 1
1329 1329 return 0
1330 1330
    def series_end(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def next(start):
            # with all_patches, return the raw index; otherwise skip
            # forward over guarded (unpushable) patches
            if all_patches:
                return start
            i = start
            while i < len(self.series):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explain_pushable(i)
                i += 1
            return i
        if len(self.applied) > 0:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                # top applied patch is not in the series file
                return 0
            return next(end + 1)
        return next(end)
1356 1356
1357 1357 def appliedname(self, index):
1358 1358 pname = self.applied[index].name
1359 1359 if not self.ui.verbose:
1360 1360 p = pname
1361 1361 else:
1362 1362 p = str(self.series.index(pname)) + " " + pname
1363 1363 return p
1364 1364
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        `files` are patch files ('-' reads stdin; with `existing`, names
        of files already in the patch directory).  With `rev`, place
        existing repository revisions under mq control instead — the
        revisions must form a linear path to qbase (when patches are
        applied) or to a head.  `patchname` overrides the generated
        name (single import only); `force` overwrites existing patch
        files; `git` selects git-format diffs for --rev imports.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            # process revisions from newest to oldest
            rev.sort(lambda x, y: cmp(y, x))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = revlog.hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [revlog.bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != revlog.nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                # imported revisions go at the bottom of the stack
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(revlog.hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                self.check_reserved_name(patchname)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = file(filename, 'rb').read()
                except IOError:
                    raise util.Abort(_("unable to read %s") % patchname)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            checkseries(patchname)
            # file imports are appended after the last applied patch
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn("adding %s to series file\n" % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1480 1480
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The patches must be applied
    and at the base of the stack. This option is useful when the patches
    have been applied upstream.

    With --keep, the patch files are preserved in the patch directory."""
    # delegate to the queue object, then persist series/status changes
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1497 1497
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    # list everything up to and including `patch`, or up to the last
    # applied patch when no name was given
    if not patch:
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1508 1508
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)
    # consistency fix: propagate qseries' result like the sibling
    # `applied` command does
    return q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1519 1519
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    q = repo.mq
    # hand everything over to the queue object, then persist its state
    q.qimport(repo, filename,
              patchname=opts['name'],
              rev=opts['rev'],
              existing=opts['existing'],
              force=opts['force'],
              git=opts['git'])
    q.save_dirty()
    return 0
1542 1542
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        # seed the nested queue repo with an ignore file and an empty
        # series file, then schedule both for addition
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            for pattern in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                            'status\n', 'guards\n'):
                fp.write(pattern)
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1568 1568
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied.  If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination.  If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default.  Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # default location of the nested patch repository
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    patchespath = opts['patches'] or patchdir(sr)
    try:
        pr = hg.repository(ui, patchespath)
    except hg.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # first applied patch: everything from here up is stripped
            # from the destination after cloning
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # can't strip remotely; clone only the unpatched heads
                # plus qbase's parent instead
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
                        pull=opts['pull'], update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1626 1626
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    # commit inside the nested patch repository, not the main one
    commands.commit(r.ui, r, *pats, **opts)
1633 1633
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1638 1638
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # index just past the last applied patch; 0 when nothing is applied
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=t-1, length=1, status='A',
                     summary=opts.get('summary'))
1649 1649
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    # series_end() points at the next pushable patch, or past the end
    pos = q.series_end()
    if pos == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1658 1658
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    if count == 0:
        ui.write("No patches applied\n")
        return 1
    # the previous patch is the second-to-last applied one
    return q.qseries(repo, start=count-2, length=1, status='A',
                     summary=opts.get('summary'))
1671 1671
def setupheaderopts(ui, opts):
    """Fill empty --user/--date opts from --currentuser/--currentdate."""
    def fill(name, value):
        # only override when the option is unset and the matching
        # --current<name> flag was given
        if not opts[name] and opts['current' + name]:
            opts[name] = value
    fill('user', ui.username())
    fill('date', "%d %d" % util.makedate())
1678 1678
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them. You may also use -I, -X, and/or a list of
    files after the patch name to add only changes to matching files
    to the new patch, leaving the rest as uncommitted modifications.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is '[mq]: PATCH'"""
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        # let the user edit the commit message interactively
        msg = ui.edit(msg, ui.username())
    opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1701 1701
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("No patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current patch header
        top = q.applied[-1].name
        message, comments, user, date, hasdiff = q.readheaders(top)
        message = ui.edit('\n'.join(message), user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1727 1727
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1732 1732
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # bug fix: actually skip duplicates (and the current patch)
            # instead of folding the same patch twice — the warning
            # always claimed to skip but the patch was still appended
            ui.warn(_('Skipping already folded patch %s') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each patch's header for the combined message
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate the folded headers onto the current patch header
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1791 1791
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    target = q.lookup(patch)
    # pop if the target is already applied, otherwise push up to it
    if q.isapplied(target):
        ret = q.pop(repo, target, force=opts['force'])
    else:
        ret = q.push(repo, target, force=opts['force'])
    q.save_dirty()
    return ret
1802 1802
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # a leading '+'/'-' argument is a guard, not a patch name: default
    # to the topmost applied patch in that case
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # remaining arguments are the guards to set (--none clears them)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
1850 1850
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # default to the topmost applied patch
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    message = repo.mq.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1865 1865
def lastsavename(path):
    """Return (name, index) of the highest-numbered save of path.

    Saved queues are named "<path>.<N>"; scan path's directory for
    the entry with the largest N.  Returns (None, None) when no save
    exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base name and anchor the pattern: previously the bare
    # '.' matched any character and the pattern was unanchored, so a
    # queue name containing regex metacharacters broke the match and
    # e.g. "patches.3x" was wrongly accepted as save number 3
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1882 1882
def savename(path):
    """Return the name the next save of path should be written under."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save: start numbering from 1
        index = 0
    return "%s.%d" % (path, index + 1)
1889 1889
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # -a: push every remaining patch in the series
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # -m: merge against a previously saved queue
        if opts['name']:
            savedpath = opts['name']
        else:
            savedpath, idx = lastsavename(q.path)
        if not savedpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), savedpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq)
1913 1913
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    if opts['name']:
        # a named (non-default) queue was given: operate on it and
        # leave the working directory untouched
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        update = False
    else:
        q = repo.mq
        update = True
    result = q.pop(repo, patch, force=opts['force'], update=update,
                   all=opts['all'])
    q.save_dirty()
    return result
1927 1927
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # one argument: rename the topmost applied patch to it
        name, patch = patch, None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original file name
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any guards attached to it
    idx = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[idx])
    q.full_series[idx] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    info = q.isapplied(patch)
    if info:
        # keep the status file in sync with the new name
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # record the rename in the versioned patch repository, if any
        wlock = r.wlock()
        try:
            if r.dirstate[name] == 'r':
                r.undelete([name])
            r.copy(patch, name)
            r.remove([patch], False)
        finally:
            del wlock

    q.save_dirty()
1983 1983
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1992 1992
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        # -c: copy the patch directory aside, either to -n NAME or to
        # the next free "<path>.<N>" save name
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # best-effort: the status file may already be gone.
            # (was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit)
            pass
    return 0
2022 2022
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    node = repo.lookup(rev)
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'
    # only update the working dir if it is not already at the null rev
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, node, backup=backup, update=update)
    return 0
2034 2034
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # change the set of active guards; report how the counts of
        # pushable/guarded patches were affected
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s: tally every guard used in the series file
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading +/- sign
        guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: just show the active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top patch before popping so --reapply can
    # push back to it afterwards
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is now
        # guarded
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2136 2136
def reposetup(ui, repo):
    """Wrap a local repository so mq state is respected by core commands.

    Installs an mqrepo subclass that refuses commits/pushes over applied
    patches, exposes qtip/qbase/qparent tags, and keeps the branch cache
    from persisting revisions that belong to applied patches.

    NOTE: SOURCE contained interleaved pre/post-merge diff lines for
    _branchtags; this is the merged version with the
    (partial, lrev) signature matching localrepo._branchtags.
    """
    class mqrepo(repo.__class__):
        def abort_if_wdir_patched(self, errmsg, force=False):
            # refuse to proceed when the working dir parent is an
            # applied mq patch (unless forced)
            if self.mq.applied and not force:
                parent = revlog.hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # 'force' may arrive positionally (6th arg) or as a keyword
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            """Add qtip, qbase, qparent and per-patch tags to the cache."""
            if self.tagscache:
                return self.tagscache

            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]

            if mqtags[-1][0] not in self.changelog.nodemap:
                self.ui.warn('mq status file refers to unknown node %s\n'
                             % revlog.short(mqtags[-1][0]))
                return tagscache

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            for patch in mqtags:
                # real tags take precedence over mq patch names
                if patch[1] in tagscache:
                    self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
                else:
                    tagscache[patch[1]] = patch[0]

            return tagscache

        def _branchtags(self, partial, lrev):
            # update the branch cache, but never persist revisions that
            # belong to applied mq patches
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = revlog.bin(q.applied[0].rev)
            if qbasenode not in cl.nodemap:
                self.ui.warn('mq status file refers to unknown node %s\n'
                             % revlog.short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, cl.count())

            return partial

    if repo.local():
        repo.__class__ = mqrepo
        repo.mq = queue(ui, repo.join(""))
2223 2220
# option sets shared by several commands below
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

headeropts = [
    ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
    ('u', 'user', '', _('add "From: <given user>" to patch')),
    ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
    ('d', 'date', '', _('add "Date: <given date>" to patch'))]

# command table: maps command name (aliases separated by '|', a leading
# '^' marks commands shown in short help) to (function, options, synopsis)
cmdtable = {
    "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
    "qclone":
    (clone,
     [('', 'pull', None, _('use pull protocol to copy metadata')),
      ('U', 'noupdate', None, _('do not update the new working directories')),
      ('', 'uncompressed', None,
       _('use uncompressed transfer (fast over LAN)')),
      ('p', 'patches', '', _('location of source patch repo')),
      ] + commands.remoteopts,
     _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
    (commit,
     commands.table["^commit|ci"][1],
     _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
    (diff,
     [('g', 'git', None, _('use git extended diff format')),
      ('U', 'unified', 3, _('number of lines of context to show')),
      ] + commands.walkopts,
     _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
    "qdelete|qremove|qrm":
    (delete,
     [('k', 'keep', None, _('keep patch file')),
      ('r', 'rev', [], _('stop managing a revision'))],
     _('hg qdelete [-k] [-r REV]... [PATCH]...')),
    'qfold':
    (fold,
     [('e', 'edit', None, _('edit patch header')),
      ('k', 'keep', None, _('keep folded patch files')),
      ] + commands.commitopts,
     _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
    (goto,
     [('f', 'force', None, _('overwrite any local changes'))],
     _('hg qgoto [OPTION]... PATCH')),
    'qguard':
    (guard,
     [('l', 'list', None, _('list all patches and guards')),
      ('n', 'none', None, _('drop all guards'))],
     _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "^qimport":
    (qimport,
     [('e', 'existing', None, 'import file in patch dir'),
      ('n', 'name', '', 'patch file name'),
      ('f', 'force', None, 'overwrite existing files'),
      ('r', 'rev', [], 'place existing revisions under mq control'),
      ('g', 'git', None, _('use git extended diff format'))],
     _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
    "^qinit":
    (init,
     [('c', 'create-repo', None, 'create queue repository')],
     _('hg qinit [-c]')),
    "qnew":
    (new,
     [('e', 'edit', None, _('edit commit message')),
      ('f', 'force', None, _('import uncommitted changes into patch')),
      ('g', 'git', None, _('use git extended diff format')),
      ] + commands.walkopts + commands.commitopts + headeropts,
     _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
    (pop,
     [('a', 'all', None, _('pop all patches')),
      ('n', 'name', '', _('queue name to pop')),
      ('f', 'force', None, _('forget any local changes'))],
     _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
    "^qpush":
    (push,
     [('f', 'force', None, _('apply if the patch has rejects')),
      ('l', 'list', None, _('list patch name in commit text')),
      ('a', 'all', None, _('apply all patches')),
      ('m', 'merge', None, _('merge from another queue')),
      ('n', 'name', '', _('merge queue name'))],
     _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
    "^qrefresh":
    (refresh,
     [('e', 'edit', None, _('edit commit message')),
      ('g', 'git', None, _('use git extended diff format')),
      ('s', 'short', None, _('refresh only files already in the patch')),
      ] + commands.walkopts + commands.commitopts + headeropts,
     _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
    (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
    (restore,
     [('d', 'delete', None, _('delete save entry')),
      ('u', 'update', None, _('update queue working dir'))],
     _('hg qrestore [-d] [-u] REV')),
    "qsave":
    (save,
     [('c', 'copy', None, _('copy patch directory')),
      ('n', 'name', '', _('copy directory name')),
      ('e', 'empty', None, _('clear queue status file')),
      ('f', 'force', None, _('force copy'))] + commands.commitopts,
     _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
    (select,
     [('n', 'none', None, _('disable all guards')),
      ('s', 'series', None, _('list all guards in series file')),
      ('', 'pop', None, _('pop to before first guarded applied patch')),
      ('', 'reapply', None, _('pop, then reapply patches'))],
     _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
    (series,
     [('m', 'missing', None, _('print patches not in series')),
      ] + seriesopts,
     _('hg qseries [-ms]')),
    "^strip":
    (strip,
     [('f', 'force', None, _('force multi-head removal')),
      ('b', 'backup', None, _('bundle unrelated changesets')),
      ('n', 'nobackup', None, _('no backups'))],
     _('hg strip [-f] [-b] [-n] REV')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
}
@@ -1,2102 +1,2117
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
    def __init__(self, parentui, path=None, create=0):
        """Open the repository at path; with create=1, initialize it.

        Raises repo.RepoError when the repository is missing (without
        create), already exists (with create), or declares an
        unsupported requirement.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repo with
                # no special requirements
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            # old layout: store files live directly under .hg, unencoded
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc is fine
            pass

        # lazily-populated caches; see tags()/branchtags()
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
101 103
    def __getattr__(self, name):
        """Lazily construct changelog, manifest and dirstate on first use."""
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            # opening the changelog fixes the revlog version used for
            # subsequent store opens
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name
116 118
117 119 def url(self):
118 120 return 'file:' + self.root
119 121
    def hook(self, name, throw=False, **args):
        """Run the named hook for this repo, forwarding args to hook.hook."""
        return hook.hook(self.ui, self, name, throw, **args)
122 124
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        """Record a tag; shared implementation behind tag().

        With local=True the tag is appended to .hg/localtags and no
        changeset is created (returns None).  Otherwise the tag is
        appended to .hgtags and committed; returns the node of the tag
        commit.  When parent is given, .hgtags is taken from that
        revision instead of the working directory.
        """
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            # append one "<hex node> <name>" line, making sure the
            # previous content ends with a newline first
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # base .hgtags on the given parent revision's copy
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
184 186
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse if .hgtags shows up in any of the first five status
        # categories (i.e. has uncommitted changes)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))


        self._tag(name, node, message, local, user, date)
210 212
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse "<hex node> <tag>" lines from fn into globaltags,
            # warning (not aborting) on malformed or unknown entries
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # track the history of nodes a tag pointed at within
                # this file: later lines supersede earlier ones
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            # skip entries pointing at nullid (presumably removed tags)
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache
295 297
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # tags() populates _tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
308 310
309 311 def _hgtagsnodes(self):
310 312 heads = self.heads()
311 313 heads.reverse()
312 314 last = {}
313 315 ret = []
314 316 for node in heads:
315 317 c = self.changectx(node)
316 318 rev = c.rev()
317 319 try:
318 320 fnode = c.filenode('.hgtags')
319 321 except revlog.LookupError:
320 322 continue
321 323 ret.append((rev, node, fnode))
322 324 if fnode in last:
323 325 ret[last[fnode]] = None
324 326 last[fnode] = len(ret) - 1
325 327 return [item for item in ret if item]
326 328
327 329 def tagslist(self):
328 330 '''return a list of tags ordered by revision'''
329 331 l = []
330 332 for t, n in self.tags().items():
331 333 try:
332 334 r = self.changelog.rev(n)
333 335 except:
334 336 r = -2 # sort to the beginning of the list if unknown
335 337 l.append((r, t, n))
336 338 l.sort()
337 339 return [(t, n) for r, t, n in l]
338 340
339 341 def nodetags(self, node):
340 342 '''return the tags associated with a node'''
341 343 if not self.nodetagscache:
342 344 self.nodetagscache = {}
343 345 for t, n in self.tags().items():
344 346 self.nodetagscache.setdefault(n, []).append(t)
345 347 return self.nodetagscache.get(node, [])
346 348
347 def _branchtags(self):
348 partial, last, lrev = self._readbranchcache()
349
349 def _branchtags(self, partial, lrev):
350 350 tiprev = self.changelog.count() - 1
351 351 if lrev != tiprev:
352 352 self._updatebranchcache(partial, lrev+1, tiprev+1)
353 353 self._writebranchcache(partial, self.changelog.tip(), tiprev)
354 354
355 355 return partial
356 356
357 357 def branchtags(self):
358 if self.branchcache is not None:
358 tip = self.changelog.tip()
359 if self.branchcache is not None and self._branchcachetip == tip:
359 360 return self.branchcache
360 361
362 oldtip = self._branchcachetip
363 self._branchcachetip = tip
364 if self.branchcache is None:
361 365 self.branchcache = {} # avoid recursion in changectx
362 partial = self._branchtags()
366 else:
367 self.branchcache.clear() # keep using the same dict
368 if oldtip is None or oldtip not in self.changelog.nodemap:
369 partial, last, lrev = self._readbranchcache()
370 else:
371 lrev = self.changelog.rev(oldtip)
372 partial = self._ubranchcache
373
374 self._branchtags(partial, lrev)
363 375
364 376 # the branch cache is stored on disk as UTF-8, but in the local
365 377 # charset internally
366 378 for k, v in partial.items():
367 379 self.branchcache[util.tolocal(k)] = v
380 self._ubranchcache = partial
368 381 return self.branchcache
369 382
370 383 def _readbranchcache(self):
371 384 partial = {}
372 385 try:
373 386 f = self.opener("branch.cache")
374 387 lines = f.read().split('\n')
375 388 f.close()
376 389 except (IOError, OSError):
377 390 return {}, nullid, nullrev
378 391
379 392 try:
380 393 last, lrev = lines.pop(0).split(" ", 1)
381 394 last, lrev = bin(last), int(lrev)
382 395 if not (lrev < self.changelog.count() and
383 396 self.changelog.node(lrev) == last): # sanity check
384 397 # invalidate the cache
385 398 raise ValueError('invalidating branch cache (tip differs)')
386 399 for l in lines:
387 400 if not l: continue
388 401 node, label = l.split(" ", 1)
389 402 partial[label.strip()] = bin(node)
390 403 except (KeyboardInterrupt, util.SignalInterrupt):
391 404 raise
392 405 except Exception, inst:
393 406 if self.ui.debugflag:
394 407 self.ui.warn(str(inst), '\n')
395 408 partial, last, lrev = {}, nullid, nullrev
396 409 return partial, last, lrev
397 410
398 411 def _writebranchcache(self, branches, tip, tiprev):
399 412 try:
400 413 f = self.opener("branch.cache", "w", atomictemp=True)
401 414 f.write("%s %s\n" % (hex(tip), tiprev))
402 415 for label, node in branches.iteritems():
403 416 f.write("%s %s\n" % (hex(node), label))
404 417 f.rename()
405 418 except (IOError, OSError):
406 419 pass
407 420
408 421 def _updatebranchcache(self, partial, start, end):
409 422 for r in xrange(start, end):
410 423 c = self.changectx(r)
411 424 b = c.branch()
412 425 partial[b] = c.node()
413 426
414 427 def lookup(self, key):
415 428 if key == '.':
416 429 key, second = self.dirstate.parents()
417 430 if key == nullid:
418 431 raise repo.RepoError(_("no revision checked out"))
419 432 if second != nullid:
420 433 self.ui.warn(_("warning: working directory has two parents, "
421 434 "tag '.' uses the first\n"))
422 435 elif key == 'null':
423 436 return nullid
424 437 n = self.changelog._match(key)
425 438 if n:
426 439 return n
427 440 if key in self.tags():
428 441 return self.tags()[key]
429 442 if key in self.branchtags():
430 443 return self.branchtags()[key]
431 444 n = self.changelog._partialmatch(key)
432 445 if n:
433 446 return n
434 447 try:
435 448 if len(key) == 20:
436 449 key = hex(key)
437 450 except:
438 451 pass
439 452 raise repo.RepoError(_("unknown revision '%s'") % key)
440 453
441 454 def dev(self):
442 455 return os.lstat(self.path).st_dev
443 456
444 457 def local(self):
445 458 return True
446 459
447 460 def join(self, f):
448 461 return os.path.join(self.path, f)
449 462
450 463 def sjoin(self, f):
451 464 f = self.encodefn(f)
452 465 return os.path.join(self.spath, f)
453 466
454 467 def wjoin(self, f):
455 468 return os.path.join(self.root, f)
456 469
457 470 def file(self, f):
458 471 if f[0] == '/':
459 472 f = f[1:]
460 473 return filelog.filelog(self.sopener, f)
461 474
462 475 def changectx(self, changeid=None):
463 476 return context.changectx(self, changeid)
464 477
465 478 def workingctx(self):
466 479 return context.workingctx(self)
467 480
468 481 def parents(self, changeid=None):
469 482 '''
470 483 get list of changectxs for parents of changeid or working directory
471 484 '''
472 485 if changeid is None:
473 486 pl = self.dirstate.parents()
474 487 else:
475 488 n = self.changelog.lookup(changeid)
476 489 pl = self.changelog.parents(n)
477 490 if pl[1] == nullid:
478 491 return [self.changectx(pl[0])]
479 492 return [self.changectx(pl[0]), self.changectx(pl[1])]
480 493
481 494 def filectx(self, path, changeid=None, fileid=None):
482 495 """changeid can be a changeset revision, node, or tag.
483 496 fileid can be a file revision or node."""
484 497 return context.filectx(self, path, changeid, fileid)
485 498
486 499 def getcwd(self):
487 500 return self.dirstate.getcwd()
488 501
489 502 def pathto(self, f, cwd=None):
490 503 return self.dirstate.pathto(f, cwd)
491 504
492 505 def wfile(self, f, mode='r'):
493 506 return self.wopener(f, mode)
494 507
495 508 def _link(self, f):
496 509 return os.path.islink(self.wjoin(f))
497 510
498 511 def _filter(self, filter, filename, data):
499 512 if filter not in self.filterpats:
500 513 l = []
501 514 for pat, cmd in self.ui.configitems(filter):
502 515 mf = util.matcher(self.root, "", [pat], [], [])[1]
503 516 fn = None
504 517 params = cmd
505 518 for name, filterfn in self._datafilters.iteritems():
506 519 if cmd.startswith(name):
507 520 fn = filterfn
508 521 params = cmd[len(name):].lstrip()
509 522 break
510 523 if not fn:
511 524 fn = lambda s, c, **kwargs: util.filter(s, c)
512 525 # Wrap old filters not supporting keyword arguments
513 526 if not inspect.getargspec(fn)[2]:
514 527 oldfn = fn
515 528 fn = lambda s, c, **kwargs: oldfn(s, c)
516 529 l.append((mf, fn, params))
517 530 self.filterpats[filter] = l
518 531
519 532 for mf, fn, cmd in self.filterpats[filter]:
520 533 if mf(filename):
521 534 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
522 535 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
523 536 break
524 537
525 538 return data
526 539
527 540 def adddatafilter(self, name, filter):
528 541 self._datafilters[name] = filter
529 542
530 543 def wread(self, filename):
531 544 if self._link(filename):
532 545 data = os.readlink(self.wjoin(filename))
533 546 else:
534 547 data = self.wopener(filename, 'r').read()
535 548 return self._filter("encode", filename, data)
536 549
537 550 def wwrite(self, filename, data, flags):
538 551 data = self._filter("decode", filename, data)
539 552 try:
540 553 os.unlink(self.wjoin(filename))
541 554 except OSError:
542 555 pass
543 556 self.wopener(filename, 'w').write(data)
544 557 util.set_flags(self.wjoin(filename), flags)
545 558
546 559 def wwritedata(self, filename, data):
547 560 return self._filter("decode", filename, data)
548 561
549 562 def transaction(self):
550 563 if self._transref and self._transref():
551 564 return self._transref().nest()
552 565
553 566 # abort here if the journal already exists
554 567 if os.path.exists(self.sjoin("journal")):
555 568 raise repo.RepoError(_("journal already exists - run hg recover"))
556 569
557 570 # save dirstate for rollback
558 571 try:
559 572 ds = self.opener("dirstate").read()
560 573 except IOError:
561 574 ds = ""
562 575 self.opener("journal.dirstate", "w").write(ds)
563 576 self.opener("journal.branch", "w").write(self.dirstate.branch())
564 577
565 578 renames = [(self.sjoin("journal"), self.sjoin("undo")),
566 579 (self.join("journal.dirstate"), self.join("undo.dirstate")),
567 580 (self.join("journal.branch"), self.join("undo.branch"))]
568 581 tr = transaction.transaction(self.ui.warn, self.sopener,
569 582 self.sjoin("journal"),
570 583 aftertrans(renames),
571 584 self._createmode)
572 585 self._transref = weakref.ref(tr)
573 586 return tr
574 587
575 588 def recover(self):
576 589 l = self.lock()
577 590 try:
578 591 if os.path.exists(self.sjoin("journal")):
579 592 self.ui.status(_("rolling back interrupted transaction\n"))
580 593 transaction.rollback(self.sopener, self.sjoin("journal"))
581 594 self.invalidate()
582 595 return True
583 596 else:
584 597 self.ui.warn(_("no interrupted transaction available\n"))
585 598 return False
586 599 finally:
587 600 del l
588 601
589 602 def rollback(self):
590 603 wlock = lock = None
591 604 try:
592 605 wlock = self.wlock()
593 606 lock = self.lock()
594 607 if os.path.exists(self.sjoin("undo")):
595 608 self.ui.status(_("rolling back last transaction\n"))
596 609 transaction.rollback(self.sopener, self.sjoin("undo"))
597 610 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
598 611 try:
599 612 branch = self.opener("undo.branch").read()
600 613 self.dirstate.setbranch(branch)
601 614 except IOError:
602 615 self.ui.warn(_("Named branch could not be reset, "
603 616 "current branch still is: %s\n")
604 617 % util.tolocal(self.dirstate.branch()))
605 618 self.invalidate()
606 619 self.dirstate.invalidate()
607 620 else:
608 621 self.ui.warn(_("no rollback information available\n"))
609 622 finally:
610 623 del lock, wlock
611 624
612 625 def invalidate(self):
613 626 for a in "changelog manifest".split():
614 627 if hasattr(self, a):
615 628 self.__delattr__(a)
616 629 self.tagscache = None
617 630 self._tagstypecache = None
618 631 self.nodetagscache = None
632 self.branchcache = None
633 self._ubranchcache = None
634 self._branchcachetip = None
619 635
620 636 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
621 637 try:
622 638 l = lock.lock(lockname, 0, releasefn, desc=desc)
623 639 except lock.LockHeld, inst:
624 640 if not wait:
625 641 raise
626 642 self.ui.warn(_("waiting for lock on %s held by %r\n") %
627 643 (desc, inst.locker))
628 644 # default to 600 seconds timeout
629 645 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
630 646 releasefn, desc=desc)
631 647 if acquirefn:
632 648 acquirefn()
633 649 return l
634 650
635 651 def lock(self, wait=True):
636 652 if self._lockref and self._lockref():
637 653 return self._lockref()
638 654
639 655 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
640 656 _('repository %s') % self.origroot)
641 657 self._lockref = weakref.ref(l)
642 658 return l
643 659
644 660 def wlock(self, wait=True):
645 661 if self._wlockref and self._wlockref():
646 662 return self._wlockref()
647 663
648 664 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
649 665 self.dirstate.invalidate, _('working directory of %s') %
650 666 self.origroot)
651 667 self._wlockref = weakref.ref(l)
652 668 return l
653 669
654 670 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
655 671 """
656 672 commit an individual file as part of a larger transaction
657 673 """
658 674
659 675 t = self.wread(fn)
660 676 fl = self.file(fn)
661 677 fp1 = manifest1.get(fn, nullid)
662 678 fp2 = manifest2.get(fn, nullid)
663 679
664 680 meta = {}
665 681 cp = self.dirstate.copied(fn)
666 682 if cp:
667 683 # Mark the new revision of this file as a copy of another
668 684 # file. This copy data will effectively act as a parent
669 685 # of this new revision. If this is a merge, the first
670 686 # parent will be the nullid (meaning "look up the copy data")
671 687 # and the second one will be the other parent. For example:
672 688 #
673 689 # 0 --- 1 --- 3 rev1 changes file foo
674 690 # \ / rev2 renames foo to bar and changes it
675 691 # \- 2 -/ rev3 should have bar with all changes and
676 692 # should record that bar descends from
677 693 # bar in rev2 and foo in rev1
678 694 #
679 695 # this allows this merge to succeed:
680 696 #
681 697 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
682 698 # \ / merging rev3 and rev4 should use bar@rev2
683 699 # \- 2 --- 4 as the merge base
684 700 #
685 701 meta["copy"] = cp
686 702 if not manifest2: # not a branch merge
687 703 meta["copyrev"] = hex(manifest1.get(cp, nullid))
688 704 fp2 = nullid
689 705 elif fp2 != nullid: # copied on remote side
690 706 meta["copyrev"] = hex(manifest1.get(cp, nullid))
691 707 elif fp1 != nullid: # copied on local side, reversed
692 708 meta["copyrev"] = hex(manifest2.get(cp))
693 709 fp2 = fp1
694 710 elif cp in manifest2: # directory rename on local side
695 711 meta["copyrev"] = hex(manifest2[cp])
696 712 else: # directory rename on remote side
697 713 meta["copyrev"] = hex(manifest1.get(cp, nullid))
698 714 self.ui.debug(_(" %s: copy %s:%s\n") %
699 715 (fn, cp, meta["copyrev"]))
700 716 fp1 = nullid
701 717 elif fp2 != nullid:
702 718 # is one parent an ancestor of the other?
703 719 fpa = fl.ancestor(fp1, fp2)
704 720 if fpa == fp1:
705 721 fp1, fp2 = fp2, nullid
706 722 elif fpa == fp2:
707 723 fp2 = nullid
708 724
709 725 # is the file unmodified from the parent? report existing entry
710 726 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
711 727 return fp1
712 728
713 729 changelist.append(fn)
714 730 return fl.add(t, meta, tr, linkrev, fp1, fp2)
715 731
716 732 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
717 733 if p1 is None:
718 734 p1, p2 = self.dirstate.parents()
719 735 return self.commit(files=files, text=text, user=user, date=date,
720 736 p1=p1, p2=p2, extra=extra, empty_ok=True)
721 737
722 738 def commit(self, files=None, text="", user=None, date=None,
723 739 match=util.always, force=False, force_editor=False,
724 740 p1=None, p2=None, extra={}, empty_ok=False):
725 741 wlock = lock = tr = None
726 742 valid = 0 # don't save the dirstate if this isn't set
727 743 if files:
728 744 files = util.unique(files)
729 745 try:
730 746 commit = []
731 747 remove = []
732 748 changed = []
733 749 use_dirstate = (p1 is None) # not rawcommit
734 750 extra = extra.copy()
735 751
736 752 if use_dirstate:
737 753 if files:
738 754 for f in files:
739 755 s = self.dirstate[f]
740 756 if s in 'nma':
741 757 commit.append(f)
742 758 elif s == 'r':
743 759 remove.append(f)
744 760 else:
745 761 self.ui.warn(_("%s not tracked!\n") % f)
746 762 else:
747 763 changes = self.status(match=match)[:5]
748 764 modified, added, removed, deleted, unknown = changes
749 765 commit = modified + added
750 766 remove = removed
751 767 else:
752 768 commit = files
753 769
754 770 if use_dirstate:
755 771 p1, p2 = self.dirstate.parents()
756 772 update_dirstate = True
757 773 else:
758 774 p1, p2 = p1, p2 or nullid
759 775 update_dirstate = (self.dirstate.parents()[0] == p1)
760 776
761 777 c1 = self.changelog.read(p1)
762 778 c2 = self.changelog.read(p2)
763 779 m1 = self.manifest.read(c1[0]).copy()
764 780 m2 = self.manifest.read(c2[0])
765 781
766 782 if use_dirstate:
767 783 branchname = self.workingctx().branch()
768 784 try:
769 785 branchname = branchname.decode('UTF-8').encode('UTF-8')
770 786 except UnicodeDecodeError:
771 787 raise util.Abort(_('branch name not in UTF-8!'))
772 788 else:
773 789 branchname = ""
774 790
775 791 if use_dirstate:
776 792 oldname = c1[5].get("branch") # stored in UTF-8
777 793 if (not commit and not remove and not force and p2 == nullid
778 794 and branchname == oldname):
779 795 self.ui.status(_("nothing changed\n"))
780 796 return None
781 797
782 798 xp1 = hex(p1)
783 799 if p2 == nullid: xp2 = ''
784 800 else: xp2 = hex(p2)
785 801
786 802 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
787 803
788 804 wlock = self.wlock()
789 805 lock = self.lock()
790 806 tr = self.transaction()
791 807 trp = weakref.proxy(tr)
792 808
793 809 # check in files
794 810 new = {}
795 811 linkrev = self.changelog.count()
796 812 commit.sort()
797 813 is_exec = util.execfunc(self.root, m1.execf)
798 814 is_link = util.linkfunc(self.root, m1.linkf)
799 815 for f in commit:
800 816 self.ui.note(f + "\n")
801 817 try:
802 818 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
803 819 new_exec = is_exec(f)
804 820 new_link = is_link(f)
805 821 if ((not changed or changed[-1] != f) and
806 822 m2.get(f) != new[f]):
807 823 # mention the file in the changelog if some
808 824 # flag changed, even if there was no content
809 825 # change.
810 826 old_exec = m1.execf(f)
811 827 old_link = m1.linkf(f)
812 828 if old_exec != new_exec or old_link != new_link:
813 829 changed.append(f)
814 830 m1.set(f, new_exec, new_link)
815 831 if use_dirstate:
816 832 self.dirstate.normal(f)
817 833
818 834 except (OSError, IOError):
819 835 if use_dirstate:
820 836 self.ui.warn(_("trouble committing %s!\n") % f)
821 837 raise
822 838 else:
823 839 remove.append(f)
824 840
825 841 # update manifest
826 842 m1.update(new)
827 843 remove.sort()
828 844 removed = []
829 845
830 846 for f in remove:
831 847 if f in m1:
832 848 del m1[f]
833 849 removed.append(f)
834 850 elif f in m2:
835 851 removed.append(f)
836 852 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
837 853 (new, removed))
838 854
839 855 # add changeset
840 856 new = new.keys()
841 857 new.sort()
842 858
843 859 user = user or self.ui.username()
844 860 if (not empty_ok and not text) or force_editor:
845 861 edittext = []
846 862 if text:
847 863 edittext.append(text)
848 864 edittext.append("")
849 865 edittext.append(_("HG: Enter commit message."
850 866 " Lines beginning with 'HG:' are removed."))
851 867 edittext.append("HG: --")
852 868 edittext.append("HG: user: %s" % user)
853 869 if p2 != nullid:
854 870 edittext.append("HG: branch merge")
855 871 if branchname:
856 872 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
857 873 edittext.extend(["HG: changed %s" % f for f in changed])
858 874 edittext.extend(["HG: removed %s" % f for f in removed])
859 875 if not changed and not remove:
860 876 edittext.append("HG: no files changed")
861 877 edittext.append("")
862 878 # run editor in the repository root
863 879 olddir = os.getcwd()
864 880 os.chdir(self.root)
865 881 text = self.ui.edit("\n".join(edittext), user)
866 882 os.chdir(olddir)
867 883
868 884 if branchname:
869 885 extra["branch"] = branchname
870 886
871 887 if use_dirstate:
872 888 lines = [line.rstrip() for line in text.rstrip().splitlines()]
873 889 while lines and not lines[0]:
874 890 del lines[0]
875 891 if not lines:
876 892 raise util.Abort(_("empty commit message"))
877 893 text = '\n'.join(lines)
878 894
879 895 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
880 896 user, date, extra)
881 897 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
882 898 parent2=xp2)
883 899 tr.close()
884 900
885 if self.branchcache and "branch" in extra:
886 self.branchcache[util.tolocal(extra["branch"])] = n
901 if self.branchcache:
902 self.branchtags()
887 903
888 904 if use_dirstate or update_dirstate:
889 905 self.dirstate.setparents(n)
890 906 if use_dirstate:
891 907 for f in removed:
892 908 self.dirstate.forget(f)
893 909 valid = 1 # our dirstate updates are complete
894 910
895 911 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
896 912 return n
897 913 finally:
898 914 if not valid: # don't save our updated dirstate
899 915 self.dirstate.invalidate()
900 916 del tr, lock, wlock
901 917
902 918 def walk(self, node=None, files=[], match=util.always, badmatch=None):
903 919 '''
904 920 walk recursively through the directory tree or a given
905 921 changeset, finding all files matched by the match
906 922 function
907 923
908 924 results are yielded in a tuple (src, filename), where src
909 925 is one of:
910 926 'f' the file was found in the directory tree
911 927 'm' the file was only in the dirstate and not in the tree
912 928 'b' file was not found and matched badmatch
913 929 '''
914 930
915 931 if node:
916 932 fdict = dict.fromkeys(files)
917 933 # for dirstate.walk, files=['.'] means "walk the whole tree".
918 934 # follow that here, too
919 935 fdict.pop('.', None)
920 936 mdict = self.manifest.read(self.changelog.read(node)[0])
921 937 mfiles = mdict.keys()
922 938 mfiles.sort()
923 939 for fn in mfiles:
924 940 for ffn in fdict:
925 941 # match if the file is the exact name or a directory
926 942 if ffn == fn or fn.startswith("%s/" % ffn):
927 943 del fdict[ffn]
928 944 break
929 945 if match(fn):
930 946 yield 'm', fn
931 947 ffiles = fdict.keys()
932 948 ffiles.sort()
933 949 for fn in ffiles:
934 950 if badmatch and badmatch(fn):
935 951 if match(fn):
936 952 yield 'b', fn
937 953 else:
938 954 self.ui.warn(_('%s: No such file in rev %s\n')
939 955 % (self.pathto(fn), short(node)))
940 956 else:
941 957 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
942 958 yield src, fn
943 959
944 960 def status(self, node1=None, node2=None, files=[], match=util.always,
945 961 list_ignored=False, list_clean=False):
946 962 """return status of files between two nodes or node and working directory
947 963
948 964 If node1 is None, use the first dirstate parent instead.
949 965 If node2 is None, compare node1 with working directory.
950 966 """
951 967
952 968 def fcmp(fn, getnode):
953 969 t1 = self.wread(fn)
954 970 return self.file(fn).cmp(getnode(fn), t1)
955 971
956 972 def mfmatches(node):
957 973 change = self.changelog.read(node)
958 974 mf = self.manifest.read(change[0]).copy()
959 975 for fn in mf.keys():
960 976 if not match(fn):
961 977 del mf[fn]
962 978 return mf
963 979
964 980 modified, added, removed, deleted, unknown = [], [], [], [], []
965 981 ignored, clean = [], []
966 982
967 983 compareworking = False
968 984 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
969 985 compareworking = True
970 986
971 987 if not compareworking:
972 988 # read the manifest from node1 before the manifest from node2,
973 989 # so that we'll hit the manifest cache if we're going through
974 990 # all the revisions in parent->child order.
975 991 mf1 = mfmatches(node1)
976 992
977 993 # are we comparing the working directory?
978 994 if not node2:
979 995 (lookup, modified, added, removed, deleted, unknown,
980 996 ignored, clean) = self.dirstate.status(files, match,
981 997 list_ignored, list_clean)
982 998
983 999 # are we comparing working dir against its parent?
984 1000 if compareworking:
985 1001 if lookup:
986 1002 fixup = []
987 1003 # do a full compare of any files that might have changed
988 1004 ctx = self.changectx()
989 1005 for f in lookup:
990 1006 if f not in ctx or ctx[f].cmp(self.wread(f)):
991 1007 modified.append(f)
992 1008 else:
993 1009 fixup.append(f)
994 1010 if list_clean:
995 1011 clean.append(f)
996 1012
997 1013 # update dirstate for files that are actually clean
998 1014 if fixup:
999 1015 wlock = None
1000 1016 try:
1001 1017 try:
1002 1018 wlock = self.wlock(False)
1003 1019 except lock.LockException:
1004 1020 pass
1005 1021 if wlock:
1006 1022 for f in fixup:
1007 1023 self.dirstate.normal(f)
1008 1024 finally:
1009 1025 del wlock
1010 1026 else:
1011 1027 # we are comparing working dir against non-parent
1012 1028 # generate a pseudo-manifest for the working dir
1013 1029 # XXX: create it in dirstate.py ?
1014 1030 mf2 = mfmatches(self.dirstate.parents()[0])
1015 1031 is_exec = util.execfunc(self.root, mf2.execf)
1016 1032 is_link = util.linkfunc(self.root, mf2.linkf)
1017 1033 for f in lookup + modified + added:
1018 1034 mf2[f] = ""
1019 1035 mf2.set(f, is_exec(f), is_link(f))
1020 1036 for f in removed:
1021 1037 if f in mf2:
1022 1038 del mf2[f]
1023 1039
1024 1040 else:
1025 1041 # we are comparing two revisions
1026 1042 mf2 = mfmatches(node2)
1027 1043
1028 1044 if not compareworking:
1029 1045 # flush lists from dirstate before comparing manifests
1030 1046 modified, added, clean = [], [], []
1031 1047
1032 1048 # make sure to sort the files so we talk to the disk in a
1033 1049 # reasonable order
1034 1050 mf2keys = mf2.keys()
1035 1051 mf2keys.sort()
1036 1052 getnode = lambda fn: mf1.get(fn, nullid)
1037 1053 for fn in mf2keys:
1038 1054 if fn in mf1:
1039 1055 if (mf1.flags(fn) != mf2.flags(fn) or
1040 1056 (mf1[fn] != mf2[fn] and
1041 1057 (mf2[fn] != "" or fcmp(fn, getnode)))):
1042 1058 modified.append(fn)
1043 1059 elif list_clean:
1044 1060 clean.append(fn)
1045 1061 del mf1[fn]
1046 1062 else:
1047 1063 added.append(fn)
1048 1064
1049 1065 removed = mf1.keys()
1050 1066
1051 1067 # sort and return results:
1052 1068 for l in modified, added, removed, deleted, unknown, ignored, clean:
1053 1069 l.sort()
1054 1070 return (modified, added, removed, deleted, unknown, ignored, clean)
1055 1071
1056 1072 def add(self, list):
1057 1073 wlock = self.wlock()
1058 1074 try:
1059 1075 rejected = []
1060 1076 for f in list:
1061 1077 p = self.wjoin(f)
1062 1078 try:
1063 1079 st = os.lstat(p)
1064 1080 except:
1065 1081 self.ui.warn(_("%s does not exist!\n") % f)
1066 1082 rejected.append(f)
1067 1083 continue
1068 1084 if st.st_size > 10000000:
1069 1085 self.ui.warn(_("%s: files over 10MB may cause memory and"
1070 1086 " performance problems\n"
1071 1087 "(use 'hg revert %s' to unadd the file)\n")
1072 1088 % (f, f))
1073 1089 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1074 1090 self.ui.warn(_("%s not added: only files and symlinks "
1075 1091 "supported currently\n") % f)
1076 1092 rejected.append(p)
1077 1093 elif self.dirstate[f] in 'amn':
1078 1094 self.ui.warn(_("%s already tracked!\n") % f)
1079 1095 elif self.dirstate[f] == 'r':
1080 1096 self.dirstate.normallookup(f)
1081 1097 else:
1082 1098 self.dirstate.add(f)
1083 1099 return rejected
1084 1100 finally:
1085 1101 del wlock
1086 1102
1087 1103 def forget(self, list):
1088 1104 wlock = self.wlock()
1089 1105 try:
1090 1106 for f in list:
1091 1107 if self.dirstate[f] != 'a':
1092 1108 self.ui.warn(_("%s not added!\n") % f)
1093 1109 else:
1094 1110 self.dirstate.forget(f)
1095 1111 finally:
1096 1112 del wlock
1097 1113
1098 1114 def remove(self, list, unlink=False):
1099 1115 wlock = None
1100 1116 try:
1101 1117 if unlink:
1102 1118 for f in list:
1103 1119 try:
1104 1120 util.unlink(self.wjoin(f))
1105 1121 except OSError, inst:
1106 1122 if inst.errno != errno.ENOENT:
1107 1123 raise
1108 1124 wlock = self.wlock()
1109 1125 for f in list:
1110 1126 if unlink and os.path.exists(self.wjoin(f)):
1111 1127 self.ui.warn(_("%s still exists!\n") % f)
1112 1128 elif self.dirstate[f] == 'a':
1113 1129 self.dirstate.forget(f)
1114 1130 elif f not in self.dirstate:
1115 1131 self.ui.warn(_("%s not tracked!\n") % f)
1116 1132 else:
1117 1133 self.dirstate.remove(f)
1118 1134 finally:
1119 1135 del wlock
1120 1136
1121 1137 def undelete(self, list):
1122 1138 wlock = None
1123 1139 try:
1124 1140 manifests = [self.manifest.read(self.changelog.read(p)[0])
1125 1141 for p in self.dirstate.parents() if p != nullid]
1126 1142 wlock = self.wlock()
1127 1143 for f in list:
1128 1144 if self.dirstate[f] != 'r':
1129 1145 self.ui.warn("%s not removed!\n" % f)
1130 1146 else:
1131 1147 m = f in manifests[0] and manifests[0] or manifests[1]
1132 1148 t = self.file(f).read(m[f])
1133 1149 self.wwrite(f, t, m.flags(f))
1134 1150 self.dirstate.normal(f)
1135 1151 finally:
1136 1152 del wlock
1137 1153
1138 1154 def copy(self, source, dest):
1139 1155 wlock = None
1140 1156 try:
1141 1157 p = self.wjoin(dest)
1142 1158 if not (os.path.exists(p) or os.path.islink(p)):
1143 1159 self.ui.warn(_("%s does not exist!\n") % dest)
1144 1160 elif not (os.path.isfile(p) or os.path.islink(p)):
1145 1161 self.ui.warn(_("copy failed: %s is not a file or a "
1146 1162 "symbolic link\n") % dest)
1147 1163 else:
1148 1164 wlock = self.wlock()
1149 1165 if dest not in self.dirstate:
1150 1166 self.dirstate.add(dest)
1151 1167 self.dirstate.copy(source, dest)
1152 1168 finally:
1153 1169 del wlock
1154 1170
1155 1171 def heads(self, start=None):
1156 1172 heads = self.changelog.heads(start)
1157 1173 # sort the output in rev descending order
1158 1174 heads = [(-self.changelog.rev(h), h) for h in heads]
1159 1175 heads.sort()
1160 1176 return [n for (r, n) in heads]
1161 1177
1162 1178 def branchheads(self, branch, start=None):
1163 1179 branches = self.branchtags()
1164 1180 if branch not in branches:
1165 1181 return []
1166 1182 # The basic algorithm is this:
1167 1183 #
1168 1184 # Start from the branch tip since there are no later revisions that can
1169 1185 # possibly be in this branch, and the tip is a guaranteed head.
1170 1186 #
1171 1187 # Remember the tip's parents as the first ancestors, since these by
1172 1188 # definition are not heads.
1173 1189 #
1174 1190 # Step backwards from the brach tip through all the revisions. We are
1175 1191 # guaranteed by the rules of Mercurial that we will now be visiting the
1176 1192 # nodes in reverse topological order (children before parents).
1177 1193 #
1178 1194 # If a revision is one of the ancestors of a head then we can toss it
1179 1195 # out of the ancestors set (we've already found it and won't be
1180 1196 # visiting it again) and put its parents in the ancestors set.
1181 1197 #
1182 1198 # Otherwise, if a revision is in the branch it's another head, since it
1183 1199 # wasn't in the ancestor list of an existing head. So add it to the
1184 1200 # head list, and add its parents to the ancestor list.
1185 1201 #
1186 1202 # If it is not in the branch ignore it.
1187 1203 #
1188 1204 # Once we have a list of heads, use nodesbetween to filter out all the
1189 1205 # heads that cannot be reached from startrev. There may be a more
1190 1206 # efficient way to do this as part of the previous algorithm.
1191 1207
1192 1208 set = util.set
1193 1209 heads = [self.changelog.rev(branches[branch])]
1194 1210 # Don't care if ancestors contains nullrev or not.
1195 1211 ancestors = set(self.changelog.parentrevs(heads[0]))
1196 1212 for rev in xrange(heads[0] - 1, nullrev, -1):
1197 1213 if rev in ancestors:
1198 1214 ancestors.update(self.changelog.parentrevs(rev))
1199 1215 ancestors.remove(rev)
1200 1216 elif self.changectx(rev).branch() == branch:
1201 1217 heads.append(rev)
1202 1218 ancestors.update(self.changelog.parentrevs(rev))
1203 1219 heads = [self.changelog.node(rev) for rev in heads]
1204 1220 if start is not None:
1205 1221 heads = self.changelog.nodesbetween([start], heads)[2]
1206 1222 return heads
1207 1223
1208 1224 def branches(self, nodes):
1209 1225 if not nodes:
1210 1226 nodes = [self.changelog.tip()]
1211 1227 b = []
1212 1228 for n in nodes:
1213 1229 t = n
1214 1230 while 1:
1215 1231 p = self.changelog.parents(n)
1216 1232 if p[1] != nullid or p[0] == nullid:
1217 1233 b.append((t, n, p[0], p[1]))
1218 1234 break
1219 1235 n = p[0]
1220 1236 return b
1221 1237
1222 1238 def between(self, pairs):
1223 1239 r = []
1224 1240
1225 1241 for top, bottom in pairs:
1226 1242 n, l, i = top, [], 0
1227 1243 f = 1
1228 1244
1229 1245 while n != bottom:
1230 1246 p = self.changelog.parents(n)[0]
1231 1247 if i == f:
1232 1248 l.append(n)
1233 1249 f = f * 2
1234 1250 n = p
1235 1251 i += 1
1236 1252
1237 1253 r.append(l)
1238 1254
1239 1255 return r
1240 1256
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap gives O(1) "do we have this node locally?" membership tests
        m = self.changelog.nodemap
        search = []        # incomplete branch ranges scheduled for bisection
        fetch = {}         # earliest unknown node of each missing subset
        seen = {}          # branch heads already examined
        seenbranch = {}    # branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing here
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split remote heads into those we already have (known bases)
        # and those we must trace back from
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # req tracks nodes we have already asked the remote about,
        # so we never request the same branch twice
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []   # parents to ask the remote about in the next batch
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    # whole branch segment is unknown; check whether its
                    # root's parents are local (then the root is the
                    # earliest unknown changeset of this subset)
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown, not-yet-requested parents for the
                    # next round-trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch requests 10 nodes at a time to bound message size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # remote.between returns exponentially spaced samples of the
            # range; appending the known base n[1] guarantees the scan
            # below always terminates on a known node
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # gap between p and i is at most one node:
                        # p is the earliest unknown of this subset
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        # narrow to the sub-range (unknown p, known i)
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # only common ancestor is the null revision: the two
            # repositories share no history at all
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1381 1397
1382 1398 def findoutgoing(self, remote, base=None, heads=None, force=False):
1383 1399 """Return list of nodes that are roots of subsets not in remote
1384 1400
1385 1401 If base dict is specified, assume that these nodes and their parents
1386 1402 exist on the remote side.
1387 1403 If a list of heads is specified, return only nodes which are heads
1388 1404 or ancestors of these heads, and return a second element which
1389 1405 contains all remote heads which get new children.
1390 1406 """
1391 1407 if base == None:
1392 1408 base = {}
1393 1409 self.findincoming(remote, base, heads, force=force)
1394 1410
1395 1411 self.ui.debug(_("common changesets up to ")
1396 1412 + " ".join(map(short, base.keys())) + "\n")
1397 1413
1398 1414 remain = dict.fromkeys(self.changelog.nodemap)
1399 1415
1400 1416 # prune everything remote has from the tree
1401 1417 del remain[nullid]
1402 1418 remove = base.keys()
1403 1419 while remove:
1404 1420 n = remove.pop(0)
1405 1421 if n in remain:
1406 1422 del remain[n]
1407 1423 for p in self.changelog.parents(n):
1408 1424 remove.append(p)
1409 1425
1410 1426 # find every node whose parents have been pruned
1411 1427 subset = []
1412 1428 # find every remote head that will get new children
1413 1429 updated_heads = {}
1414 1430 for n in remain:
1415 1431 p1, p2 = self.changelog.parents(n)
1416 1432 if p1 not in remain and p2 not in remain:
1417 1433 subset.append(n)
1418 1434 if heads:
1419 1435 if p1 in heads:
1420 1436 updated_heads[p1] = True
1421 1437 if p2 in heads:
1422 1438 updated_heads[p2] = True
1423 1439
1424 1440 # this is the set of all roots we have to push
1425 1441 if heads:
1426 1442 return subset, updated_heads.keys()
1427 1443 else:
1428 1444 return subset
1429 1445
1430 1446 def pull(self, remote, heads=None, force=False):
1431 1447 lock = self.lock()
1432 1448 try:
1433 1449 fetch = self.findincoming(remote, heads=heads, force=force)
1434 1450 if fetch == [nullid]:
1435 1451 self.ui.status(_("requesting all changes\n"))
1436 1452
1437 1453 if not fetch:
1438 1454 self.ui.status(_("no changes found\n"))
1439 1455 return 0
1440 1456
1441 1457 if heads is None:
1442 1458 cg = remote.changegroup(fetch, 'pull')
1443 1459 else:
1444 1460 if 'changegroupsubset' not in remote.capabilities:
1445 1461 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1446 1462 cg = remote.changegroupsubset(fetch, heads, 'pull')
1447 1463 return self.addchangegroup(cg, 'pull', remote.url())
1448 1464 finally:
1449 1465 del lock
1450 1466
1451 1467 def push(self, remote, force=False, revs=None):
1452 1468 # there are two ways to push to remote repo:
1453 1469 #
1454 1470 # addchangegroup assumes local user can lock remote
1455 1471 # repo (local filesystem, old ssh servers).
1456 1472 #
1457 1473 # unbundle assumes local user cannot lock remote repo (new ssh
1458 1474 # servers, http servers).
1459 1475
1460 1476 if remote.capable('unbundle'):
1461 1477 return self.push_unbundle(remote, force, revs)
1462 1478 return self.push_addchangegroup(remote, force, revs)
1463 1479
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to remote.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or when the push would create new
        remote heads and force is not set.  As a side effect, warns
        about unsynced remote changes.
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills base with the common nodes
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing to check
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we have more heads: new heads
                warn = 1
            else:
                # simulate the post-push remote head set
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # no outgoing head descends from r, so r
                            # stays a head after the push
                            newheads.append(r)
                    else:
                        # remote head unknown locally: it survives
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1519 1535
1520 1536 def push_addchangegroup(self, remote, force, revs):
1521 1537 lock = remote.lock()
1522 1538 try:
1523 1539 ret = self.prepush(remote, force, revs)
1524 1540 if ret[0] is not None:
1525 1541 cg, remote_heads = ret
1526 1542 return remote.addchangegroup(cg, 'push', self.url())
1527 1543 return ret[1]
1528 1544 finally:
1529 1545 del lock
1530 1546
1531 1547 def push_unbundle(self, remote, force, revs):
1532 1548 # local repo finds heads on server, finds out what revs it
1533 1549 # must push. once revs transferred, if server finds it has
1534 1550 # different heads (someone else won commit/push race), server
1535 1551 # aborts.
1536 1552
1537 1553 ret = self.prepush(remote, force, revs)
1538 1554 if ret[0] is not None:
1539 1555 cg, remote_heads = ret
1540 1556 if force: remote_heads = ['force']
1541 1557 return remote.unbundle(cg, remote_heads, 'push')
1542 1558 return ret[1]
1543 1559
1544 1560 def changegroupinfo(self, nodes, source):
1545 1561 if self.ui.verbose or source == 'bundle':
1546 1562 self.ui.status(_("%d changesets found\n") % len(nodes))
1547 1563 if self.ui.debugflag:
1548 1564 self.ui.debug(_("List of changesets:\n"))
1549 1565 for node in nodes:
1550 1566 self.ui.debug("%s\n" % hex(node))
1551 1567
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # key 1 is the manifest, already handled above
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1848 1864
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes goes into the group
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent, used
        # to select the matching manifest/file revisions below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset's group lookup function is identity: it belongs
        # to itself
        def identity(x):
            return x

        # yield the nodes of a revlog whose linked changeset is in the
        # outgoing set, in revision (storage/topological) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # closure that records, per outgoing changeset, which files it
        # touched (keys of changedfileset)
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure mapping a revlog node to the changelog node it links to
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changesets first; the collector fills changedfiles as a
            # side effect of iterating the group
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifests
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each prefixed by a
            # chunk carrying the filename
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1918 1934
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # group callback: report each changeset and return the link
        # revision to record (the changelog position being appended to)
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # group callback for manifests/files: their linkrev is the
        # changelog rev of the changeset node they reference
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may veto the whole group before the
                # transaction is committed
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            # drop the cached copy so branchtags() rebuilds it from disk
            # (NOTE(review): the upstream diff removes this assignment —
            # confirm whether branchtags() alone suffices)
            self.branchcache = None
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # fire 'incoming' once per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2022 2037
2023 2038
2024 2039 def stream_in(self, remote):
2025 2040 fp = remote.stream_out()
2026 2041 l = fp.readline()
2027 2042 try:
2028 2043 resp = int(l)
2029 2044 except ValueError:
2030 2045 raise util.UnexpectedOutput(
2031 2046 _('Unexpected response from remote server:'), l)
2032 2047 if resp == 1:
2033 2048 raise util.Abort(_('operation forbidden by server'))
2034 2049 elif resp == 2:
2035 2050 raise util.Abort(_('locking the remote repository failed'))
2036 2051 elif resp != 0:
2037 2052 raise util.Abort(_('the server sent an unknown error code'))
2038 2053 self.ui.status(_('streaming all changes\n'))
2039 2054 l = fp.readline()
2040 2055 try:
2041 2056 total_files, total_bytes = map(int, l.split(' ', 1))
2042 2057 except ValueError, TypeError:
2043 2058 raise util.UnexpectedOutput(
2044 2059 _('Unexpected response from remote server:'), l)
2045 2060 self.ui.status(_('%d files to transfer, %s of data\n') %
2046 2061 (total_files, util.bytecount(total_bytes)))
2047 2062 start = time.time()
2048 2063 for i in xrange(total_files):
2049 2064 # XXX doesn't support '\n' or '\r' in filenames
2050 2065 l = fp.readline()
2051 2066 try:
2052 2067 name, size = l.split('\0', 1)
2053 2068 size = int(size)
2054 2069 except ValueError, TypeError:
2055 2070 raise util.UnexpectedOutput(
2056 2071 _('Unexpected response from remote server:'), l)
2057 2072 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2058 2073 ofp = self.sopener(name, 'w')
2059 2074 for chunk in util.filechunkiter(fp, limit=size):
2060 2075 ofp.write(chunk)
2061 2076 ofp.close()
2062 2077 elapsed = time.time() - start
2063 2078 if elapsed <= 0:
2064 2079 elapsed = 0.001
2065 2080 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2066 2081 (util.bytecount(total_bytes), elapsed,
2067 2082 util.bytecount(total_bytes / elapsed)))
2068 2083 self.invalidate()
2069 2084 return len(self.heads()) + 1
2070 2085
2071 2086 def clone(self, remote, heads=[], stream=False):
2072 2087 '''clone remote repository.
2073 2088
2074 2089 keyword arguments:
2075 2090 heads: list of revs to clone (forces use of pull)
2076 2091 stream: use streaming clone if possible'''
2077 2092
2078 2093 # now, all clients that can request uncompressed clones can
2079 2094 # read repo formats supported by all servers that can serve
2080 2095 # them.
2081 2096
2082 2097 # if revlog format changes, client will have to check version
2083 2098 # and format flags on "stream" capability, and use
2084 2099 # uncompressed only if compatible.
2085 2100
2086 2101 if stream and not heads and remote.capable('stream'):
2087 2102 return self.stream_in(remote)
2088 2103 return self.pull(remote, heads)
2089 2104
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the renames listed in *files*.

    The closure captures only plain tuples (no repo/transaction
    objects), so the deferred renames cannot keep those alive.
    """
    pending = [tuple(entry) for entry in files]
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
2097 2112
def instance(ui, path, create):
    """Open (or create) a localrepository at *path*, stripping any
    leading 'file:' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2100 2115
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now