##// END OF EJS Templates
move the reading of branch.cache from _branchtags to branchtags
Alexis S. L. Carvalho -
r6120:f89878df default
parent child Browse files
Show More
@@ -1,2350 +1,2347 b''
1 1 # queue.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial import commands, cmdutil, hg, patch, revlog, util
34 34 from mercurial import repair
35 35 import os, sys, re, errno
36 36
# let qclone run without a pre-existing local repository
commands.norepo += " qclone"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
42 42
class statusentry:
    """One entry of the mq status file: an applied patch as 'rev:name'."""

    def __init__(self, rev, name=None):
        """Build from separate (rev, name) fields, or parse *rev* as a
        'rev:name' string when no name is given.  A string without the
        ':' separator leaves both fields set to None."""
        if name:
            self.rev, self.name = rev, name
            return
        fields = rev.split(':', 1)
        if len(fields) != 2:
            fields = (None, None)
        self.rev, self.name = fields

    def __str__(self):
        """Serialize back to the 'rev:name' status-file form."""
        return self.rev + ':' + self.name
56 56
57 57 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Load queue state from *path* (default patch dir: path/patches).

        Reads the series and status files if present; missing files
        leave the corresponding lists empty.
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []          # list of statusentry (applied patches)
        self.full_series = []      # raw series file lines, comments included
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None  # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None      # lazily computed by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
81 81
    def diffopts(self):
        """Return the ui's diff options, computed lazily and cached."""
        if self._diffopts is None:
            self._diffopts = patch.diffopts(self.ui)
        return self._diffopts
86 86
    def join(self, *p):
        """Join path components onto the patch directory."""
        return os.path.join(self.path, *p)
89 89
    def find_series(self, patch):
        """Return the line index of *patch* in the series file, or None.

        Trailing comments ('#...') and whitespace are stripped before
        comparing, but every line - including comment-only lines -
        counts toward the returned index.
        """
        # group 2 captures everything up to the first '#'
        pre = re.compile("(\s*)([^#]+)")
        index = 0
        for l in self.full_series:
            m = pre.match(l)
            if m:
                s = m.group(2)
                s = s.rstrip()
                if s == patch:
                    return index
            index += 1
        return None
102 102
    # matches '#+guard' / '#-guard' annotations in series-file comments
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parse_series(self):
        """Rebuild self.series and self.series_guards from full_series.

        self.series gets the bare patch names (comments stripped);
        self.series_guards gets, per patch, the guards found in its
        trailing comment.  Aborts on duplicate series entries.
        """
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # comment-only line
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
125 125
    def check_guard(self, guard):
        """Return an error message if *guard* is not a valid guard name
        (leading '-'/'+' or any of '# \\t\\r\\n\\f' are invalid);
        implicitly return None when valid."""
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        for c in '-+':
            if first == c:
                return (_('guard %r starts with invalid character: %r') %
                        (guard, c))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)
136 136
    def set_active(self, guards):
        """Validate *guards* and make them the active guard set;
        marks the guards file dirty."""
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # dedupe, then sort for a stable on-disk order
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
147 147
    def active(self):
        """Return the list of active guards, reading the guards file
        lazily on first use.  A missing file means no guards; invalid
        entries are warned about and skipped."""
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
164 164
    def set_guards(self, idx, guards):
        """Replace the guards of series entry *idx* with *guards*
        (each must be '+name' or '-name'); marks the series dirty."""
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip the old guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
178 178
179 179 def pushable(self, idx):
180 180 if isinstance(idx, str):
181 181 idx = self.series.index(idx)
182 182 patchguards = self.series_guards[idx]
183 183 if not patchguards:
184 184 return True, None
185 185 default = False
186 186 guards = self.active()
187 187 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
188 188 if exactneg:
189 189 return False, exactneg[0]
190 190 pos = [g for g in patchguards if g[0] == '+']
191 191 exactpos = [g for g in pos if g[1:] in guards]
192 192 if pos:
193 193 if exactpos:
194 194 return True, exactpos[0]
195 195 return False, pos
196 196 return True, ''
197 197
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why patch *idx* is or is not pushable.

        With all_patches, report every patch via ui.write; otherwise
        only skipped patches are reported (via ui.warn), and only in
        verbose mode.
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
222 222
    def save_dirty(self):
        """Write back whichever of the status, series and guards files
        have been modified in memory."""
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)
232 232
    def readheaders(self, patch):
        """Parse the header of patch file *patch*.

        Returns (message, comments, user, date, diffstart > 1):
        message is the commit message lines, comments every line seen
        before the diff, user/date come from 'hg export' ('# User ',
        '# Date ') or email ('From:', 'Subject:') style headers, and
        the last element is True when an actual file diff
        ('diff --git' or '+++ ') was found.
        """
        def eatdiff(lines):
            # drop trailing diff leader lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None        # current header style: None/hgpatch/tag/tagdone
        subject = None
        diffstart = 0        # 1 = saw '--- ', 2 = confirmed diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
309 309
    def removeundo(self, repo):
        """Remove the store's undo file so a later rollback cannot undo
        an mq operation; warn (don't abort) if removal fails."""
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
318 318
    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        """Write the diff between node1 and node2, restricted to *files*,
        to *fp* using this queue's diff options."""
        fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)

        patch.diff(repo, node1, node2, fns, match=matchfn,
                   fp=fp, changes=changes, opts=self.diffopts())
325 325
326 326 def mergeone(self, repo, mergeq, head, patch, rev):
327 327 # first try just applying the patch
328 328 (err, n) = self.apply(repo, [ patch ], update_status=False,
329 329 strict=True, merge=rev)
330 330
331 331 if err == 0:
332 332 return (err, n)
333 333
334 334 if n is None:
335 335 raise util.Abort(_("apply failed for patch %s") % patch)
336 336
337 337 self.ui.warn("patch didn't work out, merging %s\n" % patch)
338 338
339 339 # apply failed, strip away that rev and merge.
340 340 hg.clean(repo, head)
341 341 self.strip(repo, n, update=False, backup='strip')
342 342
343 343 ctx = repo.changectx(rev)
344 344 ret = hg.merge(repo, rev)
345 345 if ret:
346 346 raise util.Abort(_("update returned %d") % ret)
347 347 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
348 348 if n == None:
349 349 raise util.Abort(_("repo commit failed"))
350 350 try:
351 351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 352 except:
353 353 raise util.Abort(_("unable to read %s") % patch)
354 354
355 355 patchf = self.opener(patch, "w")
356 356 if comments:
357 357 comments = "\n".join(comments) + '\n\n'
358 358 patchf.write(comments)
359 359 self.printdiff(repo, head, n, fp=patchf)
360 360 patchf.close()
361 361 self.removeundo(repo)
362 362 return (0, n)
363 363
    def qparents(self, repo, rev=None):
        """Return the revision the patch queue hangs from.

        With no rev: the working dir's first parent, or - when the
        working dir is at a merge - the top applied patch's node (None
        if nothing is applied).  With a rev: when rev is a merge, the
        parent whose hash is in the applied stack, else its first
        parent.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
382 382
    def mergepatch(self, repo, mergeq, series):
        """Pull each patch of *series* from *mergeq* into this queue by
        merging (see mergeone()); returns (err, head)."""
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
421 421
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files touched, fuzz encountered); on failure
        fuzz is reported as False.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
436 436
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files={}):
        """Apply the patches in *series* under wlock/lock inside a
        transaction; on failure the transaction is aborted and caches
        invalidated.  Returns (err, node) from _apply().

        NOTE(review): all_files={} is a mutable default used as an
        out-parameter - shared across calls unless callers pass their
        own dict; verify callers before changing.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction()
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    # the aborted transaction leaves in-memory state stale
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            del tr, lock, wlock
            self.removeundo(repo)
460 460
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files={}):
        """Apply patches from *series* one by one (no locking - see
        apply()).  Appends to self.applied when update_status is set
        and accumulates touched files into *all_files*.

        Returns (err, n): err nonzero on failure, n the last commit.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.warn("applying %s\n" % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                message, comments, user, date, patchfound = self.readheaders(patchname)
            except:
                self.ui.warn("Unable to read %s\n" % patchname)
                err = 1
                break

            if not message:
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            (patcherr, files, fuzz) = self.patch(repo, pf)
            all_files.update(files)
            patcherr = not patcherr

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)
            files = patch.updatedir(self.ui, repo, files)
            n = repo.commit(files, message, user, date, force=1)

            if n == None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(revlog.hex(n), patchname))

            if patcherr:
                # an empty patch (no diff found) is not a fatal error
                if not patchfound:
                    self.ui.warn("patch %s is empty\n" % patchname)
                    err = 0
                else:
                    self.ui.warn("patch failed, rejects left in working dir\n")
                    err = 1
                break

            if fuzz and strict:
                self.ui.warn("fuzz found when applying patch, stopping\n")
                err = 1
                break
        return (err, n)
532 532
    def delete(self, repo, patches, opts):
        """Remove *patches* (and/or revisions given via opts['rev'])
        from the series.

        Revisions must form a contiguous run starting at the bottom of
        the applied stack; their status entries are dropped too.  Patch
        files are deleted unless opts['keep'] is set.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # each rev must match the next entry from the bottom of
                # the applied stack
                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        # delete from the highest index down so earlier indices stay valid
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
584 584
    def check_toppatch(self, repo):
        """Abort unless the working directory is at the last applied
        patch; return that patch's node (None when nothing applied)."""
        if len(self.applied) > 0:
            top = revlog.bin(self.applied[-1].rev)
            pp = repo.dirstate.parents()
            if top not in pp:
                raise util.Abort(_("working directory revision is not qtip"))
            return top
        return None
    def check_localchanges(self, repo, force=False, refresh=True):
        """Abort if the working directory has uncommitted changes,
        unless *force*; *refresh* only selects the abort message.
        Returns the (modified, added, removed, deleted) lists."""
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            if not force:
                if refresh:
                    raise util.Abort(_("local changes found, refresh first"))
                else:
                    raise util.Abort(_("local changes found"))
        return m, a, r, d
602 602
    # names inside the patch directory that can never be patch names
    _reserved = ('series', 'status', 'guards')
    def check_reserved_name(self, name):
        """Abort if *name* clashes with mq bookkeeping files or with
        the '.hg'/'.mq' prefixes."""
        if (name in self._reserved or name.startswith('.hg')
            or name.startswith('.mq')):
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)
609 609
    def new(self, repo, patch, *pats, **opts):
        """Create patch *patch* on top of the applied stack, committing
        matched local changes (if any) as its content and writing its
        patch file with user/date headers when given."""
        msg = opts.get('msg')
        force = opts.get('force')
        user = opts.get('user')
        date = opts.get('date')
        self.check_reserved_name(patch)
        if os.path.exists(self.join(patch)):
            raise util.Abort(_('patch "%s" already exists') % patch)
        if opts.get('include') or opts.get('exclude') or pats:
            fns, match, anypats = cmdutil.matchpats(repo, pats, opts)
            m, a, r, d = repo.status(files=fns, match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force)
            fns, match, anypats = cmdutil.matchpats(repo, m + a + r)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        wlock = repo.wlock()
        try:
            insert = self.full_series_end()
            commitmsg = msg and msg or ("[mq]: %s" % patch)
            n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
            if n == None:
                raise util.Abort(_("repo commit failed"))
            self.full_series[insert:insert] = [patch]
            self.applied.append(statusentry(revlog.hex(n), patch))
            self.parse_series()
            self.series_dirty = 1
            self.applied_dirty = 1
            # write the patch header: hg-export style when a date is
            # given, email 'From:' style for a bare user
            p = self.opener(patch, "w")
            if date:
                p.write("# HG changeset patch\n")
                if user:
                    p.write("# User " + user + "\n")
                p.write("# Date " + date + "\n")
                p.write("\n")
            elif user:
                p.write("From: " + user + "\n")
                p.write("\n")
            if msg:
                msg = msg + "\n"
                p.write(msg)
            p.close()
            # drop our lock reference before refreshing
            wlock = None
            r = self.qrepo()
            if r: r.add([patch])
            if commitfiles:
                self.refresh(repo, short=True, git=opts.get('git'))
            self.removeundo(repo)
        finally:
            del wlock
660 660
    def strip(self, repo, rev, update=True, backup="all"):
        """Remove *rev* from the repository via repair.strip().

        With *update*, first move the working directory to rev's queue
        parent (refusing if there are local changes).  *backup* is
        forwarded to repair.strip().
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            del lock, wlock
680 680
    def isapplied(self, patch):
        """returns (index, rev, patch) if *patch* is applied, else None"""
        for i in xrange(len(self.applied)):
            a = self.applied[i]
            if a.name == patch:
                return (i, a.rev, a.name)
        return None
687 687 return None
688 688
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve *patch* to an entry of self.series (see the comment
        above for the accepted forms, plus the 'qtip'/'qbase' aliases);
        aborts when nothing matches.  None stays None."""
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, else unique substring, else qtip/qbase alias;
            # ambiguous substrings warn and resolve to None
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None
        if patch == None:
            return None

        # we don't want to return a partial match until we make
        # sure the file name passed in does not exist (checked below)
        res = partial_name(patch)
        if res and res == patch:
            return res

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if sno < len(self.series):
                    return self.series[sno]
            if not strict:
                # return any partial match made above
                if res:
                    return res
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
761 761
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None):
        """Apply unapplied patches up to and including *patch* (or just
        the next one).  Returns the error status from apply/mergepatch;
        on exception the working directory is reverted first."""
        wlock = repo.wlock()
        try:
            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    if info[0] < len(self.series) - 1:
                        self.ui.warn(
                            _('qpush: %s is already at the top\n') % patch)
                    else:
                        self.ui.warn(_('all patches are currently applied\n'))
                    return

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            if self.series_end() == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            start = self.series_end()
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status()[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise
            top = self.applied[-1].name
            if ret[0]:
                self.ui.write(
                    "Errors during apply, please fix and refresh %s\n" % top)
            else:
                self.ui.write("Now at: %s\n" % top)
            return ret[0]
        finally:
            del wlock
832 832
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches down to (but not including) *patch*, or just
        the top one; with *all*, pop everything.  The working directory
        is updated unless *update* is false and it is safe to skip."""
        def getfile(f, rev, flags):
            # restore file *f* at *rev* into the working directory
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if not update:
                # force an update if a dirstate parent is in the stack
                parents = repo.dirstate.parents()
                rr = [ revlog.bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn("qpop: forcing dirstate update\n")
                        update = True

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1;
            end = len(self.applied)
            if not patch:
                if all:
                    popi = 0
                else:
                    popi = len(self.applied) - 1
            else:
                popi = info[0] + 1
                if popi >= end:
                    self.ui.warn("qpop: %s is already at the top\n" % patch)
                    return
            info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

            start = info[0]
            rev = revlog.bin(info[1])

            if update:
                top = self.check_toppatch(repo)

            if repo.changelog.heads(rev) != [revlog.bin(self.applied[-1].rev)]:
                raise util.Abort("popping would remove a revision not "
                                 "managed by this patch queue")

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d, u = repo.status(qp, top)[:5]
                if d:
                    raise util.Abort("deletions found between repo revs")
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, revlog.nullid)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write("Now at: %s\n" % self.applied[-1].name)
            else:
                self.ui.write("Patch queue now empty\n")
        finally:
            del wlock
923 923
    def diff(self, repo, pats, opts):
        """Print the diff from the top applied patch's queue parent,
        restricted to *pats*, honoring the --git option."""
        top = self.check_toppatch(repo)
        if not top:
            self.ui.write("No patches applied\n")
            return
        qp = self.qparents(repo, top)
        if opts.get('git'):
            self.diffopts().git = True
        self.printdiff(repo, qp, files=pats, opts=opts)
933 933
    def refresh(self, repo, pats=None, **opts):
        """Regenerate the topmost applied patch from the working directory.

        Rewrites the patch file (header and diff) and, when the patch is
        also the repository tip, strips and recommits it in place.  With
        file patterns in `pats`, only matching changes are folded into the
        patch; the rest stay as working-directory modifications.
        Recognized opts: msg, user, date, git, short.
        Returns 1 when no patch is applied.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = revlog.bin(top)
            # refusing to refresh a revision with children keeps history
            # below the patch intact
            if repo.changelog.heads(top) != [top]:
                raise util.Abort("cannot refresh a revision with children")
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            message, comments, user, date, patchfound = self.readheaders(patchfn)

            patchf = self.opener(patchfn, 'r+')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            msg = opts.get('msg', '').rstrip()
            if msg and comments:
                # Remove existing message, keeping the rest of the comments
                # fields.
                # If comments contains 'subject: ', message will prepend
                # the field and a blank line.
                if message:
                    subj = 'subject: ' + message[0].lower()
                    for i in xrange(len(comments)):
                        if subj == comments[i].lower():
                            del comments[i]
                            message = message[2:]
                            break
                # drop every remaining message line from the comments list
                ci = 0
                for mi in xrange(len(message)):
                    while message[mi] != comments[ci]:
                        ci += 1
                    del comments[ci]

            def setheaderfield(comments, prefixes, new):
                # Update all references to a field in the patch header.
                # If none found, add it email style.
                res = False
                for prefix in prefixes:
                    for i in xrange(len(comments)):
                        if comments[i].startswith(prefix):
                            comments[i] = prefix + new
                            res = True
                            break
                return res

            newuser = opts.get('user')
            if newuser:
                if not setheaderfield(comments, ['From: ', '# User '], newuser):
                    try:
                        # prefer hg-style header placement when present
                        patchheaderat = comments.index('# HG changeset patch')
                        comments.insert(patchheaderat + 1,'# User ' + newuser)
                    except ValueError:
                        comments = ['From: ' + newuser, ''] + comments
                user = newuser

            newdate = opts.get('date')
            if newdate:
                if setheaderfield(comments, ['# Date '], newdate):
                    date = newdate

            if msg:
                comments.append(msg)

            # rewrite the patch file from scratch
            patchf.seek(0)
            patchf.truncate()

            if comments:
                comments = "\n".join(comments) + '\n\n'
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here.  We update the dirstate in place and strip
                # off the tip commit.  Then just commit the current directory
                # tree.  We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already
                #
                # this should really read:
                #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                if opts.get('short'):
                    filelist = mm + aa + dd
                    match = dict.fromkeys(filelist).__contains__
                else:
                    filelist = None
                    match = util.always
                m, a, r, d, u = repo.status(files=filelist, match=match)[:5]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch.  In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                        dd.append(x)

                m = util.unique(mm)
                r = util.unique(dd)
                a = util.unique(aa)
                c = [filter(matchfn, l) for l in (m, a, r, [], u)]
                filelist = util.unique(c[0] + c[1] + c[2])
                patch.diff(repo, patchparent, files=filelist, match=matchfn,
                           fp=patchf, changes=c, opts=self.diffopts())
                patchf.close()

                repo.dirstate.setparents(*cparents)
                copies = {}
                for dst in a:
                    src = repo.dirstate.copied(dst)
                    if src is not None:
                        copies.setdefault(src, []).append(dst)
                    repo.dirstate.add(dst)
                # remember the copies between patchparent and tip
                # this may be slow, so don't do it if we're not tracking copies
                if self.diffopts().git:
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies[src[0]] = copies.get(dst, [])
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                for src, dsts in copies.iteritems():
                    for dst in dsts:
                        repo.dirstate.copy(src, dst)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(message)
                else:
                    message = msg

                if not user:
                    user = changes[1]

                # replace the tip commit with a fresh one built from the
                # updated dirstate
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, top, update=False,
                           backup='strip')
                n = repo.commit(filelist, message, user, date, match=matchfn,
                                force=1)
                self.applied.append(statusentry(revlog.hex(n), patchfn))
                self.removeundo(repo)
            else:
                # slow path: the patch is not tip, so regenerate the diff
                # and re-push the whole stack up to this patch
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.close()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            del wlock
1159 1159
1160 1160 def init(self, repo, create=False):
1161 1161 if not create and os.path.isdir(self.path):
1162 1162 raise util.Abort(_("patch queue directory already exists"))
1163 1163 try:
1164 1164 os.mkdir(self.path)
1165 1165 except OSError, inst:
1166 1166 if inst.errno != errno.EEXIST or not create:
1167 1167 raise
1168 1168 if create:
1169 1169 return self.qrepo(create=True)
1170 1170
1171 1171 def unapplied(self, repo, patch=None):
1172 1172 if patch and patch not in self.series:
1173 1173 raise util.Abort(_("patch %s is not in series file") % patch)
1174 1174 if not patch:
1175 1175 start = self.series_end()
1176 1176 else:
1177 1177 start = self.series.index(patch) + 1
1178 1178 unapplied = []
1179 1179 for i in xrange(start, len(self.series)):
1180 1180 pushable, reason = self.pushable(i)
1181 1181 if pushable:
1182 1182 unapplied.append((i, self.series[i]))
1183 1183 self.explain_pushable(i)
1184 1184 return unapplied
1185 1185
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (a slice of) the patch series.

        Each patch is written with an optional verbose prefix
        ('index status ') and, with summary=True, the first line of its
        header.  Status codes: A applied, U unapplied/pushable, G guarded.
        With missing=True, list files in the patch directory that are not
        tracked by the series instead.
        """
        def displayname(patchname):
            # append ': first-header-line' in summary mode
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = dict.fromkeys([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    # non-verbose with a status filter: skip non-matching
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files unknown to the series,
            # excluding mq's own bookkeeping files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1229 1229
1230 1230 def issaveline(self, l):
1231 1231 if l.name == '.hg.patches.save.line':
1232 1232 return True
1233 1233
1234 1234 def qrepo(self, create=False):
1235 1235 if create or os.path.isdir(self.join(".hg")):
1236 1236 return hg.repository(self.ui, path=self.path, create=create)
1237 1237
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore saved mq state from the description of changeset `rev`.

        Parses the 'Patch Data:' section written by save(): entries with a
        revision become the applied stack, the rest the series; a
        'Dirstate:' line records the queue repository parents.  With
        delete=True the save changeset is stripped (only if it is a head);
        with qupdate=True the queue repository is updated to the saved
        parent.  Returns 1 on failure.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart != None:
                # inside the data section: 'rev:name' is an applied entry,
                # a bare name is an unapplied series entry
                l = lines[i].rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart == None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                # only update the working dir if it sat on the save entry
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                self.ui.status(_("queue directory updating\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                hg.clean(r, qpp[0])
1293 1293
    def save(self, repo, msg=None):
        """Commit the current mq state as a save changeset.

        Serializes the applied stack and full series (series entries are
        prefixed with ':') plus the queue repository parents into the
        commit message, then appends a '.hg.patches.save.line' marker
        entry to the applied stack.  Returns 1 on failure.
        """
        if len(self.applied) == 0:
            self.ui.warn("save: no patches applied, exiting\n")
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn("status is already saved\n")
            return 1

        # ':' marks unapplied series entries in the serialized data
        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = "hg patches saved state"
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn("repo commit failed\n")
            return 1
        self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1321 1321
1322 1322 def full_series_end(self):
1323 1323 if len(self.applied) > 0:
1324 1324 p = self.applied[-1].name
1325 1325 end = self.find_series(p)
1326 1326 if end == None:
1327 1327 return len(self.full_series)
1328 1328 return end + 1
1329 1329 return 0
1330 1330
    def series_end(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def next(start):
            # skip over guarded patches unless all_patches was requested
            if all_patches:
                return start
            i = start
            while i < len(self.series):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explain_pushable(i)
                i += 1
            return i
        if len(self.applied) > 0:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                # topmost applied patch no longer in the series file
                return 0
            return next(end + 1)
        return next(end)
1356 1356
1357 1357 def appliedname(self, index):
1358 1358 pname = self.applied[index].name
1359 1359 if not self.ui.verbose:
1360 1360 p = pname
1361 1361 else:
1362 1362 p = str(self.series.index(pname)) + " " + pname
1363 1363 return p
1364 1364
1365 1365 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1366 1366 force=None, git=False):
1367 1367 def checkseries(patchname):
1368 1368 if patchname in self.series:
1369 1369 raise util.Abort(_('patch %s is already in the series file')
1370 1370 % patchname)
1371 1371 def checkfile(patchname):
1372 1372 if not force and os.path.exists(self.join(patchname)):
1373 1373 raise util.Abort(_('patch "%s" already exists')
1374 1374 % patchname)
1375 1375
1376 1376 if rev:
1377 1377 if files:
1378 1378 raise util.Abort(_('option "-r" not valid when importing '
1379 1379 'files'))
1380 1380 rev = cmdutil.revrange(repo, rev)
1381 1381 rev.sort(lambda x, y: cmp(y, x))
1382 1382 if (len(files) > 1 or len(rev) > 1) and patchname:
1383 1383 raise util.Abort(_('option "-n" not valid when importing multiple '
1384 1384 'patches'))
1385 1385 i = 0
1386 1386 added = []
1387 1387 if rev:
1388 1388 # If mq patches are applied, we can only import revisions
1389 1389 # that form a linear path to qbase.
1390 1390 # Otherwise, they should form a linear path to a head.
1391 1391 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1392 1392 if len(heads) > 1:
1393 1393 raise util.Abort(_('revision %d is the root of more than one '
1394 1394 'branch') % rev[-1])
1395 1395 if self.applied:
1396 1396 base = revlog.hex(repo.changelog.node(rev[0]))
1397 1397 if base in [n.rev for n in self.applied]:
1398 1398 raise util.Abort(_('revision %d is already managed')
1399 1399 % rev[0])
1400 1400 if heads != [revlog.bin(self.applied[-1].rev)]:
1401 1401 raise util.Abort(_('revision %d is not the parent of '
1402 1402 'the queue') % rev[0])
1403 1403 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1404 1404 lastparent = repo.changelog.parentrevs(base)[0]
1405 1405 else:
1406 1406 if heads != [repo.changelog.node(rev[0])]:
1407 1407 raise util.Abort(_('revision %d has unmanaged children')
1408 1408 % rev[0])
1409 1409 lastparent = None
1410 1410
1411 1411 if git:
1412 1412 self.diffopts().git = True
1413 1413
1414 1414 for r in rev:
1415 1415 p1, p2 = repo.changelog.parentrevs(r)
1416 1416 n = repo.changelog.node(r)
1417 1417 if p2 != revlog.nullrev:
1418 1418 raise util.Abort(_('cannot import merge revision %d') % r)
1419 1419 if lastparent and lastparent != r:
1420 1420 raise util.Abort(_('revision %d is not the parent of %d')
1421 1421 % (r, lastparent))
1422 1422 lastparent = p1
1423 1423
1424 1424 if not patchname:
1425 1425 patchname = normname('%d.diff' % r)
1426 1426 self.check_reserved_name(patchname)
1427 1427 checkseries(patchname)
1428 1428 checkfile(patchname)
1429 1429 self.full_series.insert(0, patchname)
1430 1430
1431 1431 patchf = self.opener(patchname, "w")
1432 1432 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1433 1433 patchf.close()
1434 1434
1435 1435 se = statusentry(revlog.hex(n), patchname)
1436 1436 self.applied.insert(0, se)
1437 1437
1438 1438 added.append(patchname)
1439 1439 patchname = None
1440 1440 self.parse_series()
1441 1441 self.applied_dirty = 1
1442 1442
1443 1443 for filename in files:
1444 1444 if existing:
1445 1445 if filename == '-':
1446 1446 raise util.Abort(_('-e is incompatible with import from -'))
1447 1447 if not patchname:
1448 1448 patchname = normname(filename)
1449 1449 self.check_reserved_name(patchname)
1450 1450 if not os.path.isfile(self.join(patchname)):
1451 1451 raise util.Abort(_("patch %s does not exist") % patchname)
1452 1452 else:
1453 1453 try:
1454 1454 if filename == '-':
1455 1455 if not patchname:
1456 1456 raise util.Abort(_('need --name to import a patch from -'))
1457 1457 text = sys.stdin.read()
1458 1458 else:
1459 1459 text = file(filename, 'rb').read()
1460 1460 except IOError:
1461 1461 raise util.Abort(_("unable to read %s") % patchname)
1462 1462 if not patchname:
1463 1463 patchname = normname(os.path.basename(filename))
1464 1464 self.check_reserved_name(patchname)
1465 1465 checkfile(patchname)
1466 1466 patchf = self.opener(patchname, "w")
1467 1467 patchf.write(text)
1468 1468 checkseries(patchname)
1469 1469 index = self.full_series_end() + i
1470 1470 self.full_series[index:index] = [patchname]
1471 1471 self.parse_series()
1472 1472 self.ui.warn("adding %s to series file\n" % patchname)
1473 1473 i += 1
1474 1474 added.append(patchname)
1475 1475 patchname = None
1476 1476 self.series_dirty = 1
1477 1477 qrepo = self.qrepo()
1478 1478 if qrepo:
1479 1479 qrepo.add(added)
1480 1480
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to
    the --rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The patches must be applied
    and at the base of the stack. This option is useful when the patches
    have been applied upstream.

    With --keep, the patch files are preserved in the patch directory."""
    # thin wrapper: delegate to the queue object and persist its state
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1497 1497
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    # list up to (and including) `patch`, or the whole applied stack
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1508 1508
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    # start listing after `patch`, or after the applied stack
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1519 1519
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    # delegate to the queue and persist its dirty state
    q = repo.mq
    q.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
              existing=opts['existing'], force=opts['force'],
              git=opts['git'])
    q.save_dirty()
    return 0
1542 1542
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one).
    You can use qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        return 0
    # seed the new queue repository with an ignore file and an empty
    # series file, then track both
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n^\\.mq\nsyntax: glob\nstatus\nguards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1568 1568
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied.  If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination.  If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default.  Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # location of the nested patch repository for a given repo
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    patchespath = opts['patches'] or patchdir(sr)
    try:
        pr = hg.repository(ui, patchespath)
    except hg.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = first applied patch; exclude it and everything
            # above it from what gets transferred to a non-local dest
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
                        pull=opts['pull'], update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # a local destination may still contain the applied patches;
            # strip them so the clone starts with an empty applied stack
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1626 1626
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)
1633 1633
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1638 1638
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=t-1, length=1, status='A',
                     summary=opts.get('summary'))
1649 1649
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if len(q.series) == pos:
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1658 1658
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    # the two guard conditions are disjoint, so order does not matter
    if not napplied:
        ui.write("No patches applied\n")
        return 1
    if napplied == 1:
        ui.write("Only one patch applied\n")
        return 1
    return q.qseries(repo, start=napplied-2, length=1, status='A',
                     summary=opts.get('summary'))
1671 1671
def setupheaderopts(ui, opts):
    """Default the 'user' and 'date' options in `opts` from the
    --currentuser / --currentdate flags.

    The current user name and date are computed lazily, only when the
    corresponding option is actually empty and the current* flag is set.
    The previous version evaluated ui.username() and util.makedate()
    unconditionally as call arguments, doing needless work (and failing
    on username-lookup problems) even when explicit values were given.
    """
    if not opts['user'] and opts['currentuser']:
        opts['user'] = ui.username()
    if not opts['date'] and opts['currentdate']:
        opts['date'] = "%d %d" % util.makedate()
1678 1678
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them. You may also use -I, -X, and/or a list of
    files after the patch name to add only changes to matching files
    to the new patch, leaving the rest as uncommitted modifications.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is '[mq]: PATCH'"""
    q = repo.mq
    msg = cmdutil.logmessage(opts)
    # interactive edit of the message when requested
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1701 1701
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("No patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current patch header
        toppatch = q.applied[-1].name
        headers = q.readheaders(toppatch)
        message, user = headers[0], headers[2]
        message = ui.edit('\n'.join(message), user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1727 1727
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1732 1732
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # really skip duplicates and the current patch; previously the
            # patch was appended despite the warning, so it was folded
            # twice (or the top patch folded into itself).  Also terminate
            # the warning with a newline like every other message.
            ui.warn(_('Skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect headers so they can be appended to the target's
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate folded patch headers, separated by '* * *'
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1791 1791
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts['force']
    # pop if the target is already applied, push otherwise
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force)
    else:
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
1802 1802
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print 'patchname: guard guard ...' for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no explicit patch name (or first arg looks like a guard):
    # operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # set (or clear, with --none) guards for the patch
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
1850 1850
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        # default to the top of the applied stack
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    # readheaders() returns (message, comments, user, date, diffstart)
    message = q.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1865 1865
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Saved queues live next to path and are named "<base>.<N>"; the
    entry with the largest N wins.  Returns (None, None) when no save
    exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base so "." (and any other regex metacharacter in the
    # queue name) is matched literally; the original pattern let "."
    # match any character, so e.g. "patchesX3" was wrongly accepted
    namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            # use "is None": comparing with == would also be wrong for
            # objects overriding __eq__ and is non-idiomatic
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1882 1882
def savename(path):
    """Return the file name for the next save of path (last index + 1)."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save: start numbering at 1
        index = 0
    return path + ".%d" % (index + 1)
1889 1889
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack"""
    q = repo.mq
    mergeq = None

    if opts['all']:
        # --all: push everything, i.e. up to the last series entry
        if not q.series:
            ui.warn(_('no patches in series\n'))
            return 0
        patch = q.series[-1]
    if opts['merge']:
        # merge against a previously saved queue (named or most recent)
        if opts['name']:
            savedpath = opts['name']
        else:
            savedpath, _index = lastsavename(q.path)
        if not savedpath:
            ui.warn("no saved queues found, please use -n\n")
            return 1
        mergeq = queue(ui, repo.join(""), savedpath)
        ui.warn("merging with queue at: %s\n" % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq)
1913 1913
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack"""
    localupdate = True
    if opts['name']:
        # popping from a named (saved) queue: don't touch the working dir
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn('using patch queue: %s\n' % q.path)
        localupdate = False
    else:
        q = repo.mq
    result = q.pop(repo, patch, force=opts['force'], update=localupdate,
                   all=opts['all'])
    q.save_dirty()
    return result
1927 1927
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # single-argument form: rename the topmost applied patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('No patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original file name
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('Renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any guards attached to it
    idx = q.find_series(patch)
    oldguards = q.guard_re.findall(q.full_series[idx])
    q.full_series[idx] = name + ''.join([' #' + g for g in oldguards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, keep the status file in sync
    applied_info = q.isapplied(patch)
    if applied_info:
        q.applied[applied_info[0]] = statusentry(applied_info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    qrepo = q.qrepo()
    if qrepo:
        # record the rename in the versioned patch repository too
        wlock = qrepo.wlock()
        try:
            if qrepo.dirstate[name] == 'r':
                qrepo.undelete([name])
            qrepo.copy(patch, name)
            qrepo.remove([patch], False)
        finally:
            del wlock

    q.save_dirty()
1983 1983
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a rev"""
    node = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, node, delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
1992 1992
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn("copy %s to %s\n" % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        # clearing the status file is best-effort, but only ignore
        # filesystem errors: the old bare "except:" also swallowed
        # KeyboardInterrupt and SystemExit
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            pass
    return 0
2022 2022
def strip(ui, repo, rev, **opts):
    """strip a revision and all later revs on the same branch"""
    node = repo.lookup(rev)
    # decide how much of the stripped changesets to back up
    backup = 'all'
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    # only update the working dir if it is not already at null
    update = repo.dirstate.parents()[0] != revlog.nullid
    repo.mq.strip(repo, node, backup=backup, update=update)
    return 0
2034 2034
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if it
    has no guards or any positive guards match the currently selected guard,
    but will not be pushed if any negative guards match the current guard.
    For example:

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it
    has a positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are skipped
    and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last applied
    patch that is not guarded. Use --reapply (which implies --pop) to push
    back to the current patch afterwards, but skip guarded patches.

    Use -s/--series to print a list of all guards in the series file (no
    other arguments needed). Use -v for more information.'''

    q = repo.mq

    def guarded_indexes():
        # indexes of applied patches blocked by the active guards
        return [i for i in xrange(len(q.applied)) if not q.pushable(i)[0]]

    guards = q.active()
    if args or opts['none']:
        # changing the active guard selection
        prev_unapplied = q.unapplied(repo)
        prev_guarded = guarded_indexes()
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            # report how the selection changed what can be pushed
            unapplied = q.unapplied(repo)
            guarded = guarded_indexes()
            if len(unapplied) != len(prev_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(prev_unapplied), len(unapplied)))
            if len(guarded) != len(prev_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(prev_guarded), len(guarded)))
    elif opts['series']:
        # summarize every guard used in the series file
        counts = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                counts.setdefault(g, 0)
                counts[g] += 1
        if ui.verbose:
            counts['NONE'] = noguards
        counted = counts.items()
        # sort by guard name, ignoring the leading +/- sign
        counted.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
        if counted:
            ui.note(_('guards in series file:\n'))
            for g, cnt in counted:
                ui.note('%2d ' % cnt)
                ui.write(g, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # plain "hg qselect": print the active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top so --reapply can push back to it
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i-1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2136 2136
def reposetup(ui, repo):
    """Wrap local repositories so mq state is reflected in core operations."""

    class mqrepo(repo.__class__):
        def abort_if_wdir_patched(self, errmsg, force=False):
            # refuse to proceed when the working dir parent is an
            # applied mq patch (unless forced)
            if self.mq.applied and not force:
                parent = revlog.hex(self.dirstate.parents()[0])
                if parent in [s.rev for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, *args, **opts):
            # 'force' may arrive positionally (6th arg) or as a keyword
            if len(args) >= 6:
                force = args[5]
            else:
                force = opts.get('force')
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(*args, **opts)

        def push(self, remote, force=False, revs=None):
            # pushing applied patches would publish volatile changesets
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def tags(self):
            if self.tagscache:
                return self.tagscache

            tagscache = super(mqrepo, self).tags()

            q = self.mq
            if not q.applied:
                return tagscache

            # expose each applied patch as a tag, plus qtip/qbase/qparent
            mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]

            if mqtags[-1][0] not in self.changelog.nodemap:
                self.ui.warn('mq status file refers to unknown node %s\n'
                             % revlog.short(mqtags[-1][0]))
                return tagscache

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            for patch in mqtags:
                if patch[1] in tagscache:
                    self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
                else:
                    tagscache[patch[1]] = patch[0]

            return tagscache

        def _branchtags(self, partial, lrev):
            # like localrepository._branchtags, but never write mq
            # patches into the on-disk branch cache
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = revlog.bin(q.applied[0].rev)
            if qbasenode not in cl.nodemap:
                self.ui.warn('mq status file refers to unknown node %s\n'
                             % revlog.short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                self._updatebranchcache(partial, lrev+1, qbase)
                self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            self._updatebranchcache(partial, start, cl.count())

            return partial

    if repo.local():
        repo.__class__ = mqrepo
        repo.mq = queue(ui, repo.join(""))
2223 2220
# shared option definitions for the qseries-style commands
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

# options shared by the patch-creating/refreshing commands
headeropts = [
    ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
    ('u', 'user', '', _('add "From: <given user>" to patch')),
    ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
    ('d', 'date', '', _('add "Date: <given date>" to patch'))]

# command table: name -> (function, options, synopsis)
cmdtable = {
    "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
    "qclone":
        (clone,
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None, _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '', _('location of source patch repo')),
          ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
        (commit,
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
        (diff,
         [('g', 'git', None, _('use git extended diff format')),
          ('U', 'unified', 3, _('number of lines of context to show')),
          ] + commands.walkopts,
         _('hg qdiff [-I] [-X] [-U NUM] [-g] [FILE]...')),
    "qdelete|qremove|qrm":
        (delete,
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [], _('stop managing a revision'))],
         _('hg qdelete [-k] [-r REV]... [PATCH]...')),
    'qfold':
        (fold,
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
        (goto,
         [('f', 'force', None, _('overwrite any local changes'))],
         _('hg qgoto [OPTION]... PATCH')),
    'qguard':
        (guard,
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [+GUARD]... [-GUARD]...')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "^qimport":
        (qimport,
         [('e', 'existing', None, 'import file in patch dir'),
          ('n', 'name', '', 'patch file name'),
          ('f', 'force', None, 'overwrite existing files'),
          ('r', 'rev', [], 'place existing revisions under mq control'),
          ('g', 'git', None, _('use git extended diff format'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
    "^qinit":
        (init,
         [('c', 'create-repo', None, 'create queue repository')],
         _('hg qinit [-c]')),
    "qnew":
        (new,
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes into patch')),
          ('g', 'git', None, _('use git extended diff format')),
          ] + commands.walkopts + commands.commitopts + headeropts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
        (pop,
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '', _('queue name to pop')),
          ('f', 'force', None, _('forget any local changes'))],
         _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
    "^qpush":
        (push,
         [('f', 'force', None, _('apply if the patch has rejects')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue')),
          ('n', 'name', '', _('merge queue name'))],
         _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
    "^qrefresh":
        (refresh,
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None, _('refresh only files already in the patch')),
          ] + commands.walkopts + commands.commitopts + headeropts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
        (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
        (restore,
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working dir'))],
         _('hg qrestore [-d] [-u] REV')),
    "qsave":
        (save,
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '', _('copy directory name')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
        (select,
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
        (series,
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]')),
    "^strip":
        (strip,
         [('f', 'force', None, _('force multi-head removal')),
          ('b', 'backup', None, _('bundle unrelated changesets')),
          ('n', 'nobackup', None, _('no backups'))],
         _('hg strip [-f] [-b] [-n] REV')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
}
@@ -1,2106 +1,2105 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import re, lock, transaction, tempfile, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = util.set(('lookup', 'changegroupsubset'))
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __init__(self, parentui, path=None, create=0):
20 20 repo.repository.__init__(self)
21 21 self.root = os.path.realpath(path)
22 22 self.path = os.path.join(self.root, ".hg")
23 23 self.origroot = path
24 24 self.opener = util.opener(self.path)
25 25 self.wopener = util.opener(self.root)
26 26
27 27 if not os.path.isdir(self.path):
28 28 if create:
29 29 if not os.path.exists(path):
30 30 os.mkdir(path)
31 31 os.mkdir(self.path)
32 32 requirements = ["revlogv1"]
33 33 if parentui.configbool('format', 'usestore', True):
34 34 os.mkdir(os.path.join(self.path, "store"))
35 35 requirements.append("store")
36 36 # create an invalid changelog
37 37 self.opener("00changelog.i", "a").write(
38 38 '\0\0\0\2' # represents revlogv2
39 39 ' dummy changelog to prevent using the old repo layout'
40 40 )
41 41 reqfile = self.opener("requires", "w")
42 42 for r in requirements:
43 43 reqfile.write("%s\n" % r)
44 44 reqfile.close()
45 45 else:
46 46 raise repo.RepoError(_("repository %s not found") % path)
47 47 elif create:
48 48 raise repo.RepoError(_("repository %s already exists") % path)
49 49 else:
50 50 # find requirements
51 51 try:
52 52 requirements = self.opener("requires").read().splitlines()
53 53 except IOError, inst:
54 54 if inst.errno != errno.ENOENT:
55 55 raise
56 56 requirements = []
57 57 # check them
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 61
62 62 # setup store
63 63 if "store" in requirements:
64 64 self.encodefn = util.encodefilename
65 65 self.decodefn = util.decodefilename
66 66 self.spath = os.path.join(self.path, "store")
67 67 else:
68 68 self.encodefn = lambda x: x
69 69 self.decodefn = lambda x: x
70 70 self.spath = self.path
71 71
72 72 try:
73 73 # files in .hg/ will be created using this mode
74 74 mode = os.stat(self.spath).st_mode
75 75 # avoid some useless chmods
76 76 if (0777 & ~util._umask) == (0777 & mode):
77 77 mode = None
78 78 except OSError:
79 79 mode = None
80 80
81 81 self._createmode = mode
82 82 self.opener.createmode = mode
83 83 sopener = util.opener(self.spath)
84 84 sopener.createmode = mode
85 85 self.sopener = util.encodedopener(sopener, self.encodefn)
86 86
87 87 self.ui = ui.ui(parentui=parentui)
88 88 try:
89 89 self.ui.readconfig(self.join("hgrc"), self.root)
90 90 extensions.loadall(self.ui)
91 91 except IOError:
92 92 pass
93 93
94 94 self.tagscache = None
95 95 self._tagstypecache = None
96 96 self.branchcache = None
97 97 self._ubranchcache = None # UTF-8 version of branchcache
98 98 self.nodetagscache = None
99 99 self.filterpats = {}
100 100 self._datafilters = {}
101 101 self._transref = self._lockref = self._wlockref = None
102 102
103 103 def __getattr__(self, name):
104 104 if name == 'changelog':
105 105 self.changelog = changelog.changelog(self.sopener)
106 106 self.sopener.defversion = self.changelog.version
107 107 return self.changelog
108 108 if name == 'manifest':
109 109 self.changelog
110 110 self.manifest = manifest.manifest(self.sopener)
111 111 return self.manifest
112 112 if name == 'dirstate':
113 113 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
114 114 return self.dirstate
115 115 else:
116 116 raise AttributeError, name
117 117
118 118 def url(self):
119 119 return 'file:' + self.root
120 120
121 121 def hook(self, name, throw=False, **args):
122 122 return hook.hook(self.ui, self, name, throw, **args)
123 123
124 124 tag_disallowed = ':\r\n'
125 125
126 126 def _tag(self, name, node, message, local, user, date, parent=None,
127 127 extra={}):
128 128 use_dirstate = parent is None
129 129
130 130 for c in self.tag_disallowed:
131 131 if c in name:
132 132 raise util.Abort(_('%r cannot be used in a tag name') % c)
133 133
134 134 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
135 135
136 136 def writetag(fp, name, munge, prevtags):
137 137 fp.seek(0, 2)
138 138 if prevtags and prevtags[-1] != '\n':
139 139 fp.write('\n')
140 140 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
141 141 fp.close()
142 142
143 143 prevtags = ''
144 144 if local:
145 145 try:
146 146 fp = self.opener('localtags', 'r+')
147 147 except IOError, err:
148 148 fp = self.opener('localtags', 'a')
149 149 else:
150 150 prevtags = fp.read()
151 151
152 152 # local tags are stored in the current charset
153 153 writetag(fp, name, None, prevtags)
154 154 self.hook('tag', node=hex(node), tag=name, local=local)
155 155 return
156 156
157 157 if use_dirstate:
158 158 try:
159 159 fp = self.wfile('.hgtags', 'rb+')
160 160 except IOError, err:
161 161 fp = self.wfile('.hgtags', 'ab')
162 162 else:
163 163 prevtags = fp.read()
164 164 else:
165 165 try:
166 166 prevtags = self.filectx('.hgtags', parent).data()
167 167 except revlog.LookupError:
168 168 pass
169 169 fp = self.wfile('.hgtags', 'wb')
170 170 if prevtags:
171 171 fp.write(prevtags)
172 172
173 173 # committed tags are stored in UTF-8
174 174 writetag(fp, name, util.fromlocal, prevtags)
175 175
176 176 if use_dirstate and '.hgtags' not in self.dirstate:
177 177 self.add(['.hgtags'])
178 178
179 179 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
180 180 extra=extra)
181 181
182 182 self.hook('tag', node=hex(node), tag=name, local=local)
183 183
184 184 return tagnode
185 185
186 186 def tag(self, name, node, message, local, user, date):
187 187 '''tag a revision with a symbolic name.
188 188
189 189 if local is True, the tag is stored in a per-repository file.
190 190 otherwise, it is stored in the .hgtags file, and a new
191 191 changeset is committed with the change.
192 192
193 193 keyword arguments:
194 194
195 195 local: whether to store tag in non-version-controlled file
196 196 (default False)
197 197
198 198 message: commit message to use if committing
199 199
200 200 user: name of user to use if committing
201 201
202 202 date: date tuple to use if committing'''
203 203
204 204 for x in self.status()[:5]:
205 205 if '.hgtags' in x:
206 206 raise util.Abort(_('working copy of .hgtags is changed '
207 207 '(please commit .hgtags manually)'))
208 208
209 209
210 210 self._tag(name, node, message, local, user, date)
211 211
212 212 def tags(self):
213 213 '''return a mapping of tag to node'''
214 214 if self.tagscache:
215 215 return self.tagscache
216 216
217 217 globaltags = {}
218 218 tagtypes = {}
219 219
220 220 def readtags(lines, fn, tagtype):
221 221 filetags = {}
222 222 count = 0
223 223
224 224 def warn(msg):
225 225 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
226 226
227 227 for l in lines:
228 228 count += 1
229 229 if not l:
230 230 continue
231 231 s = l.split(" ", 1)
232 232 if len(s) != 2:
233 233 warn(_("cannot parse entry"))
234 234 continue
235 235 node, key = s
236 236 key = util.tolocal(key.strip()) # stored in UTF-8
237 237 try:
238 238 bin_n = bin(node)
239 239 except TypeError:
240 240 warn(_("node '%s' is not well formed") % node)
241 241 continue
242 242 if bin_n not in self.changelog.nodemap:
243 243 warn(_("tag '%s' refers to unknown node") % key)
244 244 continue
245 245
246 246 h = []
247 247 if key in filetags:
248 248 n, h = filetags[key]
249 249 h.append(n)
250 250 filetags[key] = (bin_n, h)
251 251
252 252 for k, nh in filetags.items():
253 253 if k not in globaltags:
254 254 globaltags[k] = nh
255 255 tagtypes[k] = tagtype
256 256 continue
257 257
258 258 # we prefer the global tag if:
259 259 # it supercedes us OR
260 260 # mutual supercedes and it has a higher rank
261 261 # otherwise we win because we're tip-most
262 262 an, ah = nh
263 263 bn, bh = globaltags[k]
264 264 if (bn != an and an in bh and
265 265 (bn not in ah or len(bh) > len(ah))):
266 266 an = bn
267 267 ah.extend([n for n in bh if n not in ah])
268 268 globaltags[k] = an, ah
269 269 tagtypes[k] = tagtype
270 270
271 271 # read the tags file from each head, ending with the tip
272 272 f = None
273 273 for rev, node, fnode in self._hgtagsnodes():
274 274 f = (f and f.filectx(fnode) or
275 275 self.filectx('.hgtags', fileid=fnode))
276 276 readtags(f.data().splitlines(), f, "global")
277 277
278 278 try:
279 279 data = util.fromlocal(self.opener("localtags").read())
280 280 # localtags are stored in the local character set
281 281 # while the internal tag table is stored in UTF-8
282 282 readtags(data.splitlines(), "localtags", "local")
283 283 except IOError:
284 284 pass
285 285
286 286 self.tagscache = {}
287 287 self._tagstypecache = {}
288 288 for k,nh in globaltags.items():
289 289 n = nh[0]
290 290 if n != nullid:
291 291 self.tagscache[k] = n
292 292 self._tagstypecache[k] = tagtypes[k]
293 293 self.tagscache['tip'] = self.changelog.tip()
294 294
295 295 return self.tagscache
296 296
297 297 def tagtype(self, tagname):
298 298 '''
299 299 return the type of the given tag. result can be:
300 300
301 301 'local' : a local tag
302 302 'global' : a global tag
303 303 None : tag does not exist
304 304 '''
305 305
306 306 self.tags()
307 307
308 308 return self._tagstypecache.get(tagname)
309 309
310 310 def _hgtagsnodes(self):
311 311 heads = self.heads()
312 312 heads.reverse()
313 313 last = {}
314 314 ret = []
315 315 for node in heads:
316 316 c = self.changectx(node)
317 317 rev = c.rev()
318 318 try:
319 319 fnode = c.filenode('.hgtags')
320 320 except revlog.LookupError:
321 321 continue
322 322 ret.append((rev, node, fnode))
323 323 if fnode in last:
324 324 ret[last[fnode]] = None
325 325 last[fnode] = len(ret) - 1
326 326 return [item for item in ret if item]
327 327
328 328 def tagslist(self):
329 329 '''return a list of tags ordered by revision'''
330 330 l = []
331 331 for t, n in self.tags().items():
332 332 try:
333 333 r = self.changelog.rev(n)
334 334 except:
335 335 r = -2 # sort to the beginning of the list if unknown
336 336 l.append((r, t, n))
337 337 l.sort()
338 338 return [(t, n) for r, t, n in l]
339 339
340 340 def nodetags(self, node):
341 341 '''return the tags associated with a node'''
342 342 if not self.nodetagscache:
343 343 self.nodetagscache = {}
344 344 for t, n in self.tags().items():
345 345 self.nodetagscache.setdefault(n, []).append(t)
346 346 return self.nodetagscache.get(node, [])
347 347
348 def _branchtags(self):
349 partial, last, lrev = self._readbranchcache()
350
348 def _branchtags(self, partial, lrev):
351 349 tiprev = self.changelog.count() - 1
352 350 if lrev != tiprev:
353 351 self._updatebranchcache(partial, lrev+1, tiprev+1)
354 352 self._writebranchcache(partial, self.changelog.tip(), tiprev)
355 353
356 354 return partial
357 355
358 356 def branchtags(self):
359 357 if self.branchcache is not None:
360 358 return self.branchcache
361 359
362 360 self.branchcache = {} # avoid recursion in changectx
363 partial = self._branchtags()
361 partial, last, lrev = self._readbranchcache()
362 self._branchtags(partial, lrev)
364 363
365 364 # the branch cache is stored on disk as UTF-8, but in the local
366 365 # charset internally
367 366 for k, v in partial.items():
368 367 self.branchcache[util.tolocal(k)] = v
369 368 self._ubranchcache = partial
370 369 return self.branchcache
371 370
    def _readbranchcache(self):
        """Read .hg/branch.cache from disk.

        Returns (partial, last, lrev): partial maps branch name
        (UTF-8) -> tip node, and last/lrev identify the changelog
        entry the cache was valid for.  On a missing, unreadable or
        corrupt cache an empty mapping with (nullid, nullrev) is
        returned so callers rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file (or unreadable): start from scratch
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: warn in debug mode and
            # fall back to an empty, invalid cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
399 398
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write the branch cache to .hg/branch.cache.

        Format: first line "<tip hex> <tiprev>", then one
        "<node hex> <branch name>" line per branch.  Write errors are
        deliberately ignored -- the cache is only an optimization and
        will be rebuilt on the next read.
        """
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
409 408
410 409 def _updatebranchcache(self, partial, start, end):
411 410 for r in xrange(start, end):
412 411 c = self.changectx(r)
413 412 b = c.branch()
414 413 partial[b] = c.node()
415 414
    def lookup(self, key):
        """Resolve a revision specifier 'key' to a changelog node.

        Resolution order: '.' (first working-dir parent), 'null',
        exact changelog match, tag name, branch name, unambiguous
        node-prefix match.  Raises repo.RepoError if nothing matches.
        """
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # if a binary node was passed in, show it as hex in the error
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
442 441
443 442 def dev(self):
444 443 return os.lstat(self.path).st_dev
445 444
    def local(self):
        """Always True for this repository class (it is backed by the
        local filesystem rather than a remote protocol)."""
        return True
448 447
449 448 def join(self, f):
450 449 return os.path.join(self.path, f)
451 450
452 451 def sjoin(self, f):
453 452 f = self.encodefn(f)
454 453 return os.path.join(self.spath, f)
455 454
456 455 def wjoin(self, f):
457 456 return os.path.join(self.root, f)
458 457
    def file(self, f):
        """Return the filelog for tracked file f.

        A single leading '/' is stripped so absolute-style repo paths
        address the same filelog as relative ones.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
463 462
    def changectx(self, changeid=None):
        """Return a change context for changeid (delegates to
        context.changectx; None selects that function's default)."""
        return context.changectx(self, changeid)
466 465
    def workingctx(self):
        """Return a context object for the working directory."""
        return context.workingctx(self)
469 468
470 469 def parents(self, changeid=None):
471 470 '''
472 471 get list of changectxs for parents of changeid or working directory
473 472 '''
474 473 if changeid is None:
475 474 pl = self.dirstate.parents()
476 475 else:
477 476 n = self.changelog.lookup(changeid)
478 477 pl = self.changelog.parents(n)
479 478 if pl[1] == nullid:
480 479 return [self.changectx(pl[0])]
481 480 return [self.changectx(pl[0]), self.changectx(pl[1])]
482 481
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.
        """
        return context.filectx(self, path, changeid, fileid)
487 486
    def getcwd(self):
        """Return the dirstate's notion of the current working directory."""
        return self.dirstate.getcwd()
490 489
    def pathto(self, f, cwd=None):
        """Return f formatted relative to cwd (delegates to
        dirstate.pathto)."""
        return self.dirstate.pathto(f, cwd)
493 492
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
496 495
497 496 def _link(self, f):
498 497 return os.path.islink(self.wjoin(f))
499 498
    def _filter(self, filter, filename, data):
        """Run data through the filters configured for filename in the
        ui section named by 'filter' (e.g. "encode" or "decode").

        The compiled (matcher, function, params) list for a section is
        cached in self.filterpats.  Only the first pattern matching
        filename is applied; unmatched data is returned unchanged.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # uses that Python filter with the remainder as params
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise treat cmd as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
528 527
    def adddatafilter(self, name, filter):
        """Register a named Python data filter for use in [encode]/
        [decode] configuration (see _filter)."""
        self._datafilters[name] = filter
531 530
532 531 def wread(self, filename):
533 532 if self._link(filename):
534 533 data = os.readlink(self.wjoin(filename))
535 534 else:
536 535 data = self.wopener(filename, 'r').read()
537 536 return self._filter("encode", filename, data)
538 537
539 538 def wwrite(self, filename, data, flags):
540 539 data = self._filter("decode", filename, data)
541 540 try:
542 541 os.unlink(self.wjoin(filename))
543 542 except OSError:
544 543 pass
545 544 self.wopener(filename, 'w').write(data)
546 545 util.set_flags(self.wjoin(filename), flags)
547 546
    def wwritedata(self, filename, data):
        """Return data as it would be written to the working dir:
        with the 'decode' filters applied."""
        return self._filter("decode", filename, data)
550 549
    def transaction(self):
        """Open (or nest into) a store transaction.

        If a transaction is already live, a nested handle is returned.
        Otherwise the dirstate and branch are journalled so a later
        rollback() can restore them, and a new transaction over the
        store journal is created.
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on close, the journal files become the undo files
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # only a weak reference is kept, so an abandoned transaction
        # can be garbage collected (and aborted by its destructor)
        self._transref = weakref.ref(tr)
        return tr
576 575
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back,
        False otherwise.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                # drop caches that may reference rolled-back data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
590 589
    def rollback(self):
        """Undo the last committed transaction.

        Restores the store from the undo journal and puts back the
        saved dirstate and branch; warns when no rollback information
        is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch missing: keep the current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
613 612
614 613 def invalidate(self):
615 614 for a in "changelog manifest".split():
616 615 if hasattr(self, a):
617 616 self.__delattr__(a)
618 617 self.tagscache = None
619 618 self._tagstypecache = None
620 619 self.nodetagscache = None
621 620 self.branchcache = None
622 621 self._ubranchcache = None
623 622
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file 'lockname'.

        A first non-blocking attempt is made; if the lock is held and
        wait is true, warn once and retry with the configured timeout
        (ui.timeout, default 600s).  Without wait the LockHeld
        exception propagates.  acquirefn, if given, runs after the
        lock is taken.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
638 637
639 638 def lock(self, wait=True):
640 639 if self._lockref and self._lockref():
641 640 return self._lockref()
642 641
643 642 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
644 643 _('repository %s') % self.origroot)
645 644 self._lockref = weakref.ref(l)
646 645 return l
647 646
648 647 def wlock(self, wait=True):
649 648 if self._wlockref and self._wlockref():
650 649 return self._wlockref()
651 650
652 651 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
653 652 self.dirstate.invalidate, _('working directory of %s') %
654 653 self.origroot)
655 654 self._wlockref = weakref.ref(l)
656 655 return l
657 656
    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for fn (or the parent node when
        the file is unchanged).  fn is appended to changelist whenever
        a new file revision is actually created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
719 718
720 719 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
721 720 if p1 is None:
722 721 p1, p2 = self.dirstate.parents()
723 722 return self.commit(files=files, text=text, user=user, date=date,
724 723 p1=p1, p2=p2, extra=extra, empty_ok=True)
725 724
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Create a new changeset and return its node.

        When p1 is None ("dirstate mode") the file list comes from the
        given files or a status scan, and the dirstate is updated
        afterwards.  With explicit parents this behaves like
        rawcommit.  Returns None when there is nothing to commit.
        """
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            # determine which files to commit and which to remove
            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    # branch names must be valid UTF-8
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            # a weak proxy keeps helpers from pinning the transaction
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # in dirstate mode a vanished file aborts the commit;
                    # in rawcommit mode it is treated as a removal
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                # build the skeleton for the editor session
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

            if use_dirstate:
                # strip leading blank lines; an all-blank message is an error
                lines = [line.rstrip() for line in text.rstrip().splitlines()]
                while lines and not lines[0]:
                    del lines[0]
                if not lines:
                    raise util.Abort(_("empty commit message"))
                text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, date, extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            # keep the in-memory branch cache in sync
            if self.branchcache and "branch" in extra:
                self.branchcache[util.tolocal(extra["branch"])] = n

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr, lock, wlock
905 904
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch

        NOTE: the mutable default for 'files' is never modified here;
        it is only read (dict.fromkeys copies it).
        '''

        if node:
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was requested but not in the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n')
                                 % (self.pathto(fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
947 946
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists: (modified, added, removed,
        deleted, unknown, ignored, clean).  ignored and clean are only
        populated when the corresponding list_* flag is set.
        """

        def fcmp(fn, getnode):
            # compare working-dir contents against the stored revision
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of 'node' restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    for f in lookup:
                        if f not in ctx or ctx[f].cmp(self.wread(f)):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                # best effort: skip the fixup if the
                                # wlock cannot be taken without waiting
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 is only in node1: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1059 1058
1060 1059 def add(self, list):
1061 1060 wlock = self.wlock()
1062 1061 try:
1063 1062 rejected = []
1064 1063 for f in list:
1065 1064 p = self.wjoin(f)
1066 1065 try:
1067 1066 st = os.lstat(p)
1068 1067 except:
1069 1068 self.ui.warn(_("%s does not exist!\n") % f)
1070 1069 rejected.append(f)
1071 1070 continue
1072 1071 if st.st_size > 10000000:
1073 1072 self.ui.warn(_("%s: files over 10MB may cause memory and"
1074 1073 " performance problems\n"
1075 1074 "(use 'hg revert %s' to unadd the file)\n")
1076 1075 % (f, f))
1077 1076 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1078 1077 self.ui.warn(_("%s not added: only files and symlinks "
1079 1078 "supported currently\n") % f)
1080 1079 rejected.append(p)
1081 1080 elif self.dirstate[f] in 'amn':
1082 1081 self.ui.warn(_("%s already tracked!\n") % f)
1083 1082 elif self.dirstate[f] == 'r':
1084 1083 self.dirstate.normallookup(f)
1085 1084 else:
1086 1085 self.dirstate.add(f)
1087 1086 return rejected
1088 1087 finally:
1089 1088 del wlock
1090 1089
1091 1090 def forget(self, list):
1092 1091 wlock = self.wlock()
1093 1092 try:
1094 1093 for f in list:
1095 1094 if self.dirstate[f] != 'a':
1096 1095 self.ui.warn(_("%s not added!\n") % f)
1097 1096 else:
1098 1097 self.dirstate.forget(f)
1099 1098 finally:
1100 1099 del wlock
1101 1100
    def remove(self, list, unlink=False):
        """Mark the files in list as removed in the dirstate.

        With unlink=True the working copies are deleted first; a file
        that still exists afterwards is skipped with a warning.
        """
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # already-missing files are fine; anything else
                        # is a real error
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the scheduled add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1124 1123
    def undelete(self, list):
        """Restore files marked removed ('r') from a parent revision.

        Content and flags are taken from whichever dirstate parent's
        manifest contains the file (first parent preferred).
        """
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    # pick the manifest that actually has the file
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock
1141 1140
1142 1141 def copy(self, source, dest):
1143 1142 wlock = None
1144 1143 try:
1145 1144 p = self.wjoin(dest)
1146 1145 if not (os.path.exists(p) or os.path.islink(p)):
1147 1146 self.ui.warn(_("%s does not exist!\n") % dest)
1148 1147 elif not (os.path.isfile(p) or os.path.islink(p)):
1149 1148 self.ui.warn(_("copy failed: %s is not a file or a "
1150 1149 "symbolic link\n") % dest)
1151 1150 else:
1152 1151 wlock = self.wlock()
1153 1152 if dest not in self.dirstate:
1154 1153 self.dirstate.add(dest)
1155 1154 self.dirstate.copy(source, dest)
1156 1155 finally:
1157 1156 del wlock
1158 1157
1159 1158 def heads(self, start=None):
1160 1159 heads = self.changelog.heads(start)
1161 1160 # sort the output in rev descending order
1162 1161 heads = [(-self.changelog.rev(h), h) for h in heads]
1163 1162 heads.sort()
1164 1163 return [n for (r, n) in heads]
1165 1164
    def branchheads(self, branch, start=None):
        """Return the head nodes of 'branch', tipmost first.

        With start given, heads unreachable from start are filtered
        out.  Returns [] for an unknown branch name.
        """
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1211 1210
1212 1211 def branches(self, nodes):
1213 1212 if not nodes:
1214 1213 nodes = [self.changelog.tip()]
1215 1214 b = []
1216 1215 for n in nodes:
1217 1216 t = n
1218 1217 while 1:
1219 1218 p = self.changelog.parents(n)
1220 1219 if p[1] != nullid or p[0] == nullid:
1221 1220 b.append((t, n, p[0], p[1]))
1222 1221 break
1223 1222 n = p[0]
1224 1223 return b
1225 1224
1226 1225 def between(self, pairs):
1227 1226 r = []
1228 1227
1229 1228 for top, bottom in pairs:
1230 1229 n, l, i = top, [], 0
1231 1230 f = 1
1232 1231
1233 1232 while n != bottom:
1234 1233 p = self.changelog.parents(n)[0]
1235 1234 if i == f:
1236 1235 l.append(n)
1237 1236 f = f * 2
1238 1237 n = p
1239 1238 i += 1
1240 1239
1241 1240 r.append(l)
1242 1241
1243 1242 return r
1244 1243
1245 1244 def findincoming(self, remote, base=None, heads=None, force=False):
1246 1245 """Return list of roots of the subsets of missing nodes from remote
1247 1246
1248 1247 If base dict is specified, assume that these nodes and their parents
1249 1248 exist on the remote side and that no child of a node of base exists
1250 1249 in both remote and self.
1251 1250 Furthermore base will be updated to include the nodes that exists
1252 1251 in self and remote but no children exists in self and remote.
1253 1252 If a list of heads is specified, return only nodes which are heads
1254 1253 or ancestors of these heads.
1255 1254
1256 1255 All the ancestors of base are in self and in remote.
1257 1256 All the descendants of the list returned are missing in self.
1258 1257 (and so we know that the rest of the nodes are missing in remote, see
1259 1258 outgoing)
1260 1259 """
1261 1260 m = self.changelog.nodemap
1262 1261 search = []
1263 1262 fetch = {}
1264 1263 seen = {}
1265 1264 seenbranch = {}
1266 1265 if base == None:
1267 1266 base = {}
1268 1267
1269 1268 if not heads:
1270 1269 heads = remote.heads()
1271 1270
1272 1271 if self.changelog.tip() == nullid:
1273 1272 base[nullid] = 1
1274 1273 if heads != [nullid]:
1275 1274 return [nullid]
1276 1275 return []
1277 1276
1278 1277 # assume we're closer to the tip than the root
1279 1278 # and start by examining the heads
1280 1279 self.ui.status(_("searching for changes\n"))
1281 1280
1282 1281 unknown = []
1283 1282 for h in heads:
1284 1283 if h not in m:
1285 1284 unknown.append(h)
1286 1285 else:
1287 1286 base[h] = 1
1288 1287
1289 1288 if not unknown:
1290 1289 return []
1291 1290
1292 1291 req = dict.fromkeys(unknown)
1293 1292 reqcnt = 0
1294 1293
1295 1294 # search through remote branches
1296 1295 # a 'branch' here is a linear segment of history, with four parts:
1297 1296 # head, root, first parent, second parent
1298 1297 # (a branch always has two parents (or none) by definition)
1299 1298 unknown = remote.branches(unknown)
1300 1299 while unknown:
1301 1300 r = []
1302 1301 while unknown:
1303 1302 n = unknown.pop(0)
1304 1303 if n[0] in seen:
1305 1304 continue
1306 1305
1307 1306 self.ui.debug(_("examining %s:%s\n")
1308 1307 % (short(n[0]), short(n[1])))
1309 1308 if n[0] == nullid: # found the end of the branch
1310 1309 pass
1311 1310 elif n in seenbranch:
1312 1311 self.ui.debug(_("branch already found\n"))
1313 1312 continue
1314 1313 elif n[1] and n[1] in m: # do we know the base?
1315 1314 self.ui.debug(_("found incomplete branch %s:%s\n")
1316 1315 % (short(n[0]), short(n[1])))
1317 1316 search.append(n) # schedule branch range for scanning
1318 1317 seenbranch[n] = 1
1319 1318 else:
1320 1319 if n[1] not in seen and n[1] not in fetch:
1321 1320 if n[2] in m and n[3] in m:
1322 1321 self.ui.debug(_("found new changeset %s\n") %
1323 1322 short(n[1]))
1324 1323 fetch[n[1]] = 1 # earliest unknown
1325 1324 for p in n[2:4]:
1326 1325 if p in m:
1327 1326 base[p] = 1 # latest known
1328 1327
1329 1328 for p in n[2:4]:
1330 1329 if p not in req and p not in m:
1331 1330 r.append(p)
1332 1331 req[p] = 1
1333 1332 seen[n[0]] = 1
1334 1333
1335 1334 if r:
1336 1335 reqcnt += 1
1337 1336 self.ui.debug(_("request %d: %s\n") %
1338 1337 (reqcnt, " ".join(map(short, r))))
1339 1338 for p in xrange(0, len(r), 10):
1340 1339 for b in remote.branches(r[p:p+10]):
1341 1340 self.ui.debug(_("received %s:%s\n") %
1342 1341 (short(b[0]), short(b[1])))
1343 1342 unknown.append(b)
1344 1343
1345 1344 # do binary search on the branches we found
1346 1345 while search:
1347 1346 n = search.pop(0)
1348 1347 reqcnt += 1
1349 1348 l = remote.between([(n[0], n[1])])[0]
1350 1349 l.append(n[1])
1351 1350 p = n[0]
1352 1351 f = 1
1353 1352 for i in l:
1354 1353 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1355 1354 if i in m:
1356 1355 if f <= 2:
1357 1356 self.ui.debug(_("found new branch changeset %s\n") %
1358 1357 short(p))
1359 1358 fetch[p] = 1
1360 1359 base[i] = 1
1361 1360 else:
1362 1361 self.ui.debug(_("narrowed branch search to %s:%s\n")
1363 1362 % (short(p), short(i)))
1364 1363 search.append((p, i))
1365 1364 break
1366 1365 p, f = i, f * 2
1367 1366
1368 1367 # sanity check our fetch list
1369 1368 for f in fetch.keys():
1370 1369 if f in m:
1371 1370 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1372 1371
1373 1372 if base.keys() == [nullid]:
1374 1373 if force:
1375 1374 self.ui.warn(_("warning: repository is unrelated\n"))
1376 1375 else:
1377 1376 raise util.Abort(_("repository is unrelated"))
1378 1377
1379 1378 self.ui.debug(_("found new changesets starting at ") +
1380 1379 " ".join([short(f) for f in fetch]) + "\n")
1381 1380
1382 1381 self.ui.debug(_("%d total queries\n") % reqcnt)
1383 1382
1384 1383 return fetch.keys()
1385 1384
1386 1385 def findoutgoing(self, remote, base=None, heads=None, force=False):
1387 1386 """Return list of nodes that are roots of subsets not in remote
1388 1387
1389 1388 If base dict is specified, assume that these nodes and their parents
1390 1389 exist on the remote side.
1391 1390 If a list of heads is specified, return only nodes which are heads
1392 1391 or ancestors of these heads, and return a second element which
1393 1392 contains all remote heads which get new children.
1394 1393 """
1395 1394 if base == None:
1396 1395 base = {}
1397 1396 self.findincoming(remote, base, heads, force=force)
1398 1397
1399 1398 self.ui.debug(_("common changesets up to ")
1400 1399 + " ".join(map(short, base.keys())) + "\n")
1401 1400
1402 1401 remain = dict.fromkeys(self.changelog.nodemap)
1403 1402
1404 1403 # prune everything remote has from the tree
1405 1404 del remain[nullid]
1406 1405 remove = base.keys()
1407 1406 while remove:
1408 1407 n = remove.pop(0)
1409 1408 if n in remain:
1410 1409 del remain[n]
1411 1410 for p in self.changelog.parents(n):
1412 1411 remove.append(p)
1413 1412
1414 1413 # find every node whose parents have been pruned
1415 1414 subset = []
1416 1415 # find every remote head that will get new children
1417 1416 updated_heads = {}
1418 1417 for n in remain:
1419 1418 p1, p2 = self.changelog.parents(n)
1420 1419 if p1 not in remain and p2 not in remain:
1421 1420 subset.append(n)
1422 1421 if heads:
1423 1422 if p1 in heads:
1424 1423 updated_heads[p1] = True
1425 1424 if p2 in heads:
1426 1425 updated_heads[p2] = True
1427 1426
1428 1427 # this is the set of all roots we have to push
1429 1428 if heads:
1430 1429 return subset, updated_heads.keys()
1431 1430 else:
1432 1431 return subset
1433 1432
1434 1433 def pull(self, remote, heads=None, force=False):
1435 1434 lock = self.lock()
1436 1435 try:
1437 1436 fetch = self.findincoming(remote, heads=heads, force=force)
1438 1437 if fetch == [nullid]:
1439 1438 self.ui.status(_("requesting all changes\n"))
1440 1439
1441 1440 if not fetch:
1442 1441 self.ui.status(_("no changes found\n"))
1443 1442 return 0
1444 1443
1445 1444 if heads is None:
1446 1445 cg = remote.changegroup(fetch, 'pull')
1447 1446 else:
1448 1447 if 'changegroupsubset' not in remote.capabilities:
1449 1448 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1450 1449 cg = remote.changegroupsubset(fetch, heads, 'pull')
1451 1450 return self.addchangegroup(cg, 'pull', remote.url())
1452 1451 finally:
1453 1452 del lock
1454 1453
1455 1454 def push(self, remote, force=False, revs=None):
1456 1455 # there are two ways to push to remote repo:
1457 1456 #
1458 1457 # addchangegroup assumes local user can lock remote
1459 1458 # repo (local filesystem, old ssh servers).
1460 1459 #
1461 1460 # unbundle assumes local user cannot lock remote repo (new ssh
1462 1461 # servers, http servers).
1463 1462
1464 1463 if remote.capable('unbundle'):
1465 1464 return self.push_unbundle(remote, force, revs)
1466 1465 return self.push_addchangegroup(remote, force, revs)
1467 1466
    def prepush(self, remote, force, revs):
        """Analyse local and remote repos and build the outgoing changegroup.

        Returns (changegroup, remote_heads) when there is something to push,
        or (None, 1) when there is nothing to push or the push was refused
        because it would create new remote heads and force is not set.
        """
        # base is filled in by findincoming with the common nodes; inc is
        # truthy when the remote has changes we do not have locally
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: any push is safe
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # simulate the post-push remote head list: every remote head
                # not superseded by one of our outgoing heads survives, and
                # unknown remote heads survive too; more heads than before
                # means the push would create new remote branches
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1523 1522
1524 1523 def push_addchangegroup(self, remote, force, revs):
1525 1524 lock = remote.lock()
1526 1525 try:
1527 1526 ret = self.prepush(remote, force, revs)
1528 1527 if ret[0] is not None:
1529 1528 cg, remote_heads = ret
1530 1529 return remote.addchangegroup(cg, 'push', self.url())
1531 1530 return ret[1]
1532 1531 finally:
1533 1532 del lock
1534 1533
1535 1534 def push_unbundle(self, remote, force, revs):
1536 1535 # local repo finds heads on server, finds out what revs it
1537 1536 # must push. once revs transferred, if server finds it has
1538 1537 # different heads (someone else won commit/push race), server
1539 1538 # aborts.
1540 1539
1541 1540 ret = self.prepush(remote, force, revs)
1542 1541 if ret[0] is not None:
1543 1542 cg, remote_heads = ret
1544 1543 if force: remote_heads = ['force']
1545 1544 return remote.unbundle(cg, remote_heads, 'push')
1546 1545 return ret[1]
1547 1546
1548 1547 def changegroupinfo(self, nodes, source):
1549 1548 if self.ui.verbose or source == 'bundle':
1550 1549 self.ui.status(_("%d changesets found\n") % len(nodes))
1551 1550 if self.ui.debugflag:
1552 1551 self.ui.debug(_("List of changesets:\n"))
1553 1552 for node in nodes:
1554 1553 self.ui.debug("%s\n" % hex(node))
1555 1554
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            # NOTE: the integer key 1 is the extranodes convention for "the
            # manifest" (filenames are the keys for file revlogs).
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # skip the manifest key (1); it was handled above
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1852 1851
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs being sent, for linkrev filtering
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changenode's owning changenode is itself
        def identity(x):
            return x

        # yield the nodes of a revlog whose linkrev is in the outgoing set
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # collect, as a side effect of the changelog group walk, the names
        # of all files touched by the outgoing changesets
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a revlog node to the changelog node to transmit as its linkrev
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changelog chunks first (filling changedfiles as we go)
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # then the manifest chunks
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # finally one group per changed file, each preceded by its name
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1922 1921
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # changesets map to themselves: their link rev is the rev about to
        # be added
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # each file group is preceded by a chunk with the file name;
                # an empty chunk terminates the stream of file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup runs inside the transaction so a hook
                # failure rolls everything back
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchcache = None
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2026 2025
2027 2026
2028 2027 def stream_in(self, remote):
2029 2028 fp = remote.stream_out()
2030 2029 l = fp.readline()
2031 2030 try:
2032 2031 resp = int(l)
2033 2032 except ValueError:
2034 2033 raise util.UnexpectedOutput(
2035 2034 _('Unexpected response from remote server:'), l)
2036 2035 if resp == 1:
2037 2036 raise util.Abort(_('operation forbidden by server'))
2038 2037 elif resp == 2:
2039 2038 raise util.Abort(_('locking the remote repository failed'))
2040 2039 elif resp != 0:
2041 2040 raise util.Abort(_('the server sent an unknown error code'))
2042 2041 self.ui.status(_('streaming all changes\n'))
2043 2042 l = fp.readline()
2044 2043 try:
2045 2044 total_files, total_bytes = map(int, l.split(' ', 1))
2046 2045 except ValueError, TypeError:
2047 2046 raise util.UnexpectedOutput(
2048 2047 _('Unexpected response from remote server:'), l)
2049 2048 self.ui.status(_('%d files to transfer, %s of data\n') %
2050 2049 (total_files, util.bytecount(total_bytes)))
2051 2050 start = time.time()
2052 2051 for i in xrange(total_files):
2053 2052 # XXX doesn't support '\n' or '\r' in filenames
2054 2053 l = fp.readline()
2055 2054 try:
2056 2055 name, size = l.split('\0', 1)
2057 2056 size = int(size)
2058 2057 except ValueError, TypeError:
2059 2058 raise util.UnexpectedOutput(
2060 2059 _('Unexpected response from remote server:'), l)
2061 2060 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2062 2061 ofp = self.sopener(name, 'w')
2063 2062 for chunk in util.filechunkiter(fp, limit=size):
2064 2063 ofp.write(chunk)
2065 2064 ofp.close()
2066 2065 elapsed = time.time() - start
2067 2066 if elapsed <= 0:
2068 2067 elapsed = 0.001
2069 2068 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2070 2069 (util.bytecount(total_bytes), elapsed,
2071 2070 util.bytecount(total_bytes / elapsed)))
2072 2071 self.invalidate()
2073 2072 return len(self.heads()) + 1
2074 2073
2075 2074 def clone(self, remote, heads=[], stream=False):
2076 2075 '''clone remote repository.
2077 2076
2078 2077 keyword arguments:
2079 2078 heads: list of revs to clone (forces use of pull)
2080 2079 stream: use streaming clone if possible'''
2081 2080
2082 2081 # now, all clients that can request uncompressed clones can
2083 2082 # read repo formats supported by all servers that can serve
2084 2083 # them.
2085 2084
2086 2085 # if revlog format changes, client will have to check version
2087 2086 # and format flags on "stream" capability, and use
2088 2087 # uncompressed only if compatible.
2089 2088
2090 2089 if stream and not heads and remote.capable('stream'):
2091 2090 return self.stream_in(remote)
2092 2091 return self.pull(remote, heads)
2093 2092
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the given (src, dest) renames.

    A plain closure is returned instead of a bound method so no circular
    reference keeps repository destructors from running.
    """
    pending = [tuple(pair) for pair in files]
    def dorenames():
        for src, dest in pending:
            util.rename(src, dest)
    return dorenames
2101 2100
def instance(ui, path, create):
    """Open (or create) the local repository at a plain 'file' path/URL."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2104 2103
def islocal(path):
    """Repositories opened by this module are local by definition."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now