##// END OF EJS Templates
mq: factor out push conditions checks...
Patrick Mezard -
r13327:dc11e30b default
parent child Browse files
Show More
@@ -1,3253 +1,3253 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 You will by default be managing a patch queue named "patches". You can
42 42 create other, independent patch queues with the :hg:`qqueue` command.
43 43 '''
44 44
45 45 from mercurial.i18n import _
46 46 from mercurial.node import bin, hex, short, nullid, nullrev
47 47 from mercurial.lock import release
48 48 from mercurial import commands, cmdutil, hg, patch, util
49 49 from mercurial import repair, extensions, url, error
50 50 import os, sys, re, errno, shutil
51 51
# qclone operates without a pre-existing local repository.
commands.norepo += " qclone"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
57 57
class statusentry(object):
    """One applied-patch record: changeset node plus patch name.

    The repr form, "<hex node>:<name>", is the on-disk serialization
    used for each line of the status file.
    """
    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __repr__(self):
        return '%s:%s' % (hex(self.node), self.name)
63 63
class patchheader(object):
    """Parsed header of a patch file.

    Understands plain mail-style headers (From:/Date:/Subject:) as well
    as hg export headers (# HG changeset patch, # User, # Date, ...),
    and keeps the raw comment lines so the header can be edited in
    place and written back with str().
    """
    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file; plainmode: prefer mail-style
        # headers when inserting new fields.
        def eatdiff(lines):
            # Strip trailing diff-leader lines from the collected text.
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # Strip trailing blank lines.
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None   # parser state: None / "hgpatch" / "tag" / "tagdone"
        subject = None
        branch = None
        nodeid = None
        diffstart = 0   # 0: no diff seen, 1: saw "--- ", 2: diff confirmed

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # Start of the actual diff: header parsing stops here.
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:]
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    # First non-"# " line ends the hg header block.
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            # every header line (including blanks) is kept verbatim
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        # Rewrite an existing user header if present; otherwise insert
        # one in whichever style (plain or hg) the patch already uses.
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # Same insertion strategy as setuser, for the date header.
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # Only hg-style patches carry a parent header; if there is no
        # '# HG changeset patch' marker the header text is left alone.
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        # Replace the free-text message, keeping the header fields.
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        # Serialize the header back to patch-file text.
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
247 247
248 248 class queue(object):
249 249 def __init__(self, ui, path, patchdir=None):
250 250 self.basepath = path
251 251 try:
252 252 fh = open(os.path.join(path, 'patches.queue'))
253 253 cur = fh.read().rstrip()
254 254 if not cur:
255 255 curpath = os.path.join(path, 'patches')
256 256 else:
257 257 curpath = os.path.join(path, 'patches-' + cur)
258 258 except IOError:
259 259 curpath = os.path.join(path, 'patches')
260 260 self.path = patchdir or curpath
261 261 self.opener = util.opener(self.path)
262 262 self.ui = ui
263 263 self.applied_dirty = 0
264 264 self.series_dirty = 0
265 265 self.added = []
266 266 self.series_path = "series"
267 267 self.status_path = "status"
268 268 self.guards_path = "guards"
269 269 self.active_guards = None
270 270 self.guards_dirty = False
271 271 # Handle mq.git as a bool with extended values
272 272 try:
273 273 gitmode = ui.configbool('mq', 'git', None)
274 274 if gitmode is None:
275 275 raise error.ConfigError()
276 276 self.gitmode = gitmode and 'yes' or 'no'
277 277 except error.ConfigError:
278 278 self.gitmode = ui.config('mq', 'git', 'auto').lower()
279 279 self.plainmode = ui.configbool('mq', 'plain', False)
280 280
281 281 @util.propertycache
282 282 def applied(self):
283 283 if os.path.exists(self.join(self.status_path)):
284 284 def parse(l):
285 285 n, name = l.split(':', 1)
286 286 return statusentry(bin(n), name)
287 287 lines = self.opener(self.status_path).read().splitlines()
288 288 return [parse(l) for l in lines]
289 289 return []
290 290
291 291 @util.propertycache
292 292 def full_series(self):
293 293 if os.path.exists(self.join(self.series_path)):
294 294 return self.opener(self.series_path).read().splitlines()
295 295 return []
296 296
    @util.propertycache
    def series(self):
        # parse_series assigns self.series directly, overwriting this
        # propertycache slot; the return value serves the first access.
        self.parse_series()
        return self.series
301 301
    @util.propertycache
    def series_guards(self):
        # Like 'series': parse_series fills self.series_guards in place,
        # replacing this propertycache slot.
        self.parse_series()
        return self.series_guards
306 306
307 307 def invalidate(self):
308 308 for a in 'applied full_series series series_guards'.split():
309 309 if a in self.__dict__:
310 310 delattr(self, a)
311 311 self.applied_dirty = 0
312 312 self.series_dirty = 0
313 313 self.guards_dirty = False
314 314 self.active_guards = None
315 315
316 316 def diffopts(self, opts={}, patchfn=None):
317 317 diffopts = patch.diffopts(self.ui, opts)
318 318 if self.gitmode == 'auto':
319 319 diffopts.upgrade = True
320 320 elif self.gitmode == 'keep':
321 321 pass
322 322 elif self.gitmode in ('yes', 'no'):
323 323 diffopts.git = self.gitmode == 'yes'
324 324 else:
325 325 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
326 326 ' got %s') % self.gitmode)
327 327 if patchfn:
328 328 diffopts = self.patchopts(diffopts, patchfn)
329 329 return diffopts
330 330
331 331 def patchopts(self, diffopts, *patches):
332 332 """Return a copy of input diff options with git set to true if
333 333 referenced patch is a git patch and should be preserved as such.
334 334 """
335 335 diffopts = diffopts.copy()
336 336 if not diffopts.git and self.gitmode == 'keep':
337 337 for patchfn in patches:
338 338 patchf = self.opener(patchfn, 'r')
339 339 # if the patch was a git patch, refresh it as a git patch
340 340 for line in patchf:
341 341 if line.startswith('diff --git'):
342 342 diffopts.git = True
343 343 break
344 344 patchf.close()
345 345 return diffopts
346 346
347 347 def join(self, *p):
348 348 return os.path.join(self.path, *p)
349 349
350 350 def find_series(self, patch):
351 351 def matchpatch(l):
352 352 l = l.split('#', 1)[0]
353 353 return l.strip() == patch
354 354 for index, l in enumerate(self.full_series):
355 355 if matchpatch(l):
356 356 return index
357 357 return None
358 358
    # Matches guard annotations such as " #+guard" / " #-guard" on series lines.
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
360 360
    def parse_series(self):
        # Split full_series into patch names and their guard lists.
        # Note: assigns self.series/self.series_guards directly, which
        # replaces the propertycache slots of the same names.
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
381 381
382 382 def check_guard(self, guard):
383 383 if not guard:
384 384 return _('guard cannot be an empty string')
385 385 bad_chars = '# \t\r\n\f'
386 386 first = guard[0]
387 387 if first in '-+':
388 388 return (_('guard %r starts with invalid character: %r') %
389 389 (guard, first))
390 390 for c in bad_chars:
391 391 if c in guard:
392 392 return _('invalid character in guard %r: %r') % (guard, c)
393 393
394 394 def set_active(self, guards):
395 395 for guard in guards:
396 396 bad = self.check_guard(guard)
397 397 if bad:
398 398 raise util.Abort(bad)
399 399 guards = sorted(set(guards))
400 400 self.ui.debug('active guards: %s\n' % ' '.join(guards))
401 401 self.active_guards = guards
402 402 self.guards_dirty = True
403 403
    def active(self):
        # Return the list of active guards, loading and validating them
        # from the guards file on first use (invalid entries are warned
        # about and dropped).
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                # a missing guards file simply means no active guards
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
421 421
422 422 def set_guards(self, idx, guards):
423 423 for g in guards:
424 424 if len(g) < 2:
425 425 raise util.Abort(_('guard %r too short') % g)
426 426 if g[0] not in '-+':
427 427 raise util.Abort(_('guard %r starts with invalid char') % g)
428 428 bad = self.check_guard(g[1:])
429 429 if bad:
430 430 raise util.Abort(bad)
431 431 drop = self.guard_re.sub('', self.full_series[idx])
432 432 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
433 433 self.parse_series()
434 434 self.series_dirty = True
435 435
436 436 def pushable(self, idx):
437 437 if isinstance(idx, str):
438 438 idx = self.series.index(idx)
439 439 patchguards = self.series_guards[idx]
440 440 if not patchguards:
441 441 return True, None
442 442 guards = self.active()
443 443 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
444 444 if exactneg:
445 445 return False, exactneg[0]
446 446 pos = [g for g in patchguards if g[0] == '+']
447 447 exactpos = [g for g in pos if g[1:] in guards]
448 448 if pos:
449 449 if exactpos:
450 450 return True, exactpos[0]
451 451 return False, pos
452 452 return True, ''
453 453
    def explain_pushable(self, idx, all_patches=False):
        # Print why the patch at idx is pushable or skipped. With
        # all_patches, write to stdout (and also explain allowed
        # patches); otherwise only warn, and only when verbose.
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
478 478
    def save_dirty(self):
        # Persist whichever of the status/series/guards files changed,
        # and register newly added patch files with the queue repo.
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applied_dirty:
            write_list(map(str, self.applied), self.status_path)
        if self.series_dirty:
            write_list(self.full_series, self.series_path)
        if self.guards_dirty:
            write_list(self.active_guards, self.guards_path)
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
496 496
    def removeundo(self, repo):
        # Delete the store's undo file: mq operations must not be
        # rolled back with 'hg rollback'. Best-effort (warns on failure).
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))
505 505
506 506 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
507 507 fp=None, changes=None, opts={}):
508 508 stat = opts.get('stat')
509 509 m = cmdutil.match(repo, files, opts)
510 510 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
511 511 changes, stat, fp)
512 512
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # Apply one patch from mergeq on top of head; if a plain apply
        # fails, strip the partial result and fall back to merging with
        # the patch's revision, then regenerate the patch file.
        # Returns (err, node).
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, [n], update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file from the merge result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
551 551
552 552 def qparents(self, repo, rev=None):
553 553 if rev is None:
554 554 (p1, p2) = repo.dirstate.parents()
555 555 if p2 == nullid:
556 556 return p1
557 557 if not self.applied:
558 558 return None
559 559 return self.applied[-1].node
560 560 p1, p2 = repo.changelog.parents(rev)
561 561 if p2 != nullid and p2 in [x.node for x in self.applied]:
562 562 return p2
563 563 return p1
564 564
    def mergepatch(self, repo, mergeq, series, diffopts):
        # Merge each patch of *series* from *mergeq* into this queue.
        # Returns (err, head).
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
603 603
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file

        Returns (success, files, fuzz) where files maps touched file
        names and fuzz reports fuzzy hunk application.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            # report and signal failure instead of propagating
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)
618 618
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None):
        # Locked/transactional wrapper around _apply: on any failure the
        # transaction is aborted and cached repo state invalidated.
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    # dirstate/changelog may be out of sync after abort
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
642 642
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                # header-only patch: nothing to apply, still committed
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = cmdutil.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                # the failed patch is committed anyway; rejects stay
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
722 722
723 723 def _cleanup(self, patches, numrevs, keep=False):
724 724 if not keep:
725 725 r = self.qrepo()
726 726 if r:
727 727 r[None].remove(patches, True)
728 728 else:
729 729 for p in patches:
730 730 os.unlink(self.join(p))
731 731
732 732 if numrevs:
733 733 del self.applied[:numrevs]
734 734 self.applied_dirty = 1
735 735
736 736 for i in sorted([self.find_series(p) for p in patches], reverse=True):
737 737 del self.full_series[i]
738 738 self.parse_series()
739 739 self.series_dirty = 1
740 740
    def _revpatches(self, repo, revs):
        # Map an ascending list of revisions onto the corresponding
        # applied patch names, validating that each revision really is
        # the matching applied changeset.
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            # warn when finalizing a patch that kept its placeholder message
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
764 764
765 765 def finish(self, repo, revs):
766 766 patches = self._revpatches(repo, sorted(revs))
767 767 self._cleanup(patches, len(patches))
768 768
    def delete(self, repo, patches, opts):
        # qdelete: remove unapplied patches (by name) and/or finished
        # revisions (opts['rev']) from the queue.
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts.get('rev'))
            # _revpatches expects ascending revisions
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))
797 797
798 798 def check_toppatch(self, repo):
799 799 if self.applied:
800 800 top = self.applied[-1].node
801 801 patch = self.applied[-1].name
802 802 pp = repo.dirstate.parents()
803 803 if top not in pp:
804 804 raise util.Abort(_("working directory revision is not qtip"))
805 805 return top, patch
806 806 return None, None
807 807
808 808 def check_substate(self, repo):
809 809 '''return list of subrepos at a different revision than substate.
810 810 Abort if any subrepos have uncommitted changes.'''
811 811 inclsubs = []
812 812 wctx = repo[None]
813 813 for s in wctx.substate:
814 814 if wctx.sub(s).dirty(True):
815 815 raise util.Abort(
816 816 _("uncommitted changes in subrepository %s") % s)
817 817 elif wctx.sub(s).dirty():
818 818 inclsubs.append(s)
819 819 return inclsubs
820 820
821 821 def check_localchanges(self, repo, force=False, refresh=True):
822 822 m, a, r, d = repo.status()[:4]
823 823 if (m or a or r or d) and not force:
824 824 if refresh:
825 825 raise util.Abort(_("local changes found, refresh first"))
826 826 else:
827 827 raise util.Abort(_("local changes found"))
828 828 return m, a, r, d
829 829
830 830 _reserved = ('series', 'status', 'guards')
831 831 def check_reserved_name(self, name):
832 832 if (name in self._reserved or name.startswith('.hg')
833 833 or name.startswith('.mq') or '#' in name or ':' in name):
834 834 raise util.Abort(_('"%s" cannot be used as the name of a patch')
835 835 % name)
836 836
    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        # Create a new patch: validate the name, commit the selected
        # working-directory changes, and write the patch file; on any
        # failure the commit is rolled back and the file unlinked.
        msg = opts.get('msg')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            if os.path.isdir(self.join(patchfn)):
                raise util.Abort(_('"%s" already exists as a directory')
                                 % patchfn)
            else:
                raise util.Abort(_('patch "%s" already exists') % patchfn)

        inclsubs = self.check_substate(repo)
        if inclsubs:
            inclsubs.append('.hgsubstate')
        if opts.get('include') or opts.get('exclude') or pats:
            if inclsubs:
                pats = list(pats or []) + inclsubs
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force=True)
            match = cmdutil.matchfiles(repo, m + a + r + inclsubs)
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot manage merge changesets'))
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError, e:
                raise util.Abort(_('cannot write patch "%s": %s')
                                 % (patchfn, e.strerror))
            try:
                # write the header in plain (mail) or hg export style
                if self.plainmode:
                    if user:
                        p.write("From: " + user + "\n")
                        if not date:
                            p.write("\n")
                    if date:
                        p.write("Date: %d %d\n\n" % date)
                else:
                    p.write("# HG changeset patch\n")
                    p.write("# Parent "
                            + hex(repo[None].parents()[0].node()) + "\n")
                    if user:
                        p.write("# User " + user + "\n")
                    if date:
                        p.write("# Date %s %s\n\n" % date)
                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except:
                    # queue bookkeeping failed: undo the commit
                    repo.rollback()
                    raise
            except Exception:
                # remove the partially written patch file
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
940 940
    def strip(self, repo, revs, update=True, backup="all", force=None):
        """Remove revisions ``revs`` from the repository via repair.strip.

        When ``update`` is true, the working directory is first cleaned to
        the qparent of revs[0]; ``force`` is forwarded to the local-changes
        check.  ``backup`` controls whether repair.strip bundles the removed
        changesets.
        """
        wlock = lock = None
        try:
            # history rewriting: hold both the working-dir and store locks
            # for the whole operation
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, revs[0])
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            for rev in revs:
                repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)
961 961
962 962 def isapplied(self, patch):
963 963 """returns (index, rev, patch)"""
964 964 for i, a in enumerate(self.applied):
965 965 if a.name == patch:
966 966 return (i, a.node, a.name)
967 967 return None
968 968
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch identifier to a series entry name.

        Returns None when ``patch`` is None; raises util.Abort when
        nothing in the series matches.
        """
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, then unique-substring match, then the special
            # names 'qtip'/'qbase' (only meaningful with applied patches)
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                # a bare integer indexes the series (negative allowed)
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partial_name(patch)
            if res:
                return res
            minus = patch.rfind('-')
            if minus >= 0:
                # 'name-N': the patch N positions before 'name' (default 1)
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            plus = patch.rfind('+')
            if plus >= 0:
                # 'name+N': the patch N positions after 'name' (default 1)
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
1039 1039
1040 1040 def push(self, repo, patch=None, force=False, list=False,
1041 1041 mergeq=None, all=False, move=False, exact=False):
1042 1042 diffopts = self.diffopts()
1043 1043 wlock = repo.wlock()
1044 1044 try:
1045 1045 heads = []
1046 1046 for b, ls in repo.branchmap().iteritems():
1047 1047 heads += ls
1048 1048 if not heads:
1049 1049 heads = [nullid]
1050 1050 if repo.dirstate.parents()[0] not in heads and not exact:
1051 1051 self.ui.status(_("(working directory not at a head)\n"))
1052 1052
1053 1053 if not self.series:
1054 1054 self.ui.warn(_('no patches in series\n'))
1055 1055 return 0
1056 1056
1057 1057 patch = self.lookup(patch)
1058 1058 # Suppose our series file is: A B C and the current 'top'
1059 1059 # patch is B. qpush C should be performed (moving forward)
1060 1060 # qpush B is a NOP (no change) qpush A is an error (can't
1061 1061 # go backwards with qpush)
1062 1062 if patch:
1063 1063 info = self.isapplied(patch)
1064 1064 if info:
1065 1065 if info[0] < len(self.applied) - 1:
1066 1066 raise util.Abort(
1067 1067 _("cannot push to a previous patch: %s") % patch)
1068 1068 self.ui.warn(
1069 1069 _('qpush: %s is already at the top\n') % patch)
1070 1070 return 0
1071 1071 pushable, reason = self.pushable(patch)
1072 1072 if not pushable:
1073 1073 if reason:
1074 1074 reason = _('guarded by %r') % reason
1075 1075 else:
1076 1076 reason = _('no matching guards')
1077 1077 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1078 1078 return 1
1079 1079 elif all:
1080 1080 patch = self.series[-1]
1081 1081 if self.isapplied(patch):
1082 1082 self.ui.warn(_('all patches are currently applied\n'))
1083 1083 return 0
1084 1084
1085 1085 # Following the above example, starting at 'top' of B:
1086 1086 # qpush should be performed (pushes C), but a subsequent
1087 1087 # qpush without an argument is an error (nothing to
1088 1088 # apply). This allows a loop of "...while hg qpush..." to
1089 1089 # work as it detects an error when done
1090 1090 start = self.series_end()
1091 1091 if start == len(self.series):
1092 1092 self.ui.warn(_('patch series already fully applied\n'))
1093 1093 return 1
1094 1094 if not force:
1095 1095 self.check_localchanges(repo)
1096 1096
1097 1097 if exact:
1098 1098 if move:
1099 1099 raise util.Abort(_("cannot use --exact and --move together"))
1100 1100 if self.applied:
1101 1101 raise util.Abort(_("cannot push --exact with applied patches"))
1102 1102 root = self.series[start]
1103 1103 target = patchheader(self.join(root), self.plainmode).parent
1104 1104 if not target:
1105 1105 raise util.Abort(_("%s does not have a parent recorded" % root))
1106 1106 if not repo[target] == repo['.']:
1107 1107 hg.update(repo, target)
1108 1108
1109 1109 if move:
1110 1110 if not patch:
1111 1111 raise util.Abort(_("please specify the patch to move"))
1112 1112 for i, rpn in enumerate(self.full_series[start:]):
1113 1113 # strip markers for patch guards
1114 1114 if self.guard_re.split(rpn, 1)[0] == patch:
1115 1115 break
1116 1116 index = start + i
1117 1117 assert index < len(self.full_series)
1118 1118 fullpatch = self.full_series[index]
1119 1119 del self.full_series[index]
1120 1120 self.full_series.insert(start, fullpatch)
1121 1121 self.parse_series()
1122 1122 self.series_dirty = 1
1123 1123
1124 1124 self.applied_dirty = 1
1125 1125 if start > 0:
1126 1126 self.check_toppatch(repo)
1127 1127 if not patch:
1128 1128 patch = self.series[start]
1129 1129 end = start + 1
1130 1130 else:
1131 1131 end = self.series.index(patch, start) + 1
1132 1132
1133 1133 s = self.series[start:end]
1134 1134 all_files = set()
1135 1135 try:
1136 1136 if mergeq:
1137 1137 ret = self.mergepatch(repo, mergeq, s, diffopts)
1138 1138 else:
1139 1139 ret = self.apply(repo, s, list, all_files=all_files)
1140 1140 except:
1141 1141 self.ui.warn(_('cleaning up working directory...'))
1142 1142 node = repo.dirstate.parents()[0]
1143 1143 hg.revert(repo, node, None)
1144 1144 # only remove unknown files that we know we touched or
1145 1145 # created while patching
1146 1146 for f in all_files:
1147 1147 if f not in repo.dirstate:
1148 1148 try:
1149 1149 util.unlinkpath(repo.wjoin(f))
1150 1150 except OSError, inst:
1151 1151 if inst.errno != errno.ENOENT:
1152 1152 raise
1153 1153 self.ui.warn(_('done\n'))
1154 1154 raise
1155 1155
1156 1156 if not self.applied:
1157 1157 return ret[0]
1158 1158 top = self.applied[-1].name
1159 1159 if ret[0] and ret[0] > 1:
1160 1160 msg = _("errors during apply, please fix and refresh %s\n")
1161 1161 self.ui.write(msg % top)
1162 1162 else:
1163 1163 self.ui.write(_("now at: %s\n") % top)
1164 1164 return ret[0]
1165 1165
1166 1166 finally:
1167 1167 wlock.release()
1168 1168
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply patches down to (and including) ``patch``, or the top one.

        With ``all``, pop every applied patch.  ``update`` controls whether
        the working directory is updated to the new qtip; ``force`` skips
        the local-changes check.
        """
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # a working-directory parent is about to be stripped:
                # the dirstate must be updated regardless
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the working-copy update when none of the popped
                # revisions is a parent of the working directory
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = self.applied[start].node
            if update:
                top = self.check_toppatch(repo)[0]

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in a:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    repo.dirstate.forget(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, [rev], update=False, backup='strip')
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1263 1263
1264 1264 def diff(self, repo, pats, opts):
1265 1265 top, patch = self.check_toppatch(repo)
1266 1266 if not top:
1267 1267 self.ui.write(_("no patches applied\n"))
1268 1268 return
1269 1269 qp = self.qparents(repo, top)
1270 1270 if opts.get('reverse'):
1271 1271 node1, node2 = None, qp
1272 1272 else:
1273 1273 node1, node2 = qp, None
1274 1274 diffopts = self.diffopts(opts, patch)
1275 1275 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1276 1276
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the top applied patch from the current working copy.

        Regenerates the patch file (message, user, date, parent header and
        diff), strips the old qtip commit and recommits the refreshed one.
        Returns 1 when no patches are applied.
        """
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))

            inclsubs = self.check_substate(repo)

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = cmdutil.match(repo, pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with inc/exl options
                matchfn = cmdutil.match(repo, opts=opts)
            else:
                match = cmdutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)
            c = [filter(matchfn, l) for l in (m, a, r)]
            match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
            chunks = patch.diff(repo, patchparent, match=match,
                                changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)

            try:
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                # assumes strip can roll itself back if interrupted
                repo.dirstate.setparents(*cparents)
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, [top], update=False,
                           backup='strip')
            except:
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this
                patchf.rename()
                n = repo.commit(message, user, ph.date, match=match,
                                force=True)
                self.applied.append(statusentry(n, patchfn))
            except:
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.save_dirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1463 1463
    def init(self, repo, create=False):
        """Create the patch queue directory.

        With ``create`` also initialize a versioned queue repository inside
        it (returned).  Raises util.Abort when the directory already exists
        and ``create`` is False.
        """
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            # with create=True, an already-existing directory is tolerated
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)
1474 1474
1475 1475 def unapplied(self, repo, patch=None):
1476 1476 if patch and patch not in self.series:
1477 1477 raise util.Abort(_("patch %s is not in series file") % patch)
1478 1478 if not patch:
1479 1479 start = self.series_end()
1480 1480 else:
1481 1481 start = self.series.index(patch) + 1
1482 1482 unapplied = []
1483 1483 for i in xrange(start, len(self.series)):
1484 1484 pushable, reason = self.pushable(i)
1485 1485 if pushable:
1486 1486 unapplied.append((i, self.series[i]))
1487 1487 self.explain_pushable(i)
1488 1488 return unapplied
1489 1489
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (part of) the patch series.

        Each patch is shown as applied/unapplied/guarded; with ``summary``
        the first line of its description is appended.  ``status``
        ('A'/'U'/'G') filters non-verbose output.  With ``missing``, list
        patch files present in the queue directory but absent from the
        series file instead.
        """
        def displayname(pfx, patchname, state):
            # write one series line, colored via the 'qseries.<state>' label
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                msg = ph.message and ph.message[0] or ''
                if self.ui.formatted():
                    # truncate the summary to fit the remaining terminal width
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            # scan the queue directory for patch files missing from the
            # series, skipping mq's own control files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1545 1545
1546 1546 def issaveline(self, l):
1547 1547 if l.name == '.hg.patches.save.line':
1548 1548 return True
1549 1549
1550 1550 def qrepo(self, create=False):
1551 1551 ui = self.ui.copy()
1552 1552 ui.setconfig('paths', 'default', '', overlay=False)
1553 1553 ui.setconfig('paths', 'default-push', '', overlay=False)
1554 1554 if create or os.path.isdir(self.join(".hg")):
1555 1555 return hg.repository(ui, path=self.path, create=create)
1556 1556
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state saved by qsave in changeset ``rev``.

        The saved changeset's description encodes the series, the applied
        patches, and optionally the saved dirstate parents.  With
        ``delete``, the save entry itself is stripped; with ``qupdate``,
        the queue repository is updated to the saved parent.  Returns 1 on
        error.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                # after 'Patch Data:': 'node:name' lines are applied
                # patches, ':name' lines are the remaining series entries
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("No saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
            pp = repo.dirstate.parents()
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, [rev], update=update, backup='strip')
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("Unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
1610 1610
    def save(self, repo, msg=None):
        """Commit the current queue state as a '.hg.patches.save.line' entry.

        The commit description encodes the dirstate parents of the queue
        repository (if any), the applied patches and the full series, in
        the exact format restore() parses.  Returns 1 on error.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.full_series)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1637 1637
1638 1638 def full_series_end(self):
1639 1639 if self.applied:
1640 1640 p = self.applied[-1].name
1641 1641 end = self.find_series(p)
1642 1642 if end is None:
1643 1643 return len(self.full_series)
1644 1644 return end + 1
1645 1645 return 0
1646 1646
1647 1647 def series_end(self, all_patches=False):
1648 1648 """If all_patches is False, return the index of the next pushable patch
1649 1649 in the series, or the series length. If all_patches is True, return the
1650 1650 index of the first patch past the last applied one.
1651 1651 """
1652 1652 end = 0
1653 1653 def next(start):
1654 1654 if all_patches or start >= len(self.series):
1655 1655 return start
1656 1656 for i in xrange(start, len(self.series)):
1657 1657 p, reason = self.pushable(i)
1658 1658 if p:
1659 1659 break
1660 1660 self.explain_pushable(i)
1661 1661 return i
1662 1662 if self.applied:
1663 1663 p = self.applied[-1].name
1664 1664 try:
1665 1665 end = self.series.index(p)
1666 1666 except ValueError:
1667 1667 return 0
1668 1668 return next(end + 1)
1669 1669 return next(end)
1670 1670
1671 1671 def appliedname(self, index):
1672 1672 pname = self.applied[index].name
1673 1673 if not self.ui.verbose:
1674 1674 p = pname
1675 1675 else:
1676 1676 p = str(self.series.index(pname)) + " " + pname
1677 1677 return p
1678 1678
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        ``files`` are patch files ('-' reads stdin; URLs are fetched);
        with ``rev``, existing changesets are converted into mq patches
        instead.  ``existing`` registers patch files already present in
        the queue directory; ``force`` overwrites same-named patches;
        ``git`` selects git-style diffs for --rev imports.
        """
        def checkseries(patchname):
            # reject names already tracked by the series file
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            # reject names whose patch file already exists (unless forced)
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            rev.sort(reverse=True)
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = repo.changelog.node(rev[0])
                if base in [n.node for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [self.applied[-1].node]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            diffopts = self.diffopts({'git': git})
            # revs are processed newest-first; each must be the single
            # parent of the previously imported one (linearity check)
            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                patchf.close()

                se = statusentry(n, patchname)
                self.applied.insert(0, se)

                self.added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1
            self.series_dirty = True

        # import patch files given on the command line
        for i, filename in enumerate(files):
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                filename = normname(filename)
                self.check_reserved_name(filename)
                originpath = self.join(filename)
                if not os.path.isfile(originpath):
                    raise util.Abort(_("patch %s does not exist") % filename)

                if patchname:
                    self.check_reserved_name(patchname)
                    checkfile(patchname)

                    self.ui.write(_('renaming %s to %s\n')
                                        % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(
                                _('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = url.open(self.ui, filename).read()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read file %s") % filename)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.full_series_end() + i
                self.full_series[index:index] = [patchname]
            self.parse_series()
            self.series_dirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            patchname = None
1801 1801
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1814 1814
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    mq = repo.mq

    if patch:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        upto = mq.series.index(patch) + 1
    else:
        upto = mq.series_end(True)

    start = 0
    if opts.get('last'):
        # show only the single patch just below the current top
        if not upto:
            ui.write(_("no patches applied\n"))
            return 1
        if upto == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = upto - 2
        upto = 1

    mq.qseries(repo, length=upto, start=start, status='A',
               summary=opts.get('summary'))
1844 1844
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    # first unapplied entry: just after the named patch, or just after
    # the topmost applied patch
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    if opts.get('first'):
        if start == len(q.series):
            ui.write(_("all patches applied\n"))
            return 1
        length = 1
    else:
        length = None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
1865 1865
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    q = repo.mq
    importargs = dict(patchname=opts.get('name'),
                      existing=opts.get('existing'),
                      force=opts.get('force'),
                      rev=opts.get('rev'),
                      git=opts.get('git'))
    try:
        q.qimport(repo, filename, **importargs)
    finally:
        # persist whatever was imported, even on partial failure
        q.save_dirty()

    # optionally push the freshly imported patch; --rev already leaves
    # its revisions applied, so pushing is only meaningful without it
    if opts.get('push') and not opts.get('rev'):
        return q.push(repo, None)
    return 0
1910 1910
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.save_dirty()
    if r:
        # seed a versioned patch repository with an ignore file and an
        # empty series file, and schedule both for commit
        if not os.path.exists(r.wjoin('.hgignore')):
            ignorelines = ['^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                           'status\n', 'guards\n']
            fp = r.wopener('.hgignore', 'w')
            fp.write(''.join(ignorelines))
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1936 1936
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # thin deprecated wrapper around qinit
    create = opts.get('create_repo')
    return qinit(ui, repo, create=create)
1949 1949
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        # default location of the patch repository for a given repo
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        # probe: abort early if the patch repository is not versioned
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = first applied patch; everything from it upward
            # is mq-managed and must not end up applied in dest
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                # remote dest cannot be stripped afterwards, so clone
                # only the heads that do not contain applied patches,
                # plus qbase's parent
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            # remote source: qbase is only reachable via lookup, and
            # may legitimately not exist
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))
    if dr.local():
        if qbase:
            # a local destination may still contain applied patches
            # (full clone); strip them away now
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
2015 2015
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # wrap the message in _() for translation, like every other
        # abort in this file (was a plain untranslated string)
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
2025 2025
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    q = repo.mq
    q.qseries(repo, missing=opts.get('missing'),
              summary=opts.get('summary'))
    return 0
2032 2032
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # index just past the topmost applied patch (0 when none applied)
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=t - 1, length=1, status='A',
              summary=opts.get('summary'))
2045 2045
def next(ui, repo, **opts):
    """print the name of the next patch

    Returns 0 on success."""
    q = repo.mq
    end = q.series_end()
    if end != len(q.series):
        q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
    else:
        ui.write(_("all patches applied\n"))
        return 1
2056 2056
def prev(ui, repo, **opts):
    """print the name of the previous patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    # need at least two applied patches for a "previous" one to exist
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    q.qseries(repo, start=napplied - 2, length=1, status='A',
              summary=opts.get('summary'))
2071 2071
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from -U/--currentuser and
    -D/--currentdate when no explicit value was given."""
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2077 2077
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(opts)
    q = repo.mq
    if opts.get('edit'):
        # pass a callable so the editor is only invoked when qnew
        # actually needs the message
        def getmsg():
            return ui.edit(msg, opts.get('user') or ui.username())
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
2116 2116
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts.get('edit'):
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # edit the header of the topmost applied patch
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        msgfile = repo.opener('last-message.txt', 'wb')
        msgfile.write(message)
        msgfile.close()
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
2157 2157
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # all the work happens in the queue object
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
2175 2175
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts.get('edit'):
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s\n') % p)
            # bug fix: actually skip the duplicate -- previously the
            # patch was appended anyway and folded in a second time
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect headers so they can be concatenated below
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)
        cmdutil.updatedir(ui, repo, files)

    if not message:
        # build the combined header: current patch header first, then
        # each folded header separated by '* * *'
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    q.refresh(repo, msg=message, git=diffopts.git)
    q.delete(repo, patches, opts)
    q.save_dirty()
2241 2241
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts.get('force')
    # pop down to an already-applied patch, push up to an unapplied one
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force)
    else:
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
2254 2254
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::
       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # print one series entry with its guards, labelled for color
        # output by applied/unapplied/guarded state; reads 'q' and
        # 'applied' from the enclosing scope (defined below)
        guards = q.series_guards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no patch name given (args empty or first arg is a guard):
    # operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        # remaining arguments are the guards to set (--none clears all)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
2323 2323
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # default to the topmost applied patch when none is named
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2340 2340
def lastsavename(path):
    """Find the highest-numbered save file "<path>.N" in path's directory.

    Returns (filename, index) for the largest N found, or (None, None)
    when no save file exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base name so regex metacharacters in it cannot match,
    # and make the separating dot literal (it previously matched any
    # character, e.g. "patchesX12" would have been picked up)
    namere = re.compile("%s\\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2357 2357
def savename(path):
    """Return the next unused save file name "<path>.N"."""
    last, index = lastsavename(path)
    if last is None:
        # no existing save file: start numbering at 1
        index = 0
    return path + ".%d" % (index + 1)
2364 2364
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    if opts.get('merge'):
        # --merge: push by merging with a previously saved queue,
        # either a named one or the most recent save
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
            if not newpath:
                ui.warn(_("no saved queues found, please use -n\n"))
                return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    # fixed help-text typo: "succces" -> "success" (docstring above)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'))
    return ret
2390 2390
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.

    Return 0 on success.
    """
    if opts.get('name'):
        # popping from a saved queue: don't touch the working directory
        q = queue(ui, repo.join(""), repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'))
    q.save_dirty()
    return ret
2411 2411
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""

    q = repo.mq

    if not name:
        # single-argument form: rename the current (topmost) patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory: keep the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(
            _('A patch named %s already exists in the series file') % name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.find_series(patch)
    # rewrite the series entry, preserving any guard annotations
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    info = q.isapplied(patch)
    if info:
        # patch is applied: update the status file entry as well
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        # the patch queue itself is versioned: record the rename in the
        # queue repository's dirstate too
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # old file was only scheduled for add: just swap names
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    wctx.undelete([name])
                wctx.copy(patch, name)
                wctx.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2477 2477
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.save_dirty()
    return 0
2488 2488
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts.get('copy'):
        # copy the queue directory aside, to a named or auto-numbered path
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # the status file may legitimately be missing; narrowed from
            # a bare except so KeyboardInterrupt/SystemExit propagate
            pass
    return 0
2520 2520
def strip(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes,
    the operation is aborted unless the --force flag is supplied.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Return 0 on success.
    """
    # backup policy: 'all' (default), 'strip' (--backup), 'none'
    backup = 'all'
    if opts.get('backup'):
        backup = 'strip'
    elif opts.get('no_backup') or opts.get('nobackup'):
        backup = 'none'

    cl = repo.changelog
    revs = set(cmdutil.revrange(repo, revs))
    if not revs:
        raise util.Abort(_('empty revision set'))

    # everything that goes away, and the minimal roots to strip from
    descendants = set(cl.descendants(*revs))
    strippedrevs = revs.union(descendants)
    roots = revs.difference(descendants)

    update = False
    # if one of the wdir parent is stripped we'll need
    # to update away to an earlier revision
    for p in repo.dirstate.parents():
        if p != nullid and cl.rev(p) in strippedrevs:
            update = True
            break

    rootnodes = set(cl.node(r) for r in roots)

    q = repo.mq
    if q.applied:
        # refresh queue state if we're about to strip
        # applied patches
        if cl.rev(repo.lookup('qtip')) in strippedrevs:
            q.applied_dirty = True
            start = 0
            end = len(q.applied)
            # NOTE: the loop variable shadows the module-level
            # statusentry class; it is not referenced again below
            for i, statusentry in enumerate(q.applied):
                if statusentry.node in rootnodes:
                    # if one of the stripped roots is an applied
                    # patch, only part of the queue is stripped
                    start = i
                    break
            del q.applied[start:end]
            q.save_dirty()

    revs = list(rootnodes)
    if update and opts.get('keep'):
        # --keep: leave the working copy contents alone, but rebuild
        # the dirstate against the revision we would have updated to
        wlock = repo.wlock()
        try:
            urev = repo.mq.qparents(repo, revs[0])
            repo.dirstate.rebuild(urev, repo[urev].manifest())
            repo.dirstate.write()
            update = False
        finally:
            wlock.release()

    repo.mq.strip(repo, revs, backup=backup, update=update,
                  force=opts.get('force'))
    return 0
2601 2601
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -stable    (negative guard)
        qguard bar.patch +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    if args or opts.get('none'):
        # changing the selection: remember the old situation so we can
        # report how many patches became (un)pushable
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s/--series: tally how often each guard appears in the series
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading +/- sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the old top so --reapply can push back up to it
    reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # pop down to just below the first guarded applied patch
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2706 2706
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    # Normalize the revision selection: --applied means "everything
    # currently under mq control", expressed as a revset prefix.
    if opts.get('applied'):
        revrange = ('qbase::qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        # Nothing under mq control; finishing is a no-op.
        ui.status(_('no patches applied\n'))
        return 0

    revs = cmdutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.save_dirty()
    return 0
2739 2739
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)".

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''

    q = repo.mq

    # Bookkeeping files under .hg/: the registry of queues and the name
    # of the active one.
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # The active queue is derived from the patch directory name:
        # 'patches' for the default queue, 'patches-NAME' otherwise.
        base = os.path.basename(q.path)
        if base.startswith('patches-'):
            base = base[8:]
        return base

    def _noqueues():
        # True when the registry file does not exist yet.
        try:
            fp = repo.opener(_allqueues, 'r')
            fp.close()
        except IOError:
            return True
        return False

    def _getqueues():
        current = _getcurrent()
        try:
            fp = repo.opener(_allqueues, 'r')
            queues = [line.strip() for line in fp if line.strip()]
            # The active queue may predate the registry file; include it.
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]
        return sorted(queues)

    def _setactive(name):
        # Refuse to switch away from a queue with applied patches.
        if q.applied:
            raise util.Abort(_('patches applied - cannot set new queue active'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # An empty patches.queue file means the default queue.
        fp = repo.opener(_activequeue, 'w')
        if name != 'patches':
            fp.write(name)
        fp.close()

    def _addqueue(name):
        fp = repo.opener(_allqueues, 'a')
        fp.write('%s\n' % (name,))
        fp.close()

    def _queuedir(name):
        if name == 'patches':
            return repo.join('patches')
        return repo.join('patches-' + name)

    def _validname(name):
        # Reject characters that would break paths or the registry format.
        return not any(ch in ':\\/.' for ch in name)

    def _delete(name):
        if name not in existing:
            raise util.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()
        if name == current:
            raise util.Abort(_('cannot delete currently active queue'))

        # Rewrite the registry without the deleted queue, then atomically
        # replace the old registry file.
        fp = repo.opener('patches.queues.new', 'w')
        for entry in existing:
            if entry == name:
                continue
            fp.write('%s\n' % (entry,))
        fp.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    if not name or opts.get('list'):
        # Listing mode: print every queue, flagging the active one.
        current = _getcurrent()
        for entry in _getqueues():
            ui.write('%s' % (entry,))
            if entry == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise util.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    existing = _getqueues()

    if opts.get('create'):
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)
        if _noqueues():
            # First ever registry write: record the default queue too.
            _addqueue(_defaultqueue)
        _addqueue(name)
        _setactive(name)
    elif opts.get('rename'):
        current = _getcurrent()
        if name == current:
            raise util.Abort(_('can\'t rename "%s" to its current name') % name)
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)

        olddir = _queuedir(current)
        newdir = _queuedir(name)

        if os.path.exists(newdir):
            raise util.Abort(_('non-queue directory "%s" already exists') %
                             newdir)

        # Rewrite the registry, substituting the new name, and move the
        # patch directory alongside it.
        fp = repo.opener('patches.queues.new', 'w')
        for entry in existing:
            if entry == current:
                fp.write('%s\n' % (name,))
                if os.path.exists(olddir):
                    util.rename(olddir, newdir)
            else:
                fp.write('%s\n' % (entry,))
        fp.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
        _setactivenocheck(name)
    elif opts.get('delete'):
        _delete(name)
    elif opts.get('purge'):
        # --purge also removes the patch directory from disk.
        if name in existing:
            _delete(name)
        qdir = _queuedir(name)
        if os.path.exists(qdir):
            shutil.rmtree(qdir)
    else:
        # Bare name: switch to an existing queue.
        if name not in existing:
            raise util.Abort(_('use --create to create a new queue'))
        _setactive(name)
2900 2900
def reposetup(ui, repo):
    """Graft mq awareness onto every local repository object.

    Subclasses the repository's class on the fly so that commits, pushes,
    tags and branch caches all take the applied patch stack into account.
    """
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            # Lazily instantiated queue bound to this repository's .hg dir.
            return queue(self.ui, self.join(""))

        def abort_if_wdir_patched(self, errmsg, force=False):
            # Abort when the working directory parent is an applied patch
            # (unless the caller forces the operation through).
            if self.mq.applied and not force:
                parent = self.dirstate.parents()[0]
                if parent in [s.node for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, force, revs):
            if self.mq.applied and not force:
                haspatches = True
                if revs:
                    # Assume applied patches have no non-patch descendants
                    # and are not on remote already. If they appear in the
                    # set of resolved 'revs', bail out.
                    applied = set(e.node for e in self.mq.applied)
                    haspatches = bool([n for n in revs if n in applied])
                if haspatches:
                    raise util.Abort(_('source has mq patches applied'))
            super(mqrepo, self).checkpush(force, revs)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(p.node, p.name) for p in q.applied]

            # A stale status file can reference nodes that no longer exist;
            # in that case return the base tags untouched.
            if mqtags[-1][0] not in self:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # Synthesize the qtip/qbase/qparent convenience tags.
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for node, tagname in [(t[0], t[1]) for t in mqtags]:
                # Real tags win over patch names of the same name.
                if tagname in tags:
                    self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
                                 % tagname)
                else:
                    tags[tagname] = node

            return result

        def _branchtags(self, partial, lrev):
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = q.applied[0].node
            if qbasenode not in self:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
                self._updatebranchcache(partial, ctxgen)
                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            ctxgen = (self[r] for r in xrange(start, len(cl)))
            self._updatebranchcache(partial, ctxgen)

            return partial

    if repo.local():
        repo.__class__ = mqrepo
2995 2995
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg import' so it refuses to apply over an applied mq patch.

    'repo' only has abort_if_wdir_patched when reposetup() has installed
    the mqrepo subclass (i.e. for local repositories).
    """
    # Use getattr instead of hasattr: in Python 2, hasattr() swallows *any*
    # exception raised while computing the attribute (here backed by a
    # propertycache), which could silently mask real errors.
    if (getattr(repo, 'abort_if_wdir_patched', None) is not None
        and not kwargs.get('no_commit', False)):
        repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
                                   kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
3002 3002
def mqinit(orig, ui, *args, **kwargs):
    """Wrap 'hg init' so that --mq initializes a queue repository instead."""
    mq = kwargs.pop('mq', None)

    # Without --mq, behave exactly like the wrapped command.
    if not mq:
        return orig(ui, *args, **kwargs)

    if args:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise util.Abort(_('only a local queue repository '
                               'may be initialized'))
    else:
        # No path given: locate the enclosing repository from the cwd.
        repopath = cmdutil.findrepo(os.getcwd())
        if not repopath:
            raise util.Abort(_('there is no Mercurial repository here '
                               '(.hg not found)'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
3021 3021
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # Some commands do not like getting unknown options, so always strip
    # the synthetic --mq flag before delegating.
    mq = kwargs.pop('mq', None)

    if not mq:
        # Plain invocation: run against the main repository.
        return orig(ui, repo, *args, **kwargs)

    queue_repo = repo.mq.qrepo()
    if not queue_repo:
        raise util.Abort(_('no queue repository'))
    # Re-run the command against the versioned patch directory.
    return orig(queue_repo.ui, queue_repo, *args, **kwargs)
3036 3036
def summary(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg summary' to append a one-line mq status report."""
    result = orig(ui, repo, *args, **kwargs)
    q = repo.mq
    napplied = len(q.applied)
    nunapplied = len(q.unapplied(repo))

    parts = []
    if napplied:
        parts.append(ui.label(_("%d applied"), 'qseries.applied') % napplied)
    if nunapplied:
        parts.append(ui.label(_("%d unapplied"), 'qseries.unapplied')
                     % nunapplied)

    if parts:
        ui.write("mq: %s\n" % ', '.join(parts))
    else:
        # Only shown in verbose mode.
        ui.note(_("mq: (empty queue)\n"))
    return result
3051 3051
def uisetup(ui):
    """Install mq command wrappers at extension load time."""
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    # Commands with mq-specific behaviour get dedicated wrappers.
    extensions.wrapcommand(commands.table, 'import', mqimport)
    extensions.wrapcommand(commands.table, 'summary', summary)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    # Commands that take no repository (plus qrecord) must not grow --mq.
    nowrap = set(commands.norepo.split(" ") + ['qrecord'])

    def dotable(cmdtable):
        # Add --mq to every remaining command in the given table.
        for key in cmdtable.keys():
            cmd = cmdutil.parsealiases(key)[0]
            if cmd in nowrap:
                continue
            wrapped = extensions.wrapcommand(cmdtable, cmd, mqcommand)
            wrapped[1].extend(mqopt)

    dotable(commands.table)

    # Also wrap commands registered by other loaded extensions.
    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            dotable(getattr(extmodule, 'cmdtable', {}))
3076 3076
# Option shared by several q* commands: print each patch's summary line.
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3078 3078
# Command table: maps command spec ('^' = shown in short help, '|' separates
# aliases) to (callback, option list, synopsis).
cmdtable = {
    "qapplied":
        (applied,
         [('1', 'last', None, _('show only the last patch'))] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]')),
    "qclone":
        (clone,
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
          ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
        (commit,
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
        (diff,
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...')),
    "qdelete|qremove|qrm":
        (delete,
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...')),
    'qfold':
        (fold,
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
        (goto,
         [('f', 'force', None, _('overwrite any local changes'))],
         _('hg qgoto [OPTION]... PATCH')),
    'qguard':
        (guard,
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "qimport":
        (qimport,
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
    "^qinit":
        (init,
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]')),
    "^qnew":
        (new,
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None,
           _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None,
           _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
        (pop,
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('f', 'force', None,
           _('forget any local changes to patched files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]')),
    "^qpush":
        (push,
         [('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
    "^qrefresh":
        (refresh,
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
        (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
        (restore,
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV')),
    "qsave":
        (save,
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
        (select,
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
        (series,
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]')),
    "strip":
        (strip,
         [('f', 'force', None, _('force removal of changesets even if the '
                                 'working directory has uncommitted changes')),
          ('b', 'backup', None, _('bundle only changesets with local revision'
                                  ' number greater than REV which are not'
                                  ' descendants of REV (DEPRECATED)')),
          ('n', 'no-backup', None, _('no backups')),
          ('', 'nobackup', None, _('no backups (DEPRECATED)')),
          ('k', 'keep', None, _("do not modify working copy during strip"))],
         _('hg strip [-k] [-f] [-n] REV...')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied":
        (unapplied,
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]')),
    "qfinish":
        (finish,
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...')),
    'qqueue':
        (qqueue,
         [('l', 'list', False, _('list all available queues')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
          ],
         _('[OPTION] [QUEUE]')),
}
3246 3246
# Default colors for the mq-specific labels used by qseries/qguard output.
colortable = {
    'qguard.negative': 'red',
    'qguard.positive': 'yellow',
    'qguard.unguarded': 'green',
    'qseries.applied': 'blue bold underline',
    'qseries.guarded': 'black bold',
    'qseries.missing': 'red bold',
    'qseries.unapplied': 'black bold',
}
@@ -1,1938 +1,1946 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
class localrepository(repo.repository):
    # Capabilities advertised to peers over the wire protocol.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    # Revlog-level on-disk formats this class can read and write.
    supportedformats = set(('revlogv1', 'parentdelta'))
    # Full set of entries understood in the .hg/requires file.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 warned = [0]
182 182 def validate(node):
183 183 try:
184 184 r = self.changelog.rev(node)
185 185 return node
186 186 except error.LookupError:
187 187 if not warned[0]:
188 188 warned[0] = True
189 189 self.ui.warn(_("warning: ignoring unknown"
190 190 " working parent %s!\n") % short(node))
191 191 return nullid
192 192
193 193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194 194
195 195 def __getitem__(self, changeid):
196 196 if changeid is None:
197 197 return context.workingctx(self)
198 198 return context.changectx(self, changeid)
199 199
200 200 def __contains__(self, changeid):
201 201 try:
202 202 return bool(self.lookup(changeid))
203 203 except error.RepoLookupError:
204 204 return False
205 205
206 206 def __nonzero__(self):
207 207 return True
208 208
209 209 def __len__(self):
210 210 return len(self.changelog)
211 211
212 212 def __iter__(self):
213 213 for i in xrange(len(self)):
214 214 yield i
215 215
216 216 def url(self):
217 217 return 'file:' + self.root
218 218
219 219 def hook(self, name, throw=False, **args):
220 220 return hook.hook(self.ui, self, name, throw, **args)
221 221
222 222 tag_disallowed = ':\r\n'
223 223
224 224 def _tag(self, names, node, message, local, user, date, extra={}):
225 225 if isinstance(names, str):
226 226 allchars = names
227 227 names = (names,)
228 228 else:
229 229 allchars = ''.join(names)
230 230 for c in self.tag_disallowed:
231 231 if c in allchars:
232 232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233 233
234 234 branches = self.branchmap()
235 235 for name in names:
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 237 local=local)
238 238 if name in branches:
239 239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 240 " branch name\n") % name)
241 241
242 242 def writetags(fp, names, munge, prevtags):
243 243 fp.seek(0, 2)
244 244 if prevtags and prevtags[-1] != '\n':
245 245 fp.write('\n')
246 246 for name in names:
247 247 m = munge and munge(name) or name
248 248 if self._tagtypes and name in self._tagtypes:
249 249 old = self._tags.get(name, nullid)
250 250 fp.write('%s %s\n' % (hex(old), m))
251 251 fp.write('%s %s\n' % (hex(node), m))
252 252 fp.close()
253 253
254 254 prevtags = ''
255 255 if local:
256 256 try:
257 257 fp = self.opener('localtags', 'r+')
258 258 except IOError:
259 259 fp = self.opener('localtags', 'a')
260 260 else:
261 261 prevtags = fp.read()
262 262
263 263 # local tags are stored in the current charset
264 264 writetags(fp, names, None, prevtags)
265 265 for name in names:
266 266 self.hook('tag', node=hex(node), tag=name, local=local)
267 267 return
268 268
269 269 try:
270 270 fp = self.wfile('.hgtags', 'rb+')
271 271 except IOError:
272 272 fp = self.wfile('.hgtags', 'ab')
273 273 else:
274 274 prevtags = fp.read()
275 275
276 276 # committed tags are stored in UTF-8
277 277 writetags(fp, names, encoding.fromlocal, prevtags)
278 278
279 279 if '.hgtags' not in self.dirstate:
280 280 self[None].add(['.hgtags'])
281 281
282 282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284 284
285 285 for name in names:
286 286 self.hook('tag', node=hex(node), tag=name, local=local)
287 287
288 288 return tagnode
289 289
290 290 def tag(self, names, node, message, local, user, date):
291 291 '''tag a revision with one or more symbolic names.
292 292
293 293 names is a list of strings or, when adding a single tag, names may be a
294 294 string.
295 295
296 296 if local is True, the tags are stored in a per-repository file.
297 297 otherwise, they are stored in the .hgtags file, and a new
298 298 changeset is committed with the change.
299 299
300 300 keyword arguments:
301 301
302 302 local: whether to store tags in non-version-controlled file
303 303 (default False)
304 304
305 305 message: commit message to use if committing
306 306
307 307 user: name of user to use if committing
308 308
309 309 date: date tuple to use if committing'''
310 310
311 311 if not local:
312 312 for x in self.status()[:5]:
313 313 if '.hgtags' in x:
314 314 raise util.Abort(_('working copy of .hgtags is changed '
315 315 '(please commit .hgtags manually)'))
316 316
317 317 self.tags() # instantiate the cache
318 318 self._tag(names, node, message, local, user, date)
319 319
320 320 def tags(self):
321 321 '''return a mapping of tag to node'''
322 322 if self._tags is None:
323 323 (self._tags, self._tagtypes) = self._findtags()
324 324
325 325 return self._tags
326 326
327 327 def _findtags(self):
328 328 '''Do the hard work of finding tags. Return a pair of dicts
329 329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 330 maps tag name to a string like \'global\' or \'local\'.
331 331 Subclasses or extensions are free to add their own tags, but
332 332 should be aware that the returned dicts will be retained for the
333 333 duration of the localrepo object.'''
334 334
335 335 # XXX what tagtype should subclasses/extensions use? Currently
336 336 # mq and bookmarks add tags, but do not set the tagtype at all.
337 337 # Should each extension invent its own tag type? Should there
338 338 # be one tagtype for all such "virtual" tags? Or is the status
339 339 # quo fine?
340 340
341 341 alltags = {} # map tag name to (node, hist)
342 342 tagtypes = {}
343 343
344 344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346 346
347 347 # Build the return dicts. Have to re-encode tag names because
348 348 # the tags module always uses UTF-8 (in order not to lose info
349 349 # writing to the cache), but the rest of Mercurial wants them in
350 350 # local encoding.
351 351 tags = {}
352 352 for (name, (node, hist)) in alltags.iteritems():
353 353 if node != nullid:
354 354 tags[encoding.tolocal(name)] = node
355 355 tags['tip'] = self.changelog.tip()
356 356 tagtypes = dict([(encoding.tolocal(name), value)
357 357 for (name, value) in tagtypes.iteritems()])
358 358 return (tags, tagtypes)
359 359
360 360 def tagtype(self, tagname):
361 361 '''
362 362 return the type of the given tag. result can be:
363 363
364 364 'local' : a local tag
365 365 'global' : a global tag
366 366 None : tag does not exist
367 367 '''
368 368
369 369 self.tags()
370 370
371 371 return self._tagtypes.get(tagname)
372 372
373 373 def tagslist(self):
374 374 '''return a list of tags ordered by revision'''
375 375 l = []
376 376 for t, n in self.tags().iteritems():
377 377 try:
378 378 r = self.changelog.rev(n)
379 379 except:
380 380 r = -2 # sort to the beginning of the list if unknown
381 381 l.append((r, t, n))
382 382 return [(t, n) for r, t, n in sorted(l)]
383 383
384 384 def nodetags(self, node):
385 385 '''return the tags associated with a node'''
386 386 if not self.nodetagscache:
387 387 self.nodetagscache = {}
388 388 for t, n in self.tags().iteritems():
389 389 self.nodetagscache.setdefault(n, []).append(t)
390 390 for tags in self.nodetagscache.itervalues():
391 391 tags.sort()
392 392 return self.nodetagscache.get(node, [])
393 393
394 394 def _branchtags(self, partial, lrev):
395 395 # TODO: rename this function?
396 396 tiprev = len(self) - 1
397 397 if lrev != tiprev:
398 398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 399 self._updatebranchcache(partial, ctxgen)
400 400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401 401
402 402 return partial
403 403
    def updatebranchcache(self):
        '''bring the in-memory branch head cache (self._branchcache)
        up to date with the current changelog tip; cheap no-op when it
        is already current'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (first call, or the old tip was
            # stripped): start from the on-disk cache file
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
420 420
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}

        The result is the shared cache; callers must not mutate it.'''
        self.updatebranchcache()
        return self._branchcache
425 425
426 426 def branchtags(self):
427 427 '''return a dict where branch names map to the tipmost head of
428 428 the branch, open heads come before closed'''
429 429 bt = {}
430 430 for bn, heads in self.branchmap().iteritems():
431 431 tip = heads[-1]
432 432 for h in reversed(heads):
433 433 if 'close' not in self.changelog.read(h)[5]:
434 434 tip = h
435 435 break
436 436 bt[bn] = tip
437 437 return bt
438 438
    def _readbranchcache(self):
        '''read .hg/cache/branchheads from disk

        Returns (branch name -> head node list, tip node, tip rev).
        Returns ({}, nullid, nullrev) when the file is missing or
        corrupt, forcing the caller to rebuild from scratch.'''
        partial = {}
        try:
            f = self.opener(os.path.join("cache", "branchheads"))
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records "<tip hex> <tip rev>" at write time
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal - report only when debugging
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
467 467
468 468 def _writebranchcache(self, branches, tip, tiprev):
469 469 try:
470 470 f = self.opener(os.path.join("cache", "branchheads"), "w",
471 471 atomictemp=True)
472 472 f.write("%s %s\n" % (hex(tip), tiprev))
473 473 for label, nodes in branches.iteritems():
474 474 for node in nodes:
475 475 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
476 476 f.rename()
477 477 except (IOError, OSError):
478 478 pass
479 479
    def _updatebranchcache(self, partial, ctxgen):
        '''update the branch head map partial (branch -> head nodes,
        modified in place) with the changesets produced by ctxgen'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                # any current head reachable from the new head is no
                # longer a head; only search down to the oldest head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
503 503
504 504 def lookup(self, key):
505 505 if isinstance(key, int):
506 506 return self.changelog.node(key)
507 507 elif key == '.':
508 508 return self.dirstate.parents()[0]
509 509 elif key == 'null':
510 510 return nullid
511 511 elif key == 'tip':
512 512 return self.changelog.tip()
513 513 n = self.changelog._match(key)
514 514 if n:
515 515 return n
516 516 if key in self.tags():
517 517 return self.tags()[key]
518 518 if key in self.branchtags():
519 519 return self.branchtags()[key]
520 520 n = self.changelog._partialmatch(key)
521 521 if n:
522 522 return n
523 523
524 524 # can't find key, check if it might have come from damaged dirstate
525 525 if key in self.dirstate.parents():
526 526 raise error.Abort(_("working directory has unknown parent '%s'!")
527 527 % short(key))
528 528 try:
529 529 if len(key) == 20:
530 530 key = hex(key)
531 531 except:
532 532 pass
533 533 raise error.RepoLookupError(_("unknown revision '%s'") % key)
534 534
535 535 def lookupbranch(self, key, remote=None):
536 536 repo = remote or self
537 537 if key in repo.branchmap():
538 538 return key
539 539
540 540 repo = (remote and remote.local()) and remote or self
541 541 return repo[key].branch()
542 542
543 543 def local(self):
544 544 return True
545 545
    def join(self, f):
        '''return the path of f relative to the .hg directory'''
        return os.path.join(self.path, f)
548 548
    def wjoin(self, f):
        '''return the path of f relative to the working directory root'''
        return os.path.join(self.root, f)
551 551
    def file(self, f):
        '''return the filelog for tracked file f (a leading '/' is
        stripped)'''
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
556 556
    def changectx(self, changeid):
        '''return the changectx for changeid (same as self[changeid])'''
        return self[changeid]
559 559
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid

        changeid defaults to None, i.e. the working directory.'''
        return self[changeid].parents()
563 563
    def filectx(self, path, changeid=None, fileid=None):
        """return a filectx for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
568 568
    def getcwd(self):
        '''return the current working directory as seen by the dirstate'''
        return self.dirstate.getcwd()
571 571
    def pathto(self, f, cwd=None):
        '''return repo-relative path f expressed relative to cwd'''
        return self.dirstate.pathto(f, cwd)
574 574
    def wfile(self, f, mode='r'):
        '''open file f from the working directory'''
        return self.wopener(f, mode)
577 577
    def _link(self, f):
        '''True if f is a symlink in the working directory'''
        return os.path.islink(self.wjoin(f))
580 580
581 581 def _loadfilter(self, filter):
582 582 if filter not in self.filterpats:
583 583 l = []
584 584 for pat, cmd in self.ui.configitems(filter):
585 585 if cmd == '!':
586 586 continue
587 587 mf = matchmod.match(self.root, '', [pat])
588 588 fn = None
589 589 params = cmd
590 590 for name, filterfn in self._datafilters.iteritems():
591 591 if cmd.startswith(name):
592 592 fn = filterfn
593 593 params = cmd[len(name):].lstrip()
594 594 break
595 595 if not fn:
596 596 fn = lambda s, c, **kwargs: util.filter(s, c)
597 597 # Wrap old filters not supporting keyword arguments
598 598 if not inspect.getargspec(fn)[2]:
599 599 oldfn = fn
600 600 fn = lambda s, c, **kwargs: oldfn(s, c)
601 601 l.append((mf, fn, params))
602 602 self.filterpats[filter] = l
603 603 return self.filterpats[filter]
604 604
605 605 def _filter(self, filterpats, filename, data):
606 606 for mf, fn, cmd in filterpats:
607 607 if mf(filename):
608 608 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
609 609 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
610 610 break
611 611
612 612 return data
613 613
    @propertycache
    def _encodefilterpats(self):
        # lazily loaded [encode] filters, applied by wread()
        return self._loadfilter('encode')
617 617
    @propertycache
    def _decodefilterpats(self):
        # lazily loaded [decode] filters, applied by wwrite()/wwritedata()
        return self._loadfilter('decode')
621 621
    def adddatafilter(self, name, filter):
        '''register a named data filter usable from [encode]/[decode]'''
        self._datafilters[name] = filter
624 624
625 625 def wread(self, filename):
626 626 if self._link(filename):
627 627 data = os.readlink(self.wjoin(filename))
628 628 else:
629 629 data = self.wopener(filename, 'r').read()
630 630 return self._filter(self._encodefilterpats, filename, data)
631 631
632 632 def wwrite(self, filename, data, flags):
633 633 data = self._filter(self._decodefilterpats, filename, data)
634 634 if 'l' in flags:
635 635 self.wopener.symlink(data, filename)
636 636 else:
637 637 self.wopener(filename, 'w').write(data)
638 638 if 'x' in flags:
639 639 util.set_flags(self.wjoin(filename), False, True)
640 640
    def wwritedata(self, filename, data):
        '''run data through the decode filters without writing anything'''
        return self._filter(self._decodefilterpats, filename, data)
643 643
    def transaction(self, desc):
        '''open a new transaction, or nest into the already-running one

        desc is a short description recorded in journal.desc and shown
        by a later rollback.'''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. brand new repo): journal it empty
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on close, the journal.* files are renamed to undo.* so the
        # transaction can later be rolled back
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
674 674
675 675 def recover(self):
676 676 lock = self.lock()
677 677 try:
678 678 if os.path.exists(self.sjoin("journal")):
679 679 self.ui.status(_("rolling back interrupted transaction\n"))
680 680 transaction.rollback(self.sopener, self.sjoin("journal"),
681 681 self.ui.warn)
682 682 self.invalidate()
683 683 return True
684 684 else:
685 685 self.ui.warn(_("no interrupted transaction available\n"))
686 686 return False
687 687 finally:
688 688 lock.release()
689 689
    def rollback(self, dryrun=False):
        '''undo the last completed transaction using the undo.* files

        With dryrun, only report what would be rolled back.
        Returns 1 when there is nothing to roll back.'''
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds "<len(repo)>\n<command>[\n<detail>]"
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    # older repositories have no undo.desc file
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
728 728
    def invalidatecaches(self):
        '''drop all cached tag, node-tag and branch data so it is
        recomputed on next access'''
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None
735 735
736 736 def invalidate(self):
737 737 for a in ("changelog", "manifest"):
738 738 if a in self.__dict__:
739 739 delattr(self, a)
740 740 self.invalidatecaches()
741 741
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the on-disk lock file lockname

        If wait is false, LockHeld propagates immediately when someone
        else holds the lock; otherwise warn and retry with the
        ui.timeout (default 600s). acquirefn, if given, runs once the
        lock is held; releasefn runs when it is released.'''
        try:
            # first try without waiting
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
756 756
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the lock if this process already holds it
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # invalidate in-memory store data when the lock is acquired
        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
770 770
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse the lock if this process already holds it
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # write the dirstate on release; invalidate it on acquisition
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
785 785
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: filectx providing the new file content
        manifest1, manifest2: manifests of the two commit parents
        linkrev: changelog revision the new filelog entry will link to
        tr: the active transaction
        changelist: output list; the filename is appended when the file
                    (content, copy metadata or flags) actually changed

        Returns the filelog node to record in the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
865 865
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when nothing
        changed and no changeset was created.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            # a merge must be committed whole
            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit (and no branch change / branch close)
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
993 993
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=False, a file that raises a missing-file IOError is
        silently treated as removed; with error=True such IOErrors are
        fatal. Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog; writes are delayed so the pretxncommit
            # hook can see the pending revision before it is finalized
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1061 1061
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()
1080 1080
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        node defaults to None, i.e. the working directory.
        '''
        return self[node].walk(match)
1088 1088
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The unknown, ignored and clean lists are only populated when
        the corresponding keyword argument is true.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files selected by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # fold each subrepo's status into the corresponding lists
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r
1217 1217
1218 1218 def heads(self, start=None):
1219 1219 heads = self.changelog.heads(start)
1220 1220 # sort the output in rev descending order
1221 1221 return sorted(heads, key=self.changelog.rev, reverse=True)
1222 1222
1223 1223 def branchheads(self, branch=None, start=None, closed=False):
1224 1224 '''return a (possibly filtered) list of heads for the given branch
1225 1225
1226 1226 Heads are returned in topological order, from newest to oldest.
1227 1227 If branch is None, use the dirstate branch.
1228 1228 If start is not None, return only heads reachable from start.
1229 1229 If closed is True, return heads that are marked as closed as well.
1230 1230 '''
1231 1231 if branch is None:
1232 1232 branch = self[None].branch()
1233 1233 branches = self.branchmap()
1234 1234 if branch not in branches:
1235 1235 return []
1236 1236 # the cache returns heads ordered lowest to highest
1237 1237 bheads = list(reversed(branches[branch]))
1238 1238 if start is not None:
1239 1239 # filter out the heads that cannot be reached from startrev
1240 1240 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1241 1241 bheads = [h for h in bheads if h in fbheads]
1242 1242 if not closed:
1243 1243 bheads = [h for h in bheads if
1244 1244 ('close' not in self.changelog.read(h)[5])]
1245 1245 return bheads
1246 1246
1247 1247 def branches(self, nodes):
1248 1248 if not nodes:
1249 1249 nodes = [self.changelog.tip()]
1250 1250 b = []
1251 1251 for n in nodes:
1252 1252 t = n
1253 1253 while 1:
1254 1254 p = self.changelog.parents(n)
1255 1255 if p[1] != nullid or p[0] == nullid:
1256 1256 b.append((t, n, p[0], p[1]))
1257 1257 break
1258 1258 n = p[0]
1259 1259 return b
1260 1260
1261 1261 def between(self, pairs):
1262 1262 r = []
1263 1263
1264 1264 for top, bottom in pairs:
1265 1265 n, l, i = top, [], 0
1266 1266 f = 1
1267 1267
1268 1268 while n != bottom and n != nullid:
1269 1269 p = self.changelog.parents(n)[0]
1270 1270 if i == f:
1271 1271 l.append(n)
1272 1272 f = f * 2
1273 1273 n = p
1274 1274 i += 1
1275 1275
1276 1276 r.append(l)
1277 1277
1278 1278 return r
1279 1279
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        heads: optional list of remote nodes to pull (None means all)
        force: proceed even if the repositories appear unrelated

        Returns 0 when there is nothing to fetch, otherwise the integer
        summary returned by addchangegroup().
        """
        # the whole pull runs under the local repository lock
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                # nothing in common with remote: everything is transferred
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            # addchangegroup takes ownership of the lock when it is passed
            # in (see its docstring); the finally below is still safe.
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()
1307 1307
1308 def checkpush(self, force, revs):
1309 """Extensions can override this function if additional checks have
1310 to be performed before pushing, or call it if they override push
1311 command.
1312 """
1313 pass
1314
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # give extensions (e.g. mq) a chance to veto the push before any
        # locking or discovery happens
        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # old-style push: we must hold the remote lock ourselves
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    # 'force' disables the remote-side race check
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()
1353 1361 def changegroupinfo(self, nodes, source):
1354 1362 if self.ui.verbose or source == 'bundle':
1355 1363 self.ui.status(_("%d changesets found\n") % len(nodes))
1356 1364 if self.ui.debugflag:
1357 1365 self.ui.debug("list of changesets:\n")
1358 1366 for node in nodes:
1359 1367 self.ui.debug("%s\n" % hex(node))
1360 1368
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        source is an opaque tag passed through to the pre/post-outgoing
        hooks and to changegroupinfo().
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                # bundling everything up to all heads: _changegroup() can
                # skip the expensive missing-node bookkeeping below
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            # ancestors of known nodes are also known
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            # NOTE(review): 'cnt' is the loop variable left over from
            # enumerate; an empty group would leave it unbound here —
            # presumably callers guarantee msng_cl_lst is non-empty on
            # this path, confirm before reusing.
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    # the second of each chunk triple starts with the node
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            # from here on efiles is just the expected file count
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # key 1 denotes the manifest, already handled above
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1583 1591
1584 1592 def changegroup(self, basenodes, source):
1585 1593 # to avoid a race we use changegroupsubset() (issue1320)
1586 1594 return self.changegroupsubset(basenodes, self.heads(), source)
1587 1595
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # changelog revisions being transmitted; used to decide which
        # manifest/file revisions belong in the group
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # changenode lookup for changesets is the identity
        def identity(x):
            return x

        # yield the nodes of *log* whose linkrev falls inside revset
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a lookup mapping a node of *revlog* back to the changelog
        # node it was introduced by
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
                yield chnk
            # NOTE(review): relies on 'cnt' surviving the loop; an empty
            # 'nodes' would leave it unbound — confirm callers never pass
            # an empty set on this path.
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            efiles = {}
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                if cnt % 3 == 1:
                    # the second chunk of each triple starts with the node
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
                yield chnk
            # from here on efiles is just the expected file count
            efiles = len(efiles)
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # a per-file group is the filename header followed by
                    # its revision chunks
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            total=efiles, unit=_('files'))
                        yield chnk
            self.ui.progress(_('bundling'), None)

            # signal that no more groups are left
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1671 1679
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # changesets are added in order, so the index of the next one to
        # be added is always len(cl)
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            # a weak proxy avoids a reference cycle between the revlogs
            # and the transaction
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # small progress-reporting callback object passed to the
            # changegroup source; mutated in place as we move through the
            # changesets/manifests/files phases
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # from here on efiles is the expected number of changed files
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            # each file group starts with its filename; an empty chunk
            # terminates the stream
            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off the file nodes we were told to expect
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything still listed in needfiles must already exist locally,
            # otherwise the incoming data is incomplete
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # give pretxnchangegroup hooks access to the pending (not
                # yet committed) changelog data
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            # post-transaction hooks: one 'changegroup' for the batch, one
            # 'incoming' per changeset
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1836 1844
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from *remote*.

        requirements: the set of format requirements advertised by the
        remote; merged with our non-format requirements and written out.

        Returns len(self.heads()) + 1 (a pull-style head-count summary).
        Raises util.Abort on a server-side refusal and
        error.ResponseError on a malformed stream.
        """
        fp = remote.stream_out()
        # first line: integer status code (0 = ok)
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        # second line: "<file count> <total bytes>"
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        # per file: a "<name>\0<size>" header line followed by exactly
        # <size> bytes of raw store data
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # avoid division by zero in the rate computation below
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        # drop all cached state; the store was replaced wholesale
        self.invalidate()
        return len(self.heads()) + 1
1891 1899
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        # NOTE(review): the mutable default for ``heads`` is never mutated
        # here, but [] and None are distinct values downstream in pull()
        # (None selects the "all changes" path), so it cannot simply be
        # replaced with a None default — confirm callers before changing.

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        # a streaming clone copies the whole store, so it is only valid
        # when no subset of heads was requested
        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
1919 1927
1920 1928 def pushkey(self, namespace, key, old, new):
1921 1929 return pushkey.push(self, namespace, key, old, new)
1922 1930
1923 1931 def listkeys(self, namespace):
1924 1932 return pushkey.list(self, namespace)
1925 1933
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    *files* is an iterable of (src, dest) pairs; they are copied into
    tuples up front so later mutation of the caller's list has no effect.
    A plain closure is returned (rather than a bound method) to avoid
    circular references.
    """
    pending = [tuple(entry) for entry in files]
    def runrenames():
        for src, dest in pending:
            util.rename(src, dest)
    return runrenames
1933 1941
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at *path*,
    stripping any leading 'file' URL scheme first."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
1936 1944
def islocal(path):
    """Repositories handled by this module are always local.

    *path* is accepted for interface symmetry with other repository
    modules and is deliberately ignored.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now