coding style: fix gratuitous whitespace after Python keywords
Thomas Arendsen Hein
r13075:d73c3034 default
@@ -1,3228 +1,3228 @@
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 file creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 You will by default be managing a patch queue named "patches". You can
42 42 create other, independent patch queues with the :hg:`qqueue` command.
43 43 '''
44 44
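# A minimal hgrc sketch for the [mq] settings described in the docstring
# above (illustrative values only, not a recommendation; 'plain' is read
# further down in queue.__init__ as mq.plain):
#
#   [mq]
#   git = keep    # obey [diff] settings but preserve existing git patches
#   plain = no    # 'yes' forces plain patch headers instead of hg-style ones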
45 45 from mercurial.i18n import _
46 46 from mercurial.node import bin, hex, short, nullid, nullrev
47 47 from mercurial.lock import release
48 48 from mercurial import commands, cmdutil, hg, patch, util
49 49 from mercurial import repair, extensions, url, error
50 50 import os, sys, re, errno, shutil
51 51
52 52 commands.norepo += " qclone"
53 53
54 54 # Patch names look like unix-file names.
55 55 # They must be joinable with queue directory and result in the patch path.
56 56 normname = util.normpath
57 57
58 58 class statusentry(object):
59 59 def __init__(self, node, name):
60 60 self.node, self.name = node, name
61 61 def __repr__(self):
62 62 return hex(self.node) + ':' + self.name
63 63
64 64 class patchheader(object):
65 65 def __init__(self, pf, plainmode=False):
66 66 def eatdiff(lines):
67 67 while lines:
68 68 l = lines[-1]
69 69 if (l.startswith("diff -") or
70 70 l.startswith("Index:") or
71 71 l.startswith("===========")):
72 72 del lines[-1]
73 73 else:
74 74 break
75 75 def eatempty(lines):
76 76 while lines:
77 77 if not lines[-1].strip():
78 78 del lines[-1]
79 79 else:
80 80 break
81 81
82 82 message = []
83 83 comments = []
84 84 user = None
85 85 date = None
86 86 parent = None
87 87 format = None
88 88 subject = None
89 89 diffstart = 0
90 90
91 91 for line in file(pf):
92 92 line = line.rstrip()
93 93 if (line.startswith('diff --git')
94 94 or (diffstart and line.startswith('+++ '))):
95 95 diffstart = 2
96 96 break
97 97 diffstart = 0 # reset
98 98 if line.startswith("--- "):
99 99 diffstart = 1
100 100 continue
101 101 elif format == "hgpatch":
102 102 # parse values when importing the result of an hg export
103 103 if line.startswith("# User "):
104 104 user = line[7:]
105 105 elif line.startswith("# Date "):
106 106 date = line[7:]
107 107 elif line.startswith("# Parent "):
108 108 parent = line[9:]
109 109 elif not line.startswith("# ") and line:
110 110 message.append(line)
111 111 format = None
112 112 elif line == '# HG changeset patch':
113 113 message = []
114 114 format = "hgpatch"
115 115 elif (format != "tagdone" and (line.startswith("Subject: ") or
116 116 line.startswith("subject: "))):
117 117 subject = line[9:]
118 118 format = "tag"
119 119 elif (format != "tagdone" and (line.startswith("From: ") or
120 120 line.startswith("from: "))):
121 121 user = line[6:]
122 122 format = "tag"
123 123 elif (format != "tagdone" and (line.startswith("Date: ") or
124 124 line.startswith("date: "))):
125 125 date = line[6:]
126 126 format = "tag"
127 127 elif format == "tag" and line == "":
128 128 # when looking for tags (subject: from: etc) they
129 129 # end once you find a blank line in the source
130 130 format = "tagdone"
131 131 elif message or line:
132 132 message.append(line)
133 133 comments.append(line)
134 134
135 135 eatdiff(message)
136 136 eatdiff(comments)
137 137 eatempty(message)
138 138 eatempty(comments)
139 139
140 140 # make sure message isn't empty
141 141 if format and format.startswith("tag") and subject:
142 142 message.insert(0, "")
143 143 message.insert(0, subject)
144 144
145 145 self.message = message
146 146 self.comments = comments
147 147 self.user = user
148 148 self.date = date
149 149 self.parent = parent
150 150 self.haspatch = diffstart > 1
151 151 self.plainmode = plainmode
152 152
153 153 def setuser(self, user):
154 154 if not self.updateheader(['From: ', '# User '], user):
155 155 try:
156 156 patchheaderat = self.comments.index('# HG changeset patch')
157 157 self.comments.insert(patchheaderat + 1, '# User ' + user)
158 158 except ValueError:
159 159 if self.plainmode or self._hasheader(['Date: ']):
160 160 self.comments = ['From: ' + user] + self.comments
161 161 else:
162 162 tmp = ['# HG changeset patch', '# User ' + user, '']
163 163 self.comments = tmp + self.comments
164 164 self.user = user
165 165
166 166 def setdate(self, date):
167 167 if not self.updateheader(['Date: ', '# Date '], date):
168 168 try:
169 169 patchheaderat = self.comments.index('# HG changeset patch')
170 170 self.comments.insert(patchheaderat + 1, '# Date ' + date)
171 171 except ValueError:
172 172 if self.plainmode or self._hasheader(['From: ']):
173 173 self.comments = ['Date: ' + date] + self.comments
174 174 else:
175 175 tmp = ['# HG changeset patch', '# Date ' + date, '']
176 176 self.comments = tmp + self.comments
177 177 self.date = date
178 178
179 179 def setparent(self, parent):
180 180 if not self.updateheader(['# Parent '], parent):
181 181 try:
182 182 patchheaderat = self.comments.index('# HG changeset patch')
183 183 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
184 184 except ValueError:
185 185 pass
186 186 self.parent = parent
187 187
188 188 def setmessage(self, message):
189 189 if self.comments:
190 190 self._delmsg()
191 191 self.message = [message]
192 192 self.comments += self.message
193 193
194 194 def updateheader(self, prefixes, new):
195 195 '''Update all references to a field in the patch header.
196 196 Return whether the field is present.'''
197 197 res = False
198 198 for prefix in prefixes:
199 199 for i in xrange(len(self.comments)):
200 200 if self.comments[i].startswith(prefix):
201 201 self.comments[i] = prefix + new
202 202 res = True
203 203 break
204 204 return res
205 205
206 206 def _hasheader(self, prefixes):
207 207 '''Check if a header starts with any of the given prefixes.'''
208 208 for prefix in prefixes:
209 209 for comment in self.comments:
210 210 if comment.startswith(prefix):
211 211 return True
212 212 return False
213 213
214 214 def __str__(self):
215 215 if not self.comments:
216 216 return ''
217 217 return '\n'.join(self.comments) + '\n\n'
218 218
219 219 def _delmsg(self):
220 220 '''Remove existing message, keeping the rest of the comments fields.
221 221 If comments contains 'subject: ', message will prepend
222 222 the field and a blank line.'''
223 223 if self.message:
224 224 subj = 'subject: ' + self.message[0].lower()
225 225 for i in xrange(len(self.comments)):
226 226 if subj == self.comments[i].lower():
227 227 del self.comments[i]
228 228 self.message = self.message[2:]
229 229 break
230 230 ci = 0
231 231 for mi in self.message:
232 232 while mi != self.comments[ci]:
233 233 ci += 1
234 234 del self.comments[ci]
235 235
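# Illustrative sketch of a patch file header in the "hgpatch" form parsed by
# patchheader above (all names and values below are placeholders; plain-mode
# patches carry "From:"/"Date:"/"Subject:" tags instead):
#
#   # HG changeset patch
#   # User Example User <user@example.com>
#   # Date 0 0
#   # Parent 0000000000000000000000000000000000000000
#   one-line commit message
#
#   diff --git a/example.py b/example.py
#   ...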
236 236 class queue(object):
237 237 def __init__(self, ui, path, patchdir=None):
238 238 self.basepath = path
239 239 try:
240 240 fh = open(os.path.join(path, 'patches.queue'))
241 241 cur = fh.read().rstrip()
242 242 if not cur:
243 243 curpath = os.path.join(path, 'patches')
244 244 else:
245 245 curpath = os.path.join(path, 'patches-' + cur)
246 246 except IOError:
247 247 curpath = os.path.join(path, 'patches')
248 248 self.path = patchdir or curpath
249 249 self.opener = util.opener(self.path)
250 250 self.ui = ui
251 251 self.applied_dirty = 0
252 252 self.series_dirty = 0
253 253 self.added = []
254 254 self.series_path = "series"
255 255 self.status_path = "status"
256 256 self.guards_path = "guards"
257 257 self.active_guards = None
258 258 self.guards_dirty = False
259 259 # Handle mq.git as a bool with extended values
260 260 try:
261 261 gitmode = ui.configbool('mq', 'git', None)
262 262 if gitmode is None:
263 263 raise error.ConfigError()
264 264 self.gitmode = gitmode and 'yes' or 'no'
265 265 except error.ConfigError:
266 266 self.gitmode = ui.config('mq', 'git', 'auto').lower()
267 267 self.plainmode = ui.configbool('mq', 'plain', False)
268 268
269 269 @util.propertycache
270 270 def applied(self):
271 271 if os.path.exists(self.join(self.status_path)):
272 272 def parse(l):
273 273 n, name = l.split(':', 1)
274 274 return statusentry(bin(n), name)
275 275 lines = self.opener(self.status_path).read().splitlines()
276 276 return [parse(l) for l in lines]
277 277 return []
278 278
279 279 @util.propertycache
280 280 def full_series(self):
281 281 if os.path.exists(self.join(self.series_path)):
282 282 return self.opener(self.series_path).read().splitlines()
283 283 return []
284 284
285 285 @util.propertycache
286 286 def series(self):
287 287 self.parse_series()
288 288 return self.series
289 289
290 290 @util.propertycache
291 291 def series_guards(self):
292 292 self.parse_series()
293 293 return self.series_guards
294 294
295 295 def invalidate(self):
296 296 for a in 'applied full_series series series_guards'.split():
297 297 if a in self.__dict__:
298 298 delattr(self, a)
299 299 self.applied_dirty = 0
300 300 self.series_dirty = 0
301 301 self.guards_dirty = False
302 302 self.active_guards = None
303 303
304 304 def diffopts(self, opts={}, patchfn=None):
305 305 diffopts = patch.diffopts(self.ui, opts)
306 306 if self.gitmode == 'auto':
307 307 diffopts.upgrade = True
308 308 elif self.gitmode == 'keep':
309 309 pass
310 310 elif self.gitmode in ('yes', 'no'):
311 311 diffopts.git = self.gitmode == 'yes'
312 312 else:
313 313 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
314 314 ' got %s') % self.gitmode)
315 315 if patchfn:
316 316 diffopts = self.patchopts(diffopts, patchfn)
317 317 return diffopts
318 318
319 319 def patchopts(self, diffopts, *patches):
320 320 """Return a copy of input diff options with git set to true if
321 321 referenced patch is a git patch and should be preserved as such.
322 322 """
323 323 diffopts = diffopts.copy()
324 324 if not diffopts.git and self.gitmode == 'keep':
325 325 for patchfn in patches:
326 326 patchf = self.opener(patchfn, 'r')
327 327 # if the patch was a git patch, refresh it as a git patch
328 328 for line in patchf:
329 329 if line.startswith('diff --git'):
330 330 diffopts.git = True
331 331 break
332 332 patchf.close()
333 333 return diffopts
334 334
335 335 def join(self, *p):
336 336 return os.path.join(self.path, *p)
337 337
338 338 def find_series(self, patch):
339 339 def matchpatch(l):
340 340 l = l.split('#', 1)[0]
341 341 return l.strip() == patch
342 342 for index, l in enumerate(self.full_series):
343 343 if matchpatch(l):
344 344 return index
345 345 return None
346 346
347 347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
348 348
349 349 def parse_series(self):
350 350 self.series = []
351 351 self.series_guards = []
352 352 for l in self.full_series:
353 353 h = l.find('#')
354 354 if h == -1:
355 355 patch = l
356 356 comment = ''
357 357 elif h == 0:
358 358 continue
359 359 else:
360 360 patch = l[:h]
361 361 comment = l[h:]
362 362 patch = patch.strip()
363 363 if patch:
364 364 if patch in self.series:
365 365 raise util.Abort(_('%s appears more than once in %s') %
366 366 (patch, self.join(self.series_path)))
367 367 self.series.append(patch)
368 368 self.series_guards.append(self.guard_re.findall(comment))
369 369
370 370 def check_guard(self, guard):
371 371 if not guard:
372 372 return _('guard cannot be an empty string')
373 373 bad_chars = '# \t\r\n\f'
374 374 first = guard[0]
375 375 if first in '-+':
376 376 return (_('guard %r starts with invalid character: %r') %
377 377 (guard, first))
378 378 for c in bad_chars:
379 379 if c in guard:
380 380 return _('invalid character in guard %r: %r') % (guard, c)
381 381
382 382 def set_active(self, guards):
383 383 for guard in guards:
384 384 bad = self.check_guard(guard)
385 385 if bad:
386 386 raise util.Abort(bad)
387 387 guards = sorted(set(guards))
388 388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
389 389 self.active_guards = guards
390 390 self.guards_dirty = True
391 391
392 392 def active(self):
393 393 if self.active_guards is None:
394 394 self.active_guards = []
395 395 try:
396 396 guards = self.opener(self.guards_path).read().split()
397 397 except IOError, err:
398 398 if err.errno != errno.ENOENT:
399 399 raise
400 400 guards = []
401 401 for i, guard in enumerate(guards):
402 402 bad = self.check_guard(guard)
403 403 if bad:
404 404 self.ui.warn('%s:%d: %s\n' %
405 405 (self.join(self.guards_path), i + 1, bad))
406 406 else:
407 407 self.active_guards.append(guard)
408 408 return self.active_guards
409 409
410 410 def set_guards(self, idx, guards):
411 411 for g in guards:
412 412 if len(g) < 2:
413 413 raise util.Abort(_('guard %r too short') % g)
414 414 if g[0] not in '-+':
415 415 raise util.Abort(_('guard %r starts with invalid char') % g)
416 416 bad = self.check_guard(g[1:])
417 417 if bad:
418 418 raise util.Abort(bad)
419 419 drop = self.guard_re.sub('', self.full_series[idx])
420 420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
421 421 self.parse_series()
422 422 self.series_dirty = True
423 423
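# Illustrative series file entries with guards, in the shape written by
# set_guards() above (the patch names are placeholders):
#
#   stable-fix.patch #+stable
#   debug-hack.patch #+debug #-release
#
# With these guards, pushable() below allows stable-fix.patch only while the
# 'stable' guard is active, and always skips debug-hack.patch while the
# 'release' guard is active.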
424 424 def pushable(self, idx):
425 425 if isinstance(idx, str):
426 426 idx = self.series.index(idx)
427 427 patchguards = self.series_guards[idx]
428 428 if not patchguards:
429 429 return True, None
430 430 guards = self.active()
431 431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
432 432 if exactneg:
433 433 return False, exactneg[0]
434 434 pos = [g for g in patchguards if g[0] == '+']
435 435 exactpos = [g for g in pos if g[1:] in guards]
436 436 if pos:
437 437 if exactpos:
438 438 return True, exactpos[0]
439 439 return False, pos
440 440 return True, ''
441 441
442 442 def explain_pushable(self, idx, all_patches=False):
443 443 write = all_patches and self.ui.write or self.ui.warn
444 444 if all_patches or self.ui.verbose:
445 445 if isinstance(idx, str):
446 446 idx = self.series.index(idx)
447 447 pushable, why = self.pushable(idx)
448 448 if all_patches and pushable:
449 449 if why is None:
450 450 write(_('allowing %s - no guards in effect\n') %
451 451 self.series[idx])
452 452 else:
453 453 if not why:
454 454 write(_('allowing %s - no matching negative guards\n') %
455 455 self.series[idx])
456 456 else:
457 457 write(_('allowing %s - guarded by %r\n') %
458 458 (self.series[idx], why))
459 459 if not pushable:
460 460 if why:
461 461 write(_('skipping %s - guarded by %r\n') %
462 462 (self.series[idx], why))
463 463 else:
464 464 write(_('skipping %s - no matching guards\n') %
465 465 self.series[idx])
466 466
467 467 def save_dirty(self):
468 468 def write_list(items, path):
469 469 fp = self.opener(path, 'w')
470 470 for i in items:
471 471 fp.write("%s\n" % i)
472 472 fp.close()
473 473 if self.applied_dirty:
474 474 write_list(map(str, self.applied), self.status_path)
475 475 if self.series_dirty:
476 476 write_list(self.full_series, self.series_path)
477 477 if self.guards_dirty:
478 478 write_list(self.active_guards, self.guards_path)
479 479 if self.added:
480 480 qrepo = self.qrepo()
481 481 if qrepo:
482 482 qrepo[None].add(f for f in self.added if f not in qrepo[None])
483 483 self.added = []
484 484
485 485 def removeundo(self, repo):
486 486 undo = repo.sjoin('undo')
487 487 if not os.path.exists(undo):
488 488 return
489 489 try:
490 490 os.unlink(undo)
491 491 except OSError, inst:
492 492 self.ui.warn(_('error removing undo: %s\n') % str(inst))
493 493
494 494 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
495 495 fp=None, changes=None, opts={}):
496 496 stat = opts.get('stat')
497 497 m = cmdutil.match(repo, files, opts)
498 498 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
499 499 changes, stat, fp)
500 500
501 501 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
502 502 # first try just applying the patch
503 503 (err, n) = self.apply(repo, [patch], update_status=False,
504 504 strict=True, merge=rev)
505 505
506 506 if err == 0:
507 507 return (err, n)
508 508
509 509 if n is None:
510 510 raise util.Abort(_("apply failed for patch %s") % patch)
511 511
512 512 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
513 513
514 514 # apply failed, strip away that rev and merge.
515 515 hg.clean(repo, head)
516 516 self.strip(repo, [n], update=False, backup='strip')
517 517
518 518 ctx = repo[rev]
519 519 ret = hg.merge(repo, rev)
520 520 if ret:
521 521 raise util.Abort(_("update returned %d") % ret)
522 522 n = repo.commit(ctx.description(), ctx.user(), force=True)
523 523 if n is None:
524 524 raise util.Abort(_("repo commit failed"))
525 525 try:
526 526 ph = patchheader(mergeq.join(patch), self.plainmode)
527 527 except:
528 528 raise util.Abort(_("unable to read %s") % patch)
529 529
530 530 diffopts = self.patchopts(diffopts, patch)
531 531 patchf = self.opener(patch, "w")
532 532 comments = str(ph)
533 533 if comments:
534 534 patchf.write(comments)
535 535 self.printdiff(repo, diffopts, head, n, fp=patchf)
536 536 patchf.close()
537 537 self.removeundo(repo)
538 538 return (0, n)
539 539
540 540 def qparents(self, repo, rev=None):
541 541 if rev is None:
542 542 (p1, p2) = repo.dirstate.parents()
543 543 if p2 == nullid:
544 544 return p1
545 545 if not self.applied:
546 546 return None
547 547 return self.applied[-1].node
548 548 p1, p2 = repo.changelog.parents(rev)
549 549 if p2 != nullid and p2 in [x.node for x in self.applied]:
550 550 return p2
551 551 return p1
552 552
553 553 def mergepatch(self, repo, mergeq, series, diffopts):
554 554 if not self.applied:
555 555 # each of the patches merged in will have two parents. This
556 556 # can confuse the qrefresh, qdiff, and strip code because it
557 557 # needs to know which parent is actually in the patch queue.
558 558 # so, we insert a merge marker with only one parent. This way
559 559 # the first patch in the queue is never a merge patch
560 560 #
561 561 pname = ".hg.patches.merge.marker"
562 562 n = repo.commit('[mq]: merge marker', force=True)
563 563 self.removeundo(repo)
564 564 self.applied.append(statusentry(n, pname))
565 565 self.applied_dirty = 1
566 566
567 567 head = self.qparents(repo)
568 568
569 569 for patch in series:
570 570 patch = mergeq.lookup(patch, strict=True)
571 571 if not patch:
572 572 self.ui.warn(_("patch %s does not exist\n") % patch)
573 573 return (1, None)
574 574 pushable, reason = self.pushable(patch)
575 575 if not pushable:
576 576 self.explain_pushable(patch, all_patches=True)
577 577 continue
578 578 info = mergeq.isapplied(patch)
579 579 if not info:
580 580 self.ui.warn(_("patch %s is not applied\n") % patch)
581 581 return (1, None)
582 582 rev = info[1]
583 583 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
584 584 if head:
585 585 self.applied.append(statusentry(head, patch))
586 586 self.applied_dirty = 1
587 587 if err:
588 588 return (err, head)
589 589 self.save_dirty()
590 590 return (0, head)
591 591
592 592 def patch(self, repo, patchfile):
593 593 '''Apply patchfile to the working directory.
594 594 patchfile: name of patch file'''
595 595 files = {}
596 596 try:
597 597 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
598 598 files=files, eolmode=None)
599 599 except Exception, inst:
600 600 self.ui.note(str(inst) + '\n')
601 601 if not self.ui.verbose:
602 602 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
603 603 return (False, files, False)
604 604
605 605 return (True, files, fuzz)
606 606
607 607 def apply(self, repo, series, list=False, update_status=True,
608 608 strict=False, patchdir=None, merge=None, all_files=None):
609 609 wlock = lock = tr = None
610 610 try:
611 611 wlock = repo.wlock()
612 612 lock = repo.lock()
613 613 tr = repo.transaction("qpush")
614 614 try:
615 615 ret = self._apply(repo, series, list, update_status,
616 616 strict, patchdir, merge, all_files=all_files)
617 617 tr.close()
618 618 self.save_dirty()
619 619 return ret
620 620 except:
621 621 try:
622 622 tr.abort()
623 623 finally:
624 624 repo.invalidate()
625 625 repo.dirstate.invalidate()
626 626 raise
627 627 finally:
628 628 release(tr, lock, wlock)
629 629 self.removeundo(repo)
630 630
631 631 def _apply(self, repo, series, list=False, update_status=True,
632 632 strict=False, patchdir=None, merge=None, all_files=None):
633 633 '''returns (error, hash)
634 634 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
635 635 # TODO unify with commands.py
636 636 if not patchdir:
637 637 patchdir = self.path
638 638 err = 0
639 639 n = None
640 640 for patchname in series:
641 641 pushable, reason = self.pushable(patchname)
642 642 if not pushable:
643 643 self.explain_pushable(patchname, all_patches=True)
644 644 continue
645 645 self.ui.status(_("applying %s\n") % patchname)
646 646 pf = os.path.join(patchdir, patchname)
647 647
648 648 try:
649 649 ph = patchheader(self.join(patchname), self.plainmode)
650 650 except:
651 651 self.ui.warn(_("unable to read %s\n") % patchname)
652 652 err = 1
653 653 break
654 654
655 655 message = ph.message
656 656 if not message:
657 657 # The commit message should not be translated
658 658 message = "imported patch %s\n" % patchname
659 659 else:
660 660 if list:
661 661 # The commit message should not be translated
662 662 message.append("\nimported patch %s" % patchname)
663 663 message = '\n'.join(message)
664 664
665 665 if ph.haspatch:
666 666 (patcherr, files, fuzz) = self.patch(repo, pf)
667 667 if all_files is not None:
668 668 all_files.update(files)
669 669 patcherr = not patcherr
670 670 else:
671 671 self.ui.warn(_("patch %s is empty\n") % patchname)
672 672 patcherr, files, fuzz = 0, [], 0
673 673
674 674 if merge and files:
675 675 # Mark as removed/merged and update dirstate parent info
676 676 removed = []
677 677 merged = []
678 678 for f in files:
679 679 if os.path.lexists(repo.wjoin(f)):
680 680 merged.append(f)
681 681 else:
682 682 removed.append(f)
683 683 for f in removed:
684 684 repo.dirstate.remove(f)
685 685 for f in merged:
686 686 repo.dirstate.merge(f)
687 687 p1, p2 = repo.dirstate.parents()
688 688 repo.dirstate.setparents(p1, merge)
689 689
690 690 files = cmdutil.updatedir(self.ui, repo, files)
691 691 match = cmdutil.matchfiles(repo, files or [])
692 692 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
693 693
694 694 if n is None:
695 695 raise util.Abort(_("repository commit failed"))
696 696
697 697 if update_status:
698 698 self.applied.append(statusentry(n, patchname))
699 699
700 700 if patcherr:
701 701 self.ui.warn(_("patch failed, rejects left in working dir\n"))
702 702 err = 2
703 703 break
704 704
705 705 if fuzz and strict:
706 706 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
707 707 err = 3
708 708 break
709 709 return (err, n)
710 710
711 711 def _cleanup(self, patches, numrevs, keep=False):
712 712 if not keep:
713 713 r = self.qrepo()
714 714 if r:
715 715 r[None].remove(patches, True)
716 716 else:
717 717 for p in patches:
718 718 os.unlink(self.join(p))
719 719
720 720 if numrevs:
721 721 del self.applied[:numrevs]
722 722 self.applied_dirty = 1
723 723
724 724 for i in sorted([self.find_series(p) for p in patches], reverse=True):
725 725 del self.full_series[i]
726 726 self.parse_series()
727 727 self.series_dirty = 1
728 728
729 729 def _revpatches(self, repo, revs):
730 730 firstrev = repo[self.applied[0].node].rev()
731 731 patches = []
732 732 for i, rev in enumerate(revs):
733 733
734 734 if rev < firstrev:
735 735 raise util.Abort(_('revision %d is not managed') % rev)
736 736
737 737 ctx = repo[rev]
738 738 base = self.applied[i].node
739 739 if ctx.node() != base:
740 740 msg = _('cannot delete revision %d above applied patches')
741 741 raise util.Abort(msg % rev)
742 742
743 743 patch = self.applied[i].name
744 744 for fmt in ('[mq]: %s', 'imported patch %s'):
745 745 if ctx.description() == fmt % patch:
746 746 msg = _('patch %s finalized without changeset message\n')
747 747 repo.ui.status(msg % patch)
748 748 break
749 749
750 750 patches.append(patch)
751 751 return patches
752 752
753 753 def finish(self, repo, revs):
754 754 patches = self._revpatches(repo, sorted(revs))
755 755 self._cleanup(patches, len(patches))
756 756
757 757 def delete(self, repo, patches, opts):
758 758 if not patches and not opts.get('rev'):
759 759 raise util.Abort(_('qdelete requires at least one revision or '
760 760 'patch name'))
761 761
762 762 realpatches = []
763 763 for patch in patches:
764 764 patch = self.lookup(patch, strict=True)
765 765 info = self.isapplied(patch)
766 766 if info:
767 767 raise util.Abort(_("cannot delete applied patch %s") % patch)
768 768 if patch not in self.series:
769 769 raise util.Abort(_("patch %s not in series file") % patch)
770 770 if patch not in realpatches:
771 771 realpatches.append(patch)
772 772
773 773 numrevs = 0
774 774 if opts.get('rev'):
775 775 if not self.applied:
776 776 raise util.Abort(_('no patches applied'))
777 777 revs = cmdutil.revrange(repo, opts.get('rev'))
778 778 if len(revs) > 1 and revs[0] > revs[1]:
779 779 revs.reverse()
780 780 revpatches = self._revpatches(repo, revs)
781 781 realpatches += revpatches
782 782 numrevs = len(revpatches)
783 783
784 784 self._cleanup(realpatches, numrevs, opts.get('keep'))
785 785
786 786 def check_toppatch(self, repo):
787 787 if self.applied:
788 788 top = self.applied[-1].node
789 789 patch = self.applied[-1].name
790 790 pp = repo.dirstate.parents()
791 791 if top not in pp:
792 792 raise util.Abort(_("working directory revision is not qtip"))
793 793 return top, patch
794 794 return None, None
795 795
796 796 def check_localchanges(self, repo, force=False, refresh=True):
797 797 m, a, r, d = repo.status()[:4]
798 798 if (m or a or r or d) and not force:
799 799 if refresh:
800 800 raise util.Abort(_("local changes found, refresh first"))
801 801 else:
802 802 raise util.Abort(_("local changes found"))
803 803 return m, a, r, d
804 804
805 805 _reserved = ('series', 'status', 'guards')
806 806 def check_reserved_name(self, name):
807 807 if (name in self._reserved or name.startswith('.hg')
808 808 or name.startswith('.mq') or '#' in name or ':' in name):
809 809 raise util.Abort(_('"%s" cannot be used as the name of a patch')
810 810 % name)
811 811
812 812 def new(self, repo, patchfn, *pats, **opts):
813 813 """options:
814 814 msg: a string or a no-argument function returning a string
815 815 """
816 816 msg = opts.get('msg')
817 817 user = opts.get('user')
818 818 date = opts.get('date')
819 819 if date:
820 820 date = util.parsedate(date)
821 821 diffopts = self.diffopts({'git': opts.get('git')})
822 822 self.check_reserved_name(patchfn)
823 823 if os.path.exists(self.join(patchfn)):
824 824 if os.path.isdir(self.join(patchfn)):
825 825 raise util.Abort(_('"%s" already exists as a directory')
826 826 % patchfn)
827 827 else:
828 828 raise util.Abort(_('patch "%s" already exists') % patchfn)
829 829 if opts.get('include') or opts.get('exclude') or pats:
830 830 match = cmdutil.match(repo, pats, opts)
831 831 # detect missing files in pats
832 832 def badfn(f, msg):
833 833 raise util.Abort('%s: %s' % (f, msg))
834 834 match.bad = badfn
835 835 m, a, r, d = repo.status(match=match)[:4]
836 836 else:
837 837 m, a, r, d = self.check_localchanges(repo, force=True)
838 838 match = cmdutil.matchfiles(repo, m + a + r)
839 839 if len(repo[None].parents()) > 1:
840 840 raise util.Abort(_('cannot manage merge changesets'))
841 841 commitfiles = m + a + r
842 842 self.check_toppatch(repo)
843 843 insert = self.full_series_end()
844 844 wlock = repo.wlock()
845 845 try:
846 846 try:
847 847 # if patch file write fails, abort early
848 848 p = self.opener(patchfn, "w")
849 849 except IOError, e:
850 850 raise util.Abort(_('cannot write patch "%s": %s')
851 851 % (patchfn, e.strerror))
852 852 try:
853 853 if self.plainmode:
854 854 if user:
855 855 p.write("From: " + user + "\n")
856 856 if not date:
857 857 p.write("\n")
858 858 if date:
859 859 p.write("Date: %d %d\n\n" % date)
860 860 else:
861 861 p.write("# HG changeset patch\n")
862 862 p.write("# Parent "
863 863 + hex(repo[None].parents()[0].node()) + "\n")
864 864 if user:
865 865 p.write("# User " + user + "\n")
866 866 if date:
867 867 p.write("# Date %s %s\n\n" % date)
868 868 if hasattr(msg, '__call__'):
869 869 msg = msg()
870 870 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
871 871 n = repo.commit(commitmsg, user, date, match=match, force=True)
872 872 if n is None:
873 873 raise util.Abort(_("repo commit failed"))
874 874 try:
875 875 self.full_series[insert:insert] = [patchfn]
876 876 self.applied.append(statusentry(n, patchfn))
877 877 self.parse_series()
878 878 self.series_dirty = 1
879 879 self.applied_dirty = 1
880 880 if msg:
881 881 msg = msg + "\n\n"
882 882 p.write(msg)
883 883 if commitfiles:
884 884 parent = self.qparents(repo, n)
885 885 chunks = patch.diff(repo, node1=parent, node2=n,
886 886 match=match, opts=diffopts)
887 887 for chunk in chunks:
888 888 p.write(chunk)
889 889 p.close()
890 890 wlock.release()
891 891 wlock = None
892 892 r = self.qrepo()
893 893 if r:
894 894 r[None].add([patchfn])
895 895 except:
896 896 repo.rollback()
897 897 raise
898 898 except Exception:
899 899 patchpath = self.join(patchfn)
900 900 try:
901 901 os.unlink(patchpath)
902 902 except:
903 903 self.ui.warn(_('error unlinking %s\n') % patchpath)
904 904 raise
905 905 self.removeundo(repo)
906 906 finally:
907 907 release(wlock)
908 908
909 909 def strip(self, repo, revs, update=True, backup="all", force=None):
910 910 wlock = lock = None
911 911 try:
912 912 wlock = repo.wlock()
913 913 lock = repo.lock()
914 914
915 915 if update:
916 916 self.check_localchanges(repo, force=force, refresh=False)
917 917 urev = self.qparents(repo, revs[0])
918 918 hg.clean(repo, urev)
919 919 repo.dirstate.write()
920 920
921 921 self.removeundo(repo)
922 922 for rev in revs:
923 923 repair.strip(self.ui, repo, rev, backup)
924 924 # strip may have unbundled a set of backed up revisions after
925 925 # the actual strip
926 926 self.removeundo(repo)
927 927 finally:
928 928 release(lock, wlock)
929 929
930 930 def isapplied(self, patch):
931 931 """returns (index, rev, patch)"""
932 932 for i, a in enumerate(self.applied):
933 933 if a.name == patch:
934 934 return (i, a.node, a.name)
935 935 return None
936 936
937 937 # if the exact patch name does not exist, we try a few
938 938 # variations. If strict is passed, we try only #1
939 939 #
940 940 # 1) a number to indicate an offset in the series file
941 941 # 2) a unique substring of the patch name was given
942 942 # 3) patchname[-+]num to indicate an offset in the series file
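#
# For example (a sketch, assuming a series of a.patch, b.patch, c.patch and
# no patch file literally named "1"):
#   lookup("1")         -> "b.patch"  (offset into the series)
#   lookup("b.pa")      -> "b.patch"  (unique substring)
#   lookup("c.patch-1") -> "b.patch"  (one before c.patch)
#   lookup("a.patch+2") -> "c.patch"  (two after a.patch)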
943 943 def lookup(self, patch, strict=False):
944 944 patch = patch and str(patch)
945 945
946 946 def partial_name(s):
947 947 if s in self.series:
948 948 return s
949 949 matches = [x for x in self.series if s in x]
950 950 if len(matches) > 1:
951 951 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
952 952 for m in matches:
953 953 self.ui.warn(' %s\n' % m)
954 954 return None
955 955 if matches:
956 956 return matches[0]
957 957 if self.series and self.applied:
958 958 if s == 'qtip':
959 959 return self.series[self.series_end(True)-1]
960 960 if s == 'qbase':
961 961 return self.series[0]
962 962 return None
963 963
964 964 if patch is None:
965 965 return None
966 966 if patch in self.series:
967 967 return patch
968 968
969 969 if not os.path.isfile(self.join(patch)):
970 970 try:
971 971 sno = int(patch)
972 972 except (ValueError, OverflowError):
973 973 pass
974 974 else:
975 975 if -len(self.series) <= sno < len(self.series):
976 976 return self.series[sno]
977 977
978 978 if not strict:
979 979 res = partial_name(patch)
980 980 if res:
981 981 return res
982 982 minus = patch.rfind('-')
983 983 if minus >= 0:
984 984 res = partial_name(patch[:minus])
985 985 if res:
986 986 i = self.series.index(res)
987 987 try:
988 988 off = int(patch[minus + 1:] or 1)
989 989 except (ValueError, OverflowError):
990 990 pass
991 991 else:
992 992 if i - off >= 0:
993 993 return self.series[i - off]
994 994 plus = patch.rfind('+')
995 995 if plus >= 0:
996 996 res = partial_name(patch[:plus])
997 997 if res:
998 998 i = self.series.index(res)
999 999 try:
1000 1000 off = int(patch[plus + 1:] or 1)
1001 1001 except (ValueError, OverflowError):
1002 1002 pass
1003 1003 else:
1004 1004 if i + off < len(self.series):
1005 1005 return self.series[i + off]
1006 1006 raise util.Abort(_("patch %s not in series") % patch)
1007 1007
1008 1008 def push(self, repo, patch=None, force=False, list=False,
1009 1009 mergeq=None, all=False, move=False, exact=False):
1010 1010 diffopts = self.diffopts()
1011 1011 wlock = repo.wlock()
1012 1012 try:
1013 1013 heads = []
1014 1014 for b, ls in repo.branchmap().iteritems():
1015 1015 heads += ls
1016 1016 if not heads:
1017 1017 heads = [nullid]
1018 1018 if repo.dirstate.parents()[0] not in heads and not exact:
1019 1019 self.ui.status(_("(working directory not at a head)\n"))
1020 1020
1021 1021 if not self.series:
1022 1022 self.ui.warn(_('no patches in series\n'))
1023 1023 return 0
1024 1024
1025 1025 patch = self.lookup(patch)
1026 1026 # Suppose our series file is: A B C and the current 'top'
1027 1027 # patch is B. qpush C should be performed (moving forward)
1028 1028 # qpush B is a NOP (no change) qpush A is an error (can't
1029 1029 # go backwards with qpush)
1030 1030 if patch:
1031 1031 info = self.isapplied(patch)
1032 1032 if info:
1033 1033 if info[0] < len(self.applied) - 1:
1034 1034 raise util.Abort(
1035 1035 _("cannot push to a previous patch: %s") % patch)
1036 1036 self.ui.warn(
1037 1037 _('qpush: %s is already at the top\n') % patch)
1038 1038 return 0
1039 1039 pushable, reason = self.pushable(patch)
1040 1040 if not pushable:
1041 1041 if reason:
1042 1042 reason = _('guarded by %r') % reason
1043 1043 else:
1044 1044 reason = _('no matching guards')
1045 1045 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1046 1046 return 1
1047 1047 elif all:
1048 1048 patch = self.series[-1]
1049 1049 if self.isapplied(patch):
1050 1050 self.ui.warn(_('all patches are currently applied\n'))
1051 1051 return 0
1052 1052
1053 1053 # Following the above example, starting at 'top' of B:
1054 1054 # qpush should be performed (pushes C), but a subsequent
1055 1055 # qpush without an argument is an error (nothing to
1056 1056 # apply). This allows a loop of "...while hg qpush..." to
1057 1057 # work as it detects an error when done
1058 1058 start = self.series_end()
1059 1059 if start == len(self.series):
1060 1060 self.ui.warn(_('patch series already fully applied\n'))
1061 1061 return 1
1062 1062 if not force:
1063 1063 self.check_localchanges(repo)
1064 1064
1065 1065 if exact:
1066 1066 if move:
1067 1067 raise util.Abort(_("cannot use --exact and --move together"))
1068 1068 if self.applied:
1069 1069 raise util.Abort(_("cannot push --exact with applied patches"))
1070 1070 root = self.series[start]
1071 1071 target = patchheader(self.join(root), self.plainmode).parent
1072 1072 if not target:
1072 1072 raise util.Abort(_("%s does not have a parent recorded") % root)
1074 1074 if not repo[target] == repo['.']:
1075 1075 hg.update(repo, target)
1076 1076
1077 1077 if move:
1078 1078 if not patch:
1079 raise util.Abort(_("please specify the patch to move"))
1079 raise util.Abort(_("please specify the patch to move"))
1080 1080 for i, rpn in enumerate(self.full_series[start:]):
1081 1081 # strip markers for patch guards
1082 1082 if self.guard_re.split(rpn, 1)[0] == patch:
1083 1083 break
1084 1084 index = start + i
1085 1085 assert index < len(self.full_series)
1086 1086 fullpatch = self.full_series[index]
1087 1087 del self.full_series[index]
1088 1088 self.full_series.insert(start, fullpatch)
1089 1089 self.parse_series()
1090 1090 self.series_dirty = 1
1091 1091
1092 1092 self.applied_dirty = 1
1093 1093 if start > 0:
1094 1094 self.check_toppatch(repo)
1095 1095 if not patch:
1096 1096 patch = self.series[start]
1097 1097 end = start + 1
1098 1098 else:
1099 1099 end = self.series.index(patch, start) + 1
1100 1100
1101 1101 s = self.series[start:end]
1102 1102 all_files = set()
1103 1103 try:
1104 1104 if mergeq:
1105 1105 ret = self.mergepatch(repo, mergeq, s, diffopts)
1106 1106 else:
1107 1107 ret = self.apply(repo, s, list, all_files=all_files)
1108 1108 except:
1109 1109 self.ui.warn(_('cleaning up working directory...'))
1110 1110 node = repo.dirstate.parents()[0]
1111 1111 hg.revert(repo, node, None)
1112 1112 # only remove unknown files that we know we touched or
1113 1113 # created while patching
1114 1114 for f in all_files:
1115 1115 if f not in repo.dirstate:
1116 1116 try:
1117 1117 util.unlink(repo.wjoin(f))
1118 1118 except OSError, inst:
1119 1119 if inst.errno != errno.ENOENT:
1120 1120 raise
1121 1121 self.ui.warn(_('done\n'))
1122 1122 raise
1123 1123
1124 1124 if not self.applied:
1125 1125 return ret[0]
1126 1126 top = self.applied[-1].name
1127 1127 if ret[0] and ret[0] > 1:
1128 1128 msg = _("errors during apply, please fix and refresh %s\n")
1129 1129 self.ui.write(msg % top)
1130 1130 else:
1131 1131 self.ui.write(_("now at: %s\n") % top)
1132 1132 return ret[0]
1133 1133
1134 1134 finally:
1135 1135 wlock.release()
1136 1136
1137 1137 def pop(self, repo, patch=None, force=False, update=True, all=False):
1138 1138 wlock = repo.wlock()
1139 1139 try:
1140 1140 if patch:
1141 1141 # index, rev, patch
1142 1142 info = self.isapplied(patch)
1143 1143 if not info:
1144 1144 patch = self.lookup(patch)
1145 1145 info = self.isapplied(patch)
1146 1146 if not info:
1147 1147 raise util.Abort(_("patch %s is not applied") % patch)
1148 1148
1149 1149 if not self.applied:
1150 1150 # Allow qpop -a to work repeatedly,
1151 1151 # but not qpop without an argument
1152 1152 self.ui.warn(_("no patches applied\n"))
1153 1153 return not all
1154 1154
1155 1155 if all:
1156 1156 start = 0
1157 1157 elif patch:
1158 1158 start = info[0] + 1
1159 1159 else:
1160 1160 start = len(self.applied) - 1
1161 1161
1162 1162 if start >= len(self.applied):
1163 1163 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1164 1164 return
1165 1165
1166 1166 if not update:
1167 1167 parents = repo.dirstate.parents()
1168 1168 rr = [x.node for x in self.applied]
1169 1169 for p in parents:
1170 1170 if p in rr:
1171 1171 self.ui.warn(_("qpop: forcing dirstate update\n"))
1172 1172 update = True
1173 1173 else:
1174 1174 parents = [p.node() for p in repo[None].parents()]
1175 1175 needupdate = False
1176 1176 for entry in self.applied[start:]:
1177 1177 if entry.node in parents:
1178 1178 needupdate = True
1179 1179 break
1180 1180 update = needupdate
1181 1181
1182 1182 if not force and update:
1183 1183 self.check_localchanges(repo)
1184 1184
1185 1185 self.applied_dirty = 1
1186 1186 end = len(self.applied)
1187 1187 rev = self.applied[start].node
1188 1188 if update:
1189 1189 top = self.check_toppatch(repo)[0]
1190 1190
1191 1191 try:
1192 1192 heads = repo.changelog.heads(rev)
1193 1193 except error.LookupError:
1194 1194 node = short(rev)
1195 1195 raise util.Abort(_('trying to pop unknown node %s') % node)
1196 1196
1197 1197 if heads != [self.applied[-1].node]:
1198 1198 raise util.Abort(_("popping would remove a revision not "
1199 1199 "managed by this patch queue"))
1200 1200
1201 1201 # we know there are no local changes, so we can make a simplified
1202 1202 # form of hg.update.
1203 1203 if update:
1204 1204 qp = self.qparents(repo, rev)
1205 1205 ctx = repo[qp]
1206 1206 m, a, r, d = repo.status(qp, top)[:4]
1207 1207 if d:
1208 1208 raise util.Abort(_("deletions found between repo revs"))
1209 1209 for f in a:
1210 1210 try:
1211 1211 util.unlink(repo.wjoin(f))
1212 1212 except OSError, e:
1213 1213 if e.errno != errno.ENOENT:
1214 1214 raise
1215 1215 repo.dirstate.forget(f)
1216 1216 for f in m + r:
1217 1217 fctx = ctx[f]
1218 1218 repo.wwrite(f, fctx.data(), fctx.flags())
1219 1219 repo.dirstate.normal(f)
1220 1220 repo.dirstate.setparents(qp, nullid)
1221 1221 for patch in reversed(self.applied[start:end]):
1222 1222 self.ui.status(_("popping %s\n") % patch.name)
1223 1223 del self.applied[start:end]
1224 1224 self.strip(repo, [rev], update=False, backup='strip')
1225 1225 if self.applied:
1226 1226 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1227 1227 else:
1228 1228 self.ui.write(_("patch queue now empty\n"))
1229 1229 finally:
1230 1230 wlock.release()
1231 1231
1232 1232 def diff(self, repo, pats, opts):
1233 1233 top, patch = self.check_toppatch(repo)
1234 1234 if not top:
1235 1235 self.ui.write(_("no patches applied\n"))
1236 1236 return
1237 1237 qp = self.qparents(repo, top)
1238 1238 if opts.get('reverse'):
1239 1239 node1, node2 = None, qp
1240 1240 else:
1241 1241 node1, node2 = qp, None
1242 1242 diffopts = self.diffopts(opts, patch)
1243 1243 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1244 1244
1245 1245 def refresh(self, repo, pats=None, **opts):
1246 1246 if not self.applied:
1247 1247 self.ui.write(_("no patches applied\n"))
1248 1248 return 1
1249 1249 msg = opts.get('msg', '').rstrip()
1250 1250 newuser = opts.get('user')
1251 1251 newdate = opts.get('date')
1252 1252 if newdate:
1253 1253 newdate = '%d %d' % util.parsedate(newdate)
1254 1254 wlock = repo.wlock()
1255 1255
1256 1256 try:
1257 1257 self.check_toppatch(repo)
1258 1258 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1259 1259 if repo.changelog.heads(top) != [top]:
1260 1260 raise util.Abort(_("cannot refresh a revision with children"))
1261 1261
1262 1262 cparents = repo.changelog.parents(top)
1263 1263 patchparent = self.qparents(repo, top)
1264 1264 ph = patchheader(self.join(patchfn), self.plainmode)
1265 1265 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1266 1266 if msg:
1267 1267 ph.setmessage(msg)
1268 1268 if newuser:
1269 1269 ph.setuser(newuser)
1270 1270 if newdate:
1271 1271 ph.setdate(newdate)
1272 1272 ph.setparent(hex(patchparent))
1273 1273
1274 1274 # only commit new patch when write is complete
1275 1275 patchf = self.opener(patchfn, 'w', atomictemp=True)
1276 1276
1277 1277 comments = str(ph)
1278 1278 if comments:
1279 1279 patchf.write(comments)
1280 1280
1281 1281 # update the dirstate in place, strip off the qtip commit
1282 1282 # and then commit.
1283 1283 #
1284 1284 # this should really read:
1285 1285 # mm, dd, aa = repo.status(top, patchparent)[:3]
1286 1286 # but we do it backwards to take advantage of manifest/chlog
1287 1287 # caching against the next repo.status call
1288 1288 mm, aa, dd = repo.status(patchparent, top)[:3]
1289 1289 changes = repo.changelog.read(top)
1290 1290 man = repo.manifest.read(changes[0])
1291 1291 aaa = aa[:]
1292 1292 matchfn = cmdutil.match(repo, pats, opts)
1293 1293 # in short mode, we only diff the files included in the
1294 1294 # patch already plus specified files
1295 1295 if opts.get('short'):
1296 1296 # if amending a patch, we start with existing
1297 1297 # files plus specified files - unfiltered
1298 1298 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1299 1299 # filter with inc/exl options
1300 1300 matchfn = cmdutil.match(repo, opts=opts)
1301 1301 else:
1302 1302 match = cmdutil.matchall(repo)
1303 1303 m, a, r, d = repo.status(match=match)[:4]
1304 1304 mm = set(mm)
1305 1305 aa = set(aa)
1306 1306 dd = set(dd)
1307 1307
1308 1308 # we might end up with files that were added between
1309 1309 # qtip and the dirstate parent, but then changed in the
1310 1310 # local dirstate. in this case, we want them to only
1311 1311 # show up in the added section
1312 1312 for x in m:
1313 1313 if x == '.hgsub' or x == '.hgsubstate':
1314 1314 self.ui.warn(_('warning: not refreshing %s\n') % x)
1315 1315 continue
1316 1316 if x not in aa:
1317 1317 mm.add(x)
1318 1318 # we might end up with files added by the local dirstate that
1319 1319 # were deleted by the patch. In this case, they should only
1320 1320 # show up in the changed section.
1321 1321 for x in a:
1322 1322 if x == '.hgsub' or x == '.hgsubstate':
1323 1323 self.ui.warn(_('warning: not adding %s\n') % x)
1324 1324 continue
1325 1325 if x in dd:
1326 1326 dd.remove(x)
1327 1327 mm.add(x)
1328 1328 else:
1329 1329 aa.add(x)
1330 1330 # make sure any files deleted in the local dirstate
1331 1331 # are not in the add or change column of the patch
1332 1332 forget = []
1333 1333 for x in d + r:
1334 1334 if x == '.hgsub' or x == '.hgsubstate':
1335 1335 self.ui.warn(_('warning: not removing %s\n') % x)
1336 1336 continue
1337 1337 if x in aa:
1338 1338 aa.remove(x)
1339 1339 forget.append(x)
1340 1340 continue
1341 1341 else:
1342 1342 mm.discard(x)
1343 1343 dd.add(x)
1344 1344
1345 1345 m = list(mm)
1346 1346 r = list(dd)
1347 1347 a = list(aa)
1348 1348 c = [filter(matchfn, l) for l in (m, a, r)]
1349 1349 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1350 1350 chunks = patch.diff(repo, patchparent, match=match,
1351 1351 changes=c, opts=diffopts)
1352 1352 for chunk in chunks:
1353 1353 patchf.write(chunk)
1354 1354
1355 1355 try:
1356 1356 if diffopts.git or diffopts.upgrade:
1357 1357 copies = {}
1358 1358 for dst in a:
1359 1359 src = repo.dirstate.copied(dst)
1360 1360 # during qfold, the source file for copies may
1361 1361 # be removed. Treat this as a simple add.
1362 1362 if src is not None and src in repo.dirstate:
1363 1363 copies.setdefault(src, []).append(dst)
1364 1364 repo.dirstate.add(dst)
1365 1365 # remember the copies between patchparent and qtip
1366 1366 for dst in aaa:
1367 1367 f = repo.file(dst)
1368 1368 src = f.renamed(man[dst])
1369 1369 if src:
1370 1370 copies.setdefault(src[0], []).extend(
1371 1371 copies.get(dst, []))
1372 1372 if dst in a:
1373 1373 copies[src[0]].append(dst)
1374 1374 # we can't copy a file created by the patch itself
1375 1375 if dst in copies:
1376 1376 del copies[dst]
1377 1377 for src, dsts in copies.iteritems():
1378 1378 for dst in dsts:
1379 1379 repo.dirstate.copy(src, dst)
1380 1380 else:
1381 1381 for dst in a:
1382 1382 repo.dirstate.add(dst)
1383 1383 # Drop useless copy information
1384 1384 for f in list(repo.dirstate.copies()):
1385 1385 repo.dirstate.copy(None, f)
1386 1386 for f in r:
1387 1387 repo.dirstate.remove(f)
1388 1388 # if the patch excludes a modified file, mark that
1389 1389 # file with mtime=0 so status can see it.
1390 1390 mm = []
1391 1391 for i in xrange(len(m)-1, -1, -1):
1392 1392 if not matchfn(m[i]):
1393 1393 mm.append(m[i])
1394 1394 del m[i]
1395 1395 for f in m:
1396 1396 repo.dirstate.normal(f)
1397 1397 for f in mm:
1398 1398 repo.dirstate.normallookup(f)
1399 1399 for f in forget:
1400 1400 repo.dirstate.forget(f)
1401 1401
1402 1402 if not msg:
1403 1403 if not ph.message:
1404 1404 message = "[mq]: %s\n" % patchfn
1405 1405 else:
1406 1406 message = "\n".join(ph.message)
1407 1407 else:
1408 1408 message = msg
1409 1409
1410 1410 user = ph.user or changes[1]
1411 1411
1412 1412 # assumes strip can roll itself back if interrupted
1413 1413 repo.dirstate.setparents(*cparents)
1414 1414 self.applied.pop()
1415 1415 self.applied_dirty = 1
1416 1416 self.strip(repo, [top], update=False,
1417 1417 backup='strip')
1418 1418 except:
1419 1419 repo.dirstate.invalidate()
1420 1420 raise
1421 1421
1422 1422 try:
1423 1423 # might be nice to attempt to roll back strip after this
1424 1424 patchf.rename()
1425 1425 n = repo.commit(message, user, ph.date, match=match,
1426 1426 force=True)
1427 1427 self.applied.append(statusentry(n, patchfn))
1428 1428 except:
1429 1429 ctx = repo[cparents[0]]
1430 1430 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1431 1431 self.save_dirty()
1432 1432 self.ui.warn(_('refresh interrupted while patch was popped! '
1433 1433 '(revert --all, qpush to recover)\n'))
1434 1434 raise
1435 1435 finally:
1436 1436 wlock.release()
1437 1437 self.removeundo(repo)
1438 1438
1439 1439 def init(self, repo, create=False):
1440 1440 if not create and os.path.isdir(self.path):
1441 1441 raise util.Abort(_("patch queue directory already exists"))
1442 1442 try:
1443 1443 os.mkdir(self.path)
1444 1444 except OSError, inst:
1445 1445 if inst.errno != errno.EEXIST or not create:
1446 1446 raise
1447 1447 if create:
1448 1448 return self.qrepo(create=True)
1449 1449
1450 1450 def unapplied(self, repo, patch=None):
1451 1451 if patch and patch not in self.series:
1452 1452 raise util.Abort(_("patch %s is not in series file") % patch)
1453 1453 if not patch:
1454 1454 start = self.series_end()
1455 1455 else:
1456 1456 start = self.series.index(patch) + 1
1457 1457 unapplied = []
1458 1458 for i in xrange(start, len(self.series)):
1459 1459 pushable, reason = self.pushable(i)
1460 1460 if pushable:
1461 1461 unapplied.append((i, self.series[i]))
1462 1462 self.explain_pushable(i)
1463 1463 return unapplied
1464 1464
1465 1465 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1466 1466 summary=False):
1467 1467 def displayname(pfx, patchname, state):
1468 1468 if pfx:
1469 1469 self.ui.write(pfx)
1470 1470 if summary:
1471 1471 ph = patchheader(self.join(patchname), self.plainmode)
1472 1472 msg = ph.message and ph.message[0] or ''
1473 1473 if self.ui.formatted():
1474 1474 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1475 1475 if width > 0:
1476 1476 msg = util.ellipsis(msg, width)
1477 1477 else:
1478 1478 msg = ''
1479 1479 self.ui.write(patchname, label='qseries.' + state)
1480 1480 self.ui.write(': ')
1481 1481 self.ui.write(msg, label='qseries.message.' + state)
1482 1482 else:
1483 1483 self.ui.write(patchname, label='qseries.' + state)
1484 1484 self.ui.write('\n')
1485 1485
1486 1486 applied = set([p.name for p in self.applied])
1487 1487 if length is None:
1488 1488 length = len(self.series) - start
1489 1489 if not missing:
1490 1490 if self.ui.verbose:
1491 1491 idxwidth = len(str(start + length - 1))
1492 1492 for i in xrange(start, start + length):
1493 1493 patch = self.series[i]
1494 1494 if patch in applied:
1495 1495 char, state = 'A', 'applied'
1496 1496 elif self.pushable(i)[0]:
1497 1497 char, state = 'U', 'unapplied'
1498 1498 else:
1499 1499 char, state = 'G', 'guarded'
1500 1500 pfx = ''
1501 1501 if self.ui.verbose:
1502 1502 pfx = '%*d %s ' % (idxwidth, i, char)
1503 1503 elif status and status != char:
1504 1504 continue
1505 1505 displayname(pfx, patch, state)
1506 1506 else:
1507 1507 msng_list = []
1508 1508 for root, dirs, files in os.walk(self.path):
1509 1509 d = root[len(self.path) + 1:]
1510 1510 for f in files:
1511 1511 fl = os.path.join(d, f)
1512 1512 if (fl not in self.series and
1513 1513 fl not in (self.status_path, self.series_path,
1514 1514 self.guards_path)
1515 1515 and not fl.startswith('.')):
1516 1516 msng_list.append(fl)
1517 1517 for x in sorted(msng_list):
1518 1518 pfx = self.ui.verbose and ('D ') or ''
1519 1519 displayname(pfx, x, 'missing')
1520 1520
1521 1521 def issaveline(self, l):
1522 1522 if l.name == '.hg.patches.save.line':
1523 1523 return True
1524 1524
1525 1525 def qrepo(self, create=False):
1526 1526 ui = self.ui.copy()
1527 1527 ui.setconfig('paths', 'default', '', overlay=False)
1528 1528 ui.setconfig('paths', 'default-push', '', overlay=False)
1529 1529 if create or os.path.isdir(self.join(".hg")):
1530 1530 return hg.repository(ui, path=self.path, create=create)
1531 1531
1532 1532 def restore(self, repo, rev, delete=None, qupdate=None):
1533 1533 desc = repo[rev].description().strip()
1534 1534 lines = desc.splitlines()
1535 1535 i = 0
1536 1536 datastart = None
1537 1537 series = []
1538 1538 applied = []
1539 1539 qpp = None
1540 1540 for i, line in enumerate(lines):
1541 1541 if line == 'Patch Data:':
1542 1542 datastart = i + 1
1543 1543 elif line.startswith('Dirstate:'):
1544 1544 l = line.rstrip()
1545 1545 l = l[10:].split(' ')
1546 1546 qpp = [bin(x) for x in l]
1547 1547 elif datastart is not None:
1548 1548 l = line.rstrip()
1549 1549 n, name = l.split(':', 1)
1550 1550 if n:
1551 1551 applied.append(statusentry(bin(n), name))
1552 1552 else:
1553 1553 series.append(l)
1554 1554 if datastart is None:
1555 1555 self.ui.warn(_("No saved patch data found\n"))
1556 1556 return 1
1557 1557 self.ui.warn(_("restoring status: %s\n") % lines[0])
1558 1558 self.full_series = series
1559 1559 self.applied = applied
1560 1560 self.parse_series()
1561 1561 self.series_dirty = 1
1562 1562 self.applied_dirty = 1
1563 1563 heads = repo.changelog.heads()
1564 1564 if delete:
1565 1565 if rev not in heads:
1566 1566 self.ui.warn(_("save entry has children, leaving it alone\n"))
1567 1567 else:
1568 1568 self.ui.warn(_("removing save entry %s\n") % short(rev))
1569 1569 pp = repo.dirstate.parents()
1570 1570 if rev in pp:
1571 1571 update = True
1572 1572 else:
1573 1573 update = False
1574 1574 self.strip(repo, [rev], update=update, backup='strip')
1575 1575 if qpp:
1576 1576 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1577 1577 (short(qpp[0]), short(qpp[1])))
1578 1578 if qupdate:
1579 1579 self.ui.status(_("updating queue directory\n"))
1580 1580 r = self.qrepo()
1581 1581 if not r:
1582 1582 self.ui.warn(_("Unable to load queue repository\n"))
1583 1583 return 1
1584 1584 hg.clean(r, qpp[0])
1585 1585
1586 1586 def save(self, repo, msg=None):
1587 1587 if not self.applied:
1588 1588 self.ui.warn(_("save: no patches applied, exiting\n"))
1589 1589 return 1
1590 1590 if self.issaveline(self.applied[-1]):
1591 1591 self.ui.warn(_("status is already saved\n"))
1592 1592 return 1
1593 1593
1594 1594 if not msg:
1595 1595 msg = _("hg patches saved state")
1596 1596 else:
1597 1597 msg = "hg patches: " + msg.rstrip('\r\n')
1598 1598 r = self.qrepo()
1599 1599 if r:
1600 1600 pp = r.dirstate.parents()
1601 1601 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1602 1602 msg += "\n\nPatch Data:\n"
1603 1603 msg += ''.join('%s\n' % x for x in self.applied)
1604 1604 msg += ''.join(':%s\n' % x for x in self.full_series)
1605 1605 n = repo.commit(msg, force=True)
1606 1606 if not n:
1607 1607 self.ui.warn(_("repo commit failed\n"))
1608 1608 return 1
1609 1609 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1610 1610 self.applied_dirty = 1
1611 1611 self.removeundo(repo)
1612 1612
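# Illustrative shape of the save-line commit message built by save() above and
# parsed back by restore(): one "<hex-node>:<name>" line per applied patch,
# then one ":"-prefixed line per series file entry (hashes and names below are
# placeholders):
#
#   hg patches saved state
#   Dirstate: <parent1-hex> <parent2-hex>
#
#   Patch Data:
#   <hex-node>:some-applied.patch
#   :some-series-entry.patch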
1613 1613 def full_series_end(self):
1614 1614 if self.applied:
1615 1615 p = self.applied[-1].name
1616 1616 end = self.find_series(p)
1617 1617 if end is None:
1618 1618 return len(self.full_series)
1619 1619 return end + 1
1620 1620 return 0
1621 1621
1622 1622 def series_end(self, all_patches=False):
1623 1623 """If all_patches is False, return the index of the next pushable patch
1624 1624 in the series, or the series length. If all_patches is True, return the
1625 1625 index of the first patch past the last applied one.
1626 1626 """
1627 1627 end = 0
1628 1628 def next(start):
1629 1629 if all_patches or start >= len(self.series):
1630 1630 return start
1631 1631 for i in xrange(start, len(self.series)):
1632 1632 p, reason = self.pushable(i)
1633 1633 if p:
1634 1634 break
1635 1635 self.explain_pushable(i)
1636 1636 return i
1637 1637 if self.applied:
1638 1638 p = self.applied[-1].name
1639 1639 try:
1640 1640 end = self.series.index(p)
1641 1641 except ValueError:
1642 1642 return 0
1643 1643 return next(end + 1)
1644 1644 return next(end)
1645 1645
1646 1646 def appliedname(self, index):
1647 1647 pname = self.applied[index].name
1648 1648 if not self.ui.verbose:
1649 1649 p = pname
1650 1650 else:
1651 1651 p = str(self.series.index(pname)) + " " + pname
1652 1652 return p
1653 1653
1654 1654 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1655 1655 force=None, git=False):
1656 1656 def checkseries(patchname):
1657 1657 if patchname in self.series:
1658 1658 raise util.Abort(_('patch %s is already in the series file')
1659 1659 % patchname)
1660 1660 def checkfile(patchname):
1661 1661 if not force and os.path.exists(self.join(patchname)):
1662 1662 raise util.Abort(_('patch "%s" already exists')
1663 1663 % patchname)
1664 1664
1665 1665 if rev:
1666 1666 if files:
1667 1667 raise util.Abort(_('option "-r" not valid when importing '
1668 1668 'files'))
1669 1669 rev = cmdutil.revrange(repo, rev)
1670 1670 rev.sort(reverse=True)
1671 1671 if (len(files) > 1 or len(rev) > 1) and patchname:
1672 1672 raise util.Abort(_('option "-n" not valid when importing multiple '
1673 1673 'patches'))
1674 1674 if rev:
1675 1675 # If mq patches are applied, we can only import revisions
1676 1676 # that form a linear path to qbase.
1677 1677 # Otherwise, they should form a linear path to a head.
1678 1678 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1679 1679 if len(heads) > 1:
1680 1680 raise util.Abort(_('revision %d is the root of more than one '
1681 1681 'branch') % rev[-1])
1682 1682 if self.applied:
1683 1683 base = repo.changelog.node(rev[0])
1684 1684 if base in [n.node for n in self.applied]:
1685 1685 raise util.Abort(_('revision %d is already managed')
1686 1686 % rev[0])
1687 1687 if heads != [self.applied[-1].node]:
1688 1688 raise util.Abort(_('revision %d is not the parent of '
1689 1689 'the queue') % rev[0])
1690 1690 base = repo.changelog.rev(self.applied[0].node)
1691 1691 lastparent = repo.changelog.parentrevs(base)[0]
1692 1692 else:
1693 1693 if heads != [repo.changelog.node(rev[0])]:
1694 1694 raise util.Abort(_('revision %d has unmanaged children')
1695 1695 % rev[0])
1696 1696 lastparent = None
1697 1697
1698 1698 diffopts = self.diffopts({'git': git})
1699 1699 for r in rev:
1700 1700 p1, p2 = repo.changelog.parentrevs(r)
1701 1701 n = repo.changelog.node(r)
1702 1702 if p2 != nullrev:
1703 1703 raise util.Abort(_('cannot import merge revision %d') % r)
1704 1704 if lastparent and lastparent != r:
1705 1705 raise util.Abort(_('revision %d is not the parent of %d')
1706 1706 % (r, lastparent))
1707 1707 lastparent = p1
1708 1708
1709 1709 if not patchname:
1710 1710 patchname = normname('%d.diff' % r)
1711 1711 self.check_reserved_name(patchname)
1712 1712 checkseries(patchname)
1713 1713 checkfile(patchname)
1714 1714 self.full_series.insert(0, patchname)
1715 1715
1716 1716 patchf = self.opener(patchname, "w")
1717 1717 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1718 1718 patchf.close()
1719 1719
1720 1720 se = statusentry(n, patchname)
1721 1721 self.applied.insert(0, se)
1722 1722
1723 1723 self.added.append(patchname)
1724 1724 patchname = None
1725 1725 self.parse_series()
1726 1726 self.applied_dirty = 1
1727 1727 self.series_dirty = True
1728 1728
1729 1729 for i, filename in enumerate(files):
1730 1730 if existing:
1731 1731 if filename == '-':
1732 1732 raise util.Abort(_('-e is incompatible with import from -'))
1733 1733 filename = normname(filename)
1734 1734 self.check_reserved_name(filename)
1735 1735 originpath = self.join(filename)
1736 1736 if not os.path.isfile(originpath):
1737 1737 raise util.Abort(_("patch %s does not exist") % filename)
1738 1738
1739 1739 if patchname:
1740 1740 self.check_reserved_name(patchname)
1741 1741 checkfile(patchname)
1742 1742
1743 1743 self.ui.write(_('renaming %s to %s\n')
1744 1744 % (filename, patchname))
1745 1745 util.rename(originpath, self.join(patchname))
1746 1746 else:
1747 1747 patchname = filename
1748 1748
1749 1749 else:
1750 1750 try:
1751 1751 if filename == '-':
1752 1752 if not patchname:
1753 1753 raise util.Abort(
1754 1754 _('need --name to import a patch from -'))
1755 1755 text = sys.stdin.read()
1756 1756 else:
1757 1757 text = url.open(self.ui, filename).read()
1758 1758 except (OSError, IOError):
1759 1759 raise util.Abort(_("unable to read file %s") % filename)
1760 1760 if not patchname:
1761 1761 patchname = normname(os.path.basename(filename))
1762 1762 self.check_reserved_name(patchname)
1763 1763 checkfile(patchname)
1764 1764 patchf = self.opener(patchname, "w")
1765 1765 patchf.write(text)
1766 1766 if not force:
1767 1767 checkseries(patchname)
1768 1768 if patchname not in self.series:
1769 1769 index = self.full_series_end() + i
1770 1770 self.full_series[index:index] = [patchname]
1771 1771 self.parse_series()
1772 1772 self.series_dirty = True
1773 1773 self.ui.warn(_("adding %s to series file\n") % patchname)
1774 1774 self.added.append(patchname)
1775 1775 patchname = None
1776 1776
1777 1777 def delete(ui, repo, *patches, **opts):
1778 1778 """remove patches from queue
1779 1779
1780 1780 The patches must not be applied, and at least one patch is required. With
1781 1781 -k/--keep, the patch files are preserved in the patch directory.
1782 1782
1783 1783 To stop managing a patch and move it into permanent history,
1784 1784 use the :hg:`qfinish` command."""
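# Illustrative invocations (the patch names are hypothetical):
#   hg qdelete obsolete-fix.patch
#   hg qdelete --keep still-wanted.patch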
1785 1785 q = repo.mq
1786 1786 q.delete(repo, patches, opts)
1787 1787 q.save_dirty()
1788 1788 return 0
1789 1789
1790 1790 def applied(ui, repo, patch=None, **opts):
1791 1791 """print the patches already applied
1792 1792
1793 1793 Returns 0 on success."""
1794 1794
1795 1795 q = repo.mq
1796 1796
1797 1797 if patch:
1798 1798 if patch not in q.series:
1799 1799 raise util.Abort(_("patch %s is not in series file") % patch)
1800 1800 end = q.series.index(patch) + 1
1801 1801 else:
1802 1802 end = q.series_end(True)
1803 1803
1804 1804 if opts.get('last') and not end:
1805 1805 ui.write(_("no patches applied\n"))
1806 1806 return 1
1807 1807 elif opts.get('last') and end == 1:
1808 1808 ui.write(_("only one patch applied\n"))
1809 1809 return 1
1810 1810 elif opts.get('last'):
1811 1811 start = end - 2
1812 1812 end = 1
1813 1813 else:
1814 1814 start = 0
1815 1815
1816 1816 q.qseries(repo, length=end, start=start, status='A',
1817 1817 summary=opts.get('summary'))
1818 1818
1819 1819
1820 1820 def unapplied(ui, repo, patch=None, **opts):
1821 1821 """print the patches not yet applied
1822 1822
1823 1823 Returns 0 on success."""
1824 1824
1825 1825 q = repo.mq
1826 1826 if patch:
1827 1827 if patch not in q.series:
1828 1828 raise util.Abort(_("patch %s is not in series file") % patch)
1829 1829 start = q.series.index(patch) + 1
1830 1830 else:
1831 1831 start = q.series_end(True)
1832 1832
1833 1833 if start == len(q.series) and opts.get('first'):
1834 1834 ui.write(_("all patches applied\n"))
1835 1835 return 1
1836 1836
1837 1837 length = opts.get('first') and 1 or None
1838 1838 q.qseries(repo, start=start, length=length, status='U',
1839 1839 summary=opts.get('summary'))
1840 1840
1841 1841 def qimport(ui, repo, *filename, **opts):
1842 1842 """import a patch
1843 1843
1844 1844 The patch is inserted into the series after the last applied
1845 1845 patch. If no patches have been applied, qimport prepends the patch
1846 1846 to the series.
1847 1847
1848 1848 The patch will have the same name as its source file unless you
1849 1849 give it a new one with -n/--name.
1850 1850
1851 1851 You can register an existing patch inside the patch directory with
1852 1852 the -e/--existing flag.
1853 1853
1854 1854 With -f/--force, an existing patch of the same name will be
1855 1855 overwritten.
1856 1856
1857 1857 An existing changeset may be placed under mq control with -r/--rev
1858 1858 (e.g. qimport --rev tip -n patch will place tip under mq control).
1859 1859 With -g/--git, patches imported with --rev will use the git diff
1860 1860 format. See the diffs help topic for information on why this is
1861 1861 important for preserving rename/copy information and permission
1862 1862 changes.
1863 1863
1864 1864 To import a patch from standard input, pass - as the patch file.
1865 1865 When importing from standard input, a patch name must be specified
1866 1866 using the --name flag.
1867 1867
1868 1868 To import an existing patch while renaming it::
1869 1869
1870 1870 hg qimport -e existing-patch -n new-name
1871 1871
1872 1872 Returns 0 if import succeeded.
1873 1873 """
1874 1874 q = repo.mq
1875 1875 try:
1876 1876 q.qimport(repo, filename, patchname=opts.get('name'),
1877 1877 existing=opts.get('existing'), force=opts.get('force'),
1878 1878 rev=opts.get('rev'), git=opts.get('git'))
1879 1879 finally:
1880 1880 q.save_dirty()
1881 1881
1882 1882 if opts.get('push') and not opts.get('rev'):
1883 1883 return q.push(repo, None)
1884 1884 return 0
1885 1885
1886 1886 def qinit(ui, repo, create):
1887 1887 """initialize a new queue repository
1888 1888
1889 1889 This command also creates a series file for ordering patches, and
1890 1890 an mq-specific .hgignore file in the queue repository, to exclude
1891 1891 the status and guards files (these contain mostly transient state).
1892 1892
1893 1893 Returns 0 if initialization succeeded."""
1894 1894 q = repo.mq
1895 1895 r = q.init(repo, create)
1896 1896 q.save_dirty()
1897 1897 if r:
1898 1898 if not os.path.exists(r.wjoin('.hgignore')):
1899 1899 fp = r.wopener('.hgignore', 'w')
1900 1900 fp.write('^\\.hg\n')
1901 1901 fp.write('^\\.mq\n')
1902 1902 fp.write('syntax: glob\n')
1903 1903 fp.write('status\n')
1904 1904 fp.write('guards\n')
1905 1905 fp.close()
1906 1906 if not os.path.exists(r.wjoin('series')):
1907 1907 r.wopener('series', 'w').close()
1908 1908 r[None].add(['.hgignore', 'series'])
1909 1909 commands.add(ui, r)
1910 1910 return 0
1911 1911
1912 1912 def init(ui, repo, **opts):
1913 1913 """init a new queue repository (DEPRECATED)
1914 1914
1915 1915 The queue repository is unversioned by default. If
1916 1916 -c/--create-repo is specified, qinit will create a separate nested
1917 1917 repository for patches (qinit -c may also be run later to convert
1918 1918 an unversioned patch repository into a versioned one). You can use
1919 1919 qcommit to commit changes to this queue repository.
1920 1920
1921 1921 This command is deprecated. Without -c, it's implied by other relevant
1922 1922 commands. With -c, use :hg:`init --mq` instead."""
1923 1923 return qinit(ui, repo, create=opts.get('create_repo'))
1924 1924
1925 1925 def clone(ui, source, dest=None, **opts):
1926 1926 '''clone main and patch repository at same time
1927 1927
1928 1928 If source is local, destination will have no patches applied. If
1929 1929 source is remote, this command cannot check whether patches are
1930 1930 applied in source, so it cannot guarantee that patches are not
1931 1931 applied in destination. If you clone a remote repository, make
1932 1932 sure it has no patches applied before you clone.
1933 1933
1934 1934 Source patch repository is looked for in <src>/.hg/patches by
1935 1935 default. Use -p <url> to change.
1936 1936
1937 1937 The patch directory must be a nested Mercurial repository, as
1938 1938 would be created by :hg:`init --mq`.
1939 1939
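For example, to clone a project whose patch queue is published at a
non-default location (both URLs are illustrative)::

hg qclone -p http://example.org/proj-patches http://example.org/proj
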
1940 1940 Return 0 on success.
1941 1941 '''
1942 1942 def patchdir(repo):
1943 1943 url = repo.url()
1944 1944 if url.endswith('/'):
1945 1945 url = url[:-1]
1946 1946 return url + '/.hg/patches'
1947 1947 if dest is None:
1948 1948 dest = hg.defaultdest(source)
1949 1949 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
1950 1950 if opts.get('patches'):
1951 1951 patchespath = ui.expandpath(opts.get('patches'))
1952 1952 else:
1953 1953 patchespath = patchdir(sr)
1954 1954 try:
1955 1955 hg.repository(ui, patchespath)
1956 1956 except error.RepoError:
1957 1957 raise util.Abort(_('versioned patch repository not found'
1958 1958 ' (see init --mq)'))
1959 1959 qbase, destrev = None, None
1960 1960 if sr.local():
1961 1961 if sr.mq.applied:
1962 1962 qbase = sr.mq.applied[0].node
1963 1963 if not hg.islocal(dest):
1964 1964 heads = set(sr.heads())
1965 1965 destrev = list(heads.difference(sr.heads(qbase)))
1966 1966 destrev.append(sr.changelog.parents(qbase)[0])
1967 1967 elif sr.capable('lookup'):
1968 1968 try:
1969 1969 qbase = sr.lookup('qbase')
1970 1970 except error.RepoError:
1971 1971 pass
1972 1972 ui.note(_('cloning main repository\n'))
1973 1973 sr, dr = hg.clone(ui, sr.url(), dest,
1974 1974 pull=opts.get('pull'),
1975 1975 rev=destrev,
1976 1976 update=False,
1977 1977 stream=opts.get('uncompressed'))
1978 1978 ui.note(_('cloning patch repository\n'))
1979 1979 hg.clone(ui, opts.get('patches') or patchdir(sr), patchdir(dr),
1980 1980 pull=opts.get('pull'), update=not opts.get('noupdate'),
1981 1981 stream=opts.get('uncompressed'))
1982 1982 if dr.local():
1983 1983 if qbase:
1984 1984 ui.note(_('stripping applied patches from destination '
1985 1985 'repository\n'))
1986 1986 dr.mq.strip(dr, [qbase], update=False, backup=None)
1987 1987 if not opts.get('noupdate'):
1988 1988 ui.note(_('updating destination repository\n'))
1989 1989 hg.update(dr, dr.changelog.tip())
1990 1990
1991 1991 def commit(ui, repo, *pats, **opts):
1992 1992 """commit changes in the queue repository (DEPRECATED)
1993 1993
1994 1994 This command is deprecated; use :hg:`commit --mq` instead."""
1995 1995 q = repo.mq
1996 1996 r = q.qrepo()
1997 1997 if not r:
1998 1998 raise util.Abort('no queue repository')
1999 1999 commands.commit(r.ui, r, *pats, **opts)
2000 2000
2001 2001 def series(ui, repo, **opts):
2002 2002 """print the entire series file
2003 2003
2004 2004 Returns 0 on success."""
2005 2005 repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
2006 2006 return 0
2007 2007
2008 2008 def top(ui, repo, **opts):
2009 2009 """print the name of the current patch
2010 2010
2011 2011 Returns 0 on success."""
2012 2012 q = repo.mq
2013 2013 t = q.applied and q.series_end(True) or 0
2014 2014 if t:
2015 2015 q.qseries(repo, start=t - 1, length=1, status='A',
2016 2016 summary=opts.get('summary'))
2017 2017 else:
2018 2018 ui.write(_("no patches applied\n"))
2019 2019 return 1
2020 2020
2021 2021 def next(ui, repo, **opts):
2022 2022 """print the name of the next patch
2023 2023
2024 2024 Returns 0 on success."""
2025 2025 q = repo.mq
2026 2026 end = q.series_end()
2027 2027 if end == len(q.series):
2028 2028 ui.write(_("all patches applied\n"))
2029 2029 return 1
2030 2030 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2031 2031
2032 2032 def prev(ui, repo, **opts):
2033 2033 """print the name of the previous patch
2034 2034
2035 2035 Returns 0 on success."""
2036 2036 q = repo.mq
2037 2037 l = len(q.applied)
2038 2038 if l == 1:
2039 2039 ui.write(_("only one patch applied\n"))
2040 2040 return 1
2041 2041 if not l:
2042 2042 ui.write(_("no patches applied\n"))
2043 2043 return 1
2044 2044 q.qseries(repo, start=l - 2, length=1, status='A',
2045 2045 summary=opts.get('summary'))
2046 2046
2047 2047 def setupheaderopts(ui, opts):
2048 2048 if not opts.get('user') and opts.get('currentuser'):
2049 2049 opts['user'] = ui.username()
2050 2050 if not opts.get('date') and opts.get('currentdate'):
2051 2051 opts['date'] = "%d %d" % util.makedate()
2052 2052
2053 2053 def new(ui, repo, patch, *args, **opts):
2054 2054 """create a new patch
2055 2055
2056 2056 qnew creates a new patch on top of the currently-applied patch (if
2057 2057 any). The patch will be initialized with any outstanding changes
2058 2058 in the working directory. You may also use -I/--include,
2059 2059 -X/--exclude, and/or a list of files after the patch name to add
2060 2060 only changes to matching files to the new patch, leaving the rest
2061 2061 as uncommitted modifications.
2062 2062
2063 2063 -u/--user and -d/--date can be used to set the (given) user and
2064 2064 date, respectively. -U/--currentuser and -D/--currentdate set user
2065 2065 to current user and date to current date.
2066 2066
2067 2067 -e/--edit, -m/--message or -l/--logfile set the patch header as
2068 2068 well as the commit message. If none is specified, the header is
2069 2069 empty and the commit message is '[mq]: PATCH'.
2070 2070
2071 2071 Use the -g/--git option to keep the patch in the git extended diff
2072 2072 format. Read the diffs help topic for more information on why this
2073 2073 is important for preserving permission changes and copy/rename
2074 2074 information.
2075 2075
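For example, to create a patch that records the current user and a
one-line message (the patch name is illustrative)::

hg qnew -U -m "fix parser crash on empty input" fix-parser.patch
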
2076 2076 Returns 0 on successful creation of a new patch.
2077 2077 """
2078 2078 msg = cmdutil.logmessage(opts)
2079 2079 def getmsg():
2080 2080 return ui.edit(msg, opts.get('user') or ui.username())
2081 2081 q = repo.mq
2082 2082 opts['msg'] = msg
2083 2083 if opts.get('edit'):
2084 2084 opts['msg'] = getmsg
2085 2085 else:
2086 2086 opts['msg'] = msg
2087 2087 setupheaderopts(ui, opts)
2088 2088 q.new(repo, patch, *args, **opts)
2089 2089 q.save_dirty()
2090 2090 return 0
2091 2091
2092 2092 def refresh(ui, repo, *pats, **opts):
2093 2093 """update the current patch
2094 2094
2095 2095 If any file patterns are provided, the refreshed patch will
2096 2096 contain only the modifications that match those patterns; the
2097 2097 remaining modifications will remain in the working directory.
2098 2098
2099 2099 If -s/--short is specified, files currently included in the patch
2100 2100 will be refreshed just like matched files and remain in the patch.
2101 2101
2102 2102 If -e/--edit is specified, Mercurial will start your configured editor for
2103 2103 you to enter a message. In case qrefresh fails, you will find a backup of
2104 2104 your message in ``.hg/last-message.txt``.
2105 2105
2106 2106 hg add/remove/copy/rename work as usual, though you might want to
2107 2107 use git-style patches (-g/--git or [diff] git=1) to track copies
2108 2108 and renames. See the diffs help topic for more information on the
2109 2109 git diff format.
2110 2110
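For example, to fold the outstanding working directory changes into
the top patch and re-edit its message in one step::

hg qrefresh -e
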
2111 2111 Returns 0 on success.
2112 2112 """
2113 2113 q = repo.mq
2114 2114 message = cmdutil.logmessage(opts)
2115 2115 if opts.get('edit'):
2116 2116 if not q.applied:
2117 2117 ui.write(_("no patches applied\n"))
2118 2118 return 1
2119 2119 if message:
2120 2120 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2121 2121 patch = q.applied[-1].name
2122 2122 ph = patchheader(q.join(patch), q.plainmode)
2123 2123 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2124 2124 # We don't want to lose the patch message if qrefresh fails (issue2062)
2125 2125 msgfile = repo.opener('last-message.txt', 'wb')
2126 2126 msgfile.write(message)
2127 2127 msgfile.close()
2128 2128 setupheaderopts(ui, opts)
2129 2129 ret = q.refresh(repo, pats, msg=message, **opts)
2130 2130 q.save_dirty()
2131 2131 return ret
2132 2132
2133 2133 def diff(ui, repo, *pats, **opts):
2134 2134 """diff of the current patch and subsequent modifications
2135 2135
2136 2136 Shows a diff which includes the current patch as well as any
2137 2137 changes which have been made in the working directory since the
2138 2138 last refresh (thus showing what the current patch would become
2139 2139 after a qrefresh).
2140 2140
2141 2141 Use :hg:`diff` if you only want to see the changes made since the
2142 2142 last qrefresh, or :hg:`export qtip` if you want to see changes
2143 2143 made by the current patch without including changes made since the
2144 2144 qrefresh.
2145 2145
2146 2146 Returns 0 on success.
2147 2147 """
2148 2148 repo.mq.diff(repo, pats, opts)
2149 2149 return 0
2150 2150
2151 2151 def fold(ui, repo, *files, **opts):
2152 2152 """fold the named patches into the current patch
2153 2153
2154 2154 Patches must not yet be applied. Each patch will be successively
2155 2155 applied to the current patch in the order given. If all the
2156 2156 patches apply successfully, the current patch will be refreshed
2157 2157 with the new cumulative patch, and the folded patches will be
2158 2158 deleted. With -k/--keep, the folded patch files will not be
2159 2159 removed afterwards.
2160 2160
2161 2161 The header for each folded patch will be concatenated with the
2162 2162 current patch header, separated by a line of ``* * *``.
2163 2163
2164 2164 Returns 0 on success."""
2165 2165
2166 2166 q = repo.mq
2167 2167
2168 2168 if not files:
2169 2169 raise util.Abort(_('qfold requires at least one patch name'))
2170 2170 if not q.check_toppatch(repo)[0]:
2171 2171 raise util.Abort(_('no patches applied'))
2172 2172 q.check_localchanges(repo)
2173 2173
2174 2174 message = cmdutil.logmessage(opts)
2175 2175 if opts.get('edit'):
2176 2176 if message:
2177 2177 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2178 2178
2179 2179 parent = q.lookup('qtip')
2180 2180 patches = []
2181 2181 messages = []
2182 2182 for f in files:
2183 2183 p = q.lookup(f)
2184 2184 if p in patches or p == parent:
2185 2185 ui.warn(_('Skipping already folded patch %s\n') % p)
continue
2186 2186 if q.isapplied(p):
2187 2187 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2188 2188 patches.append(p)
2189 2189
2190 2190 for p in patches:
2191 2191 if not message:
2192 2192 ph = patchheader(q.join(p), q.plainmode)
2193 2193 if ph.message:
2194 2194 messages.append(ph.message)
2195 2195 pf = q.join(p)
2196 2196 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2197 2197 if not patchsuccess:
2198 2198 raise util.Abort(_('error folding patch %s') % p)
2199 2199 cmdutil.updatedir(ui, repo, files)
2200 2200
2201 2201 if not message:
2202 2202 ph = patchheader(q.join(parent), q.plainmode)
2203 2203 message, user = ph.message, ph.user
2204 2204 for msg in messages:
2205 2205 message.append('* * *')
2206 2206 message.extend(msg)
2207 2207 message = '\n'.join(message)
2208 2208
2209 2209 if opts.get('edit'):
2210 2210 message = ui.edit(message, user or ui.username())
2211 2211
2212 2212 diffopts = q.patchopts(q.diffopts(), *patches)
2213 2213 q.refresh(repo, msg=message, git=diffopts.git)
2214 2214 q.delete(repo, patches, opts)
2215 2215 q.save_dirty()
2216 2216
2217 2217 def goto(ui, repo, patch, **opts):
2218 2218 '''push or pop patches until named patch is at top of stack
2219 2219
2220 2220 Returns 0 on success.'''
2221 2221 q = repo.mq
2222 2222 patch = q.lookup(patch)
2223 2223 if q.isapplied(patch):
2224 2224 ret = q.pop(repo, patch, force=opts.get('force'))
2225 2225 else:
2226 2226 ret = q.push(repo, patch, force=opts.get('force'))
2227 2227 q.save_dirty()
2228 2228 return ret
2229 2229
2230 2230 def guard(ui, repo, *args, **opts):
2231 2231 '''set or print guards for a patch
2232 2232
2233 2233 Guards control whether a patch can be pushed. A patch with no
2234 2234 guards is always pushed. A patch with a positive guard ("+foo") is
2235 2235 pushed only if the :hg:`qselect` command has activated it. A patch with
2236 2236 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2237 2237 has activated it.
2238 2238
2239 2239 With no arguments, print the currently active guards.
2240 2240 With arguments, set guards for the named patch.
2241 2241
2242 2242 .. note::
2243 2243 Specifying negative guards now requires '--'.
2244 2244
2245 2245 To set guards on another patch::
2246 2246
2247 2247 hg qguard other.patch -- +2.6.17 -stable
2248 2248
2249 2249 Returns 0 on success.
2250 2250 '''
2251 2251 def status(idx):
2252 2252 guards = q.series_guards[idx] or ['unguarded']
2253 2253 if q.series[idx] in applied:
2254 2254 state = 'applied'
2255 2255 elif q.pushable(idx)[0]:
2256 2256 state = 'unapplied'
2257 2257 else:
2258 2258 state = 'guarded'
2259 2259 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2260 2260 ui.write('%s: ' % ui.label(q.series[idx], label))
2261 2261
2262 2262 for i, guard in enumerate(guards):
2263 2263 if guard.startswith('+'):
2264 2264 ui.write(guard, label='qguard.positive')
2265 2265 elif guard.startswith('-'):
2266 2266 ui.write(guard, label='qguard.negative')
2267 2267 else:
2268 2268 ui.write(guard, label='qguard.unguarded')
2269 2269 if i != len(guards) - 1:
2270 2270 ui.write(' ')
2271 2271 ui.write('\n')
2272 2272 q = repo.mq
2273 2273 applied = set(p.name for p in q.applied)
2274 2274 patch = None
2275 2275 args = list(args)
2276 2276 if opts.get('list'):
2277 2277 if args or opts.get('none'):
2278 2278 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2279 2279 for i in xrange(len(q.series)):
2280 2280 status(i)
2281 2281 return
2282 2282 if not args or args[0][0:1] in '-+':
2283 2283 if not q.applied:
2284 2284 raise util.Abort(_('no patches applied'))
2285 2285 patch = q.applied[-1].name
2286 2286 if patch is None and args[0][0:1] not in '-+':
2287 2287 patch = args.pop(0)
2288 2288 if patch is None:
2289 2289 raise util.Abort(_('no patch to work with'))
2290 2290 if args or opts.get('none'):
2291 2291 idx = q.find_series(patch)
2292 2292 if idx is None:
2293 2293 raise util.Abort(_('no patch named %s') % patch)
2294 2294 q.set_guards(idx, args)
2295 2295 q.save_dirty()
2296 2296 else:
2297 2297 status(q.series.index(q.lookup(patch)))
2298 2298
2299 2299 def header(ui, repo, patch=None):
2300 2300 """print the header of the topmost or specified patch
2301 2301
2302 2302 Returns 0 on success."""
2303 2303 q = repo.mq
2304 2304
2305 2305 if patch:
2306 2306 patch = q.lookup(patch)
2307 2307 else:
2308 2308 if not q.applied:
2309 2309 ui.write(_('no patches applied\n'))
2310 2310 return 1
2311 2311 patch = q.lookup('qtip')
2312 2312 ph = patchheader(q.join(patch), q.plainmode)
2313 2313
2314 2314 ui.write('\n'.join(ph.message) + '\n')
2315 2315
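# lastsavename(path) scans the directory containing 'path' for saved
# queues named 'path.N' and returns (full path, N) for the highest N
# found, or (None, None) when there are none; savename(path) builds
# the next name in that sequence.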
2316 2316 def lastsavename(path):
2317 2317 (directory, base) = os.path.split(path)
2318 2318 names = os.listdir(directory)
2319 2319 namere = re.compile("%s.([0-9]+)" % base)
2320 2320 maxindex = None
2321 2321 maxname = None
2322 2322 for f in names:
2323 2323 m = namere.match(f)
2324 2324 if m:
2325 2325 index = int(m.group(1))
2326 2326 if maxindex is None or index > maxindex:
2327 2327 maxindex = index
2328 2328 maxname = f
2329 2329 if maxname:
2330 2330 return (os.path.join(directory, maxname), maxindex)
2331 2331 return (None, None)
2332 2332
2333 2333 def savename(path):
2334 2334 (last, index) = lastsavename(path)
2335 2335 if last is None:
2336 2336 index = 0
2337 2337 newpath = path + ".%d" % (index + 1)
2338 2338 return newpath
2339 2339
2340 2340 def push(ui, repo, patch=None, **opts):
2341 2341 """push the next patch onto the stack
2342 2342
2343 2343 When -f/--force is applied, all local changes in patched files
2344 2344 will be lost.
2345 2345
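For example, to apply all remaining patches, or to apply a single
patch out of series order (the patch name is illustrative)::

hg qpush -a
hg qpush --move fix-race-condition.patch
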
2346 2346 Return 0 on success.
2347 2347 """
2348 2348 q = repo.mq
2349 2349 mergeq = None
2350 2350
2351 2351 if opts.get('merge'):
2352 2352 if opts.get('name'):
2353 2353 newpath = repo.join(opts.get('name'))
2354 2354 else:
2355 2355 newpath, i = lastsavename(q.path)
2356 2356 if not newpath:
2357 2357 ui.warn(_("no saved queues found, please use -n\n"))
2358 2358 return 1
2359 2359 mergeq = queue(ui, repo.join(""), newpath)
2360 2360 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2361 2361 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2362 2362 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2363 2363 exact=opts.get('exact'))
2364 2364 return ret
2365 2365
2366 2366 def pop(ui, repo, patch=None, **opts):
2367 2367 """pop the current patch off the stack
2368 2368
2369 2369 By default, pops off the top of the patch stack. If given a patch
2370 2370 name, keeps popping off patches until the named patch is at the
2371 2371 top of the stack.
2372 2372
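For example, to pop all applied patches, or to pop until a particular
patch is at the top of the stack (the patch name is illustrative)::

hg qpop -a
hg qpop fix-race-condition.patch
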
2373 2373 Return 0 on success.
2374 2374 """
2375 2375 localupdate = True
2376 2376 if opts.get('name'):
2377 2377 q = queue(ui, repo.join(""), repo.join(opts.get('name')))
2378 2378 ui.warn(_('using patch queue: %s\n') % q.path)
2379 2379 localupdate = False
2380 2380 else:
2381 2381 q = repo.mq
2382 2382 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2383 2383 all=opts.get('all'))
2384 2384 q.save_dirty()
2385 2385 return ret
2386 2386
2387 2387 def rename(ui, repo, patch, name=None, **opts):
2388 2388 """rename a patch
2389 2389
2390 2390 With one argument, renames the current patch to PATCH1.
2391 2391 With two arguments, renames PATCH1 to PATCH2.
2392 2392
2393 2393 Returns 0 on success."""
2394 2394
2395 2395 q = repo.mq
2396 2396
2397 2397 if not name:
2398 2398 name = patch
2399 2399 patch = None
2400 2400
2401 2401 if patch:
2402 2402 patch = q.lookup(patch)
2403 2403 else:
2404 2404 if not q.applied:
2405 2405 ui.write(_('no patches applied\n'))
2406 2406 return
2407 2407 patch = q.lookup('qtip')
2408 2408 absdest = q.join(name)
2409 2409 if os.path.isdir(absdest):
2410 2410 name = normname(os.path.join(name, os.path.basename(patch)))
2411 2411 absdest = q.join(name)
2412 2412 if os.path.exists(absdest):
2413 2413 raise util.Abort(_('%s already exists') % absdest)
2414 2414
2415 2415 if name in q.series:
2416 2416 raise util.Abort(
2417 2417 _('A patch named %s already exists in the series file') % name)
2418 2418
2419 2419 ui.note(_('renaming %s to %s\n') % (patch, name))
2420 2420 i = q.find_series(patch)
2421 2421 guards = q.guard_re.findall(q.full_series[i])
2422 2422 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2423 2423 q.parse_series()
2424 2424 q.series_dirty = 1
2425 2425
2426 2426 info = q.isapplied(patch)
2427 2427 if info:
2428 2428 q.applied[info[0]] = statusentry(info[1], name)
2429 2429 q.applied_dirty = 1
2430 2430
2431 2431 destdir = os.path.dirname(absdest)
2432 2432 if not os.path.isdir(destdir):
2433 2433 os.makedirs(destdir)
2434 2434 util.rename(q.join(patch), absdest)
2435 2435 r = q.qrepo()
2436 2436 if r and patch in r.dirstate:
2437 2437 wctx = r[None]
2438 2438 wlock = r.wlock()
2439 2439 try:
2440 2440 if r.dirstate[patch] == 'a':
2441 2441 r.dirstate.forget(patch)
2442 2442 r.dirstate.add(name)
2443 2443 else:
2444 2444 if r.dirstate[name] == 'r':
2445 2445 wctx.undelete([name])
2446 2446 wctx.copy(patch, name)
2447 2447 wctx.remove([patch], False)
2448 2448 finally:
2449 2449 wlock.release()
2450 2450
2451 2451 q.save_dirty()
2452 2452
2453 2453 def restore(ui, repo, rev, **opts):
2454 2454 """restore the queue state saved by a revision (DEPRECATED)
2455 2455
2456 2456 This command is deprecated, use :hg:`rebase` instead."""
2457 2457 rev = repo.lookup(rev)
2458 2458 q = repo.mq
2459 2459 q.restore(repo, rev, delete=opts.get('delete'),
2460 2460 qupdate=opts.get('update'))
2461 2461 q.save_dirty()
2462 2462 return 0
2463 2463
2464 2464 def save(ui, repo, **opts):
2465 2465 """save current queue state (DEPRECATED)
2466 2466
2467 2467 This command is deprecated, use :hg:`rebase` instead."""
2468 2468 q = repo.mq
2469 2469 message = cmdutil.logmessage(opts)
2470 2470 ret = q.save(repo, msg=message)
2471 2471 if ret:
2472 2472 return ret
2473 2473 q.save_dirty()
2474 2474 if opts.get('copy'):
2475 2475 path = q.path
2476 2476 if opts.get('name'):
2477 2477 newpath = os.path.join(q.basepath, opts.get('name'))
2478 2478 if os.path.exists(newpath):
2479 2479 if not os.path.isdir(newpath):
2480 2480 raise util.Abort(_('destination %s exists and is not '
2481 2481 'a directory') % newpath)
2482 2482 if not opts.get('force'):
2483 2483 raise util.Abort(_('destination %s exists, '
2484 2484 'use -f to force') % newpath)
2485 2485 else:
2486 2486 newpath = savename(path)
2487 2487 ui.warn(_("copy %s to %s\n") % (path, newpath))
2488 2488 util.copyfiles(path, newpath)
2489 2489 if opts.get('empty'):
2490 2490 try:
2491 2491 os.unlink(q.join(q.status_path))
2492 2492 except OSError:
2493 2493 pass
2494 2494 return 0
2495 2495
2496 2496 def strip(ui, repo, *revs, **opts):
2497 2497 """strip changesets and all their descendants from the repository
2498 2498
2499 2499 The strip command removes the specified changesets and all their
2500 2500 descendants. If the working directory has uncommitted changes,
2501 2501 the operation is aborted unless the --force flag is supplied.
2502 2502
2503 2503 If a parent of the working directory is stripped, then the working
2504 2504 directory will automatically be updated to the most recent
2505 2505 available ancestor of the stripped parent after the operation
2506 2506 completes.
2507 2507
2508 2508 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2509 2509 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2510 2510 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2511 2511 where BUNDLE is the bundle file created by the strip. Note that
2512 2512 the local revision numbers will in general be different after the
2513 2513 restore.
2514 2514
2515 2515 Use the --no-backup option to discard the backup bundle once the
2516 2516 operation completes.
2517 2517
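For example, to remove an abandoned changeset together with all of its
descendants (the changeset ID is illustrative)::

hg strip 1a2b3c4d5e6f
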
2518 2518 Return 0 on success.
2519 2519 """
2520 2520 backup = 'all'
2521 2521 if opts.get('backup'):
2522 2522 backup = 'strip'
2523 2523 elif opts.get('no-backup') or opts.get('nobackup'):
2524 2524 backup = 'none'
2525 2525
2526 2526 cl = repo.changelog
2527 2527 revs = set(cmdutil.revrange(repo, revs))
2528 2528 if not revs:
2529 2529 raise util.Abort(_('empty revision set'))
2530 2530
2531 2531 descendants = set(cl.descendants(*revs))
2532 2532 strippedrevs = revs.union(descendants)
2533 2533 roots = revs.difference(descendants)
2534 2534
2535 2535 update = False
2536 2536 # if one of the wdir parent is stripped we'll need
2537 2537 # to update away to an earlier revision
2538 2538 for p in repo.dirstate.parents():
2539 2539 if p != nullid and cl.rev(p) in strippedrevs:
2540 2540 update = True
2541 2541 break
2542 2542
2543 2543 rootnodes = set(cl.node(r) for r in roots)
2544 2544
2545 2545 q = repo.mq
2546 2546 if q.applied:
2547 2547 # refresh queue state if we're about to strip
2548 2548 # applied patches
2549 2549 if cl.rev(repo.lookup('qtip')) in strippedrevs:
2550 2550 q.applied_dirty = True
2551 2551 start = 0
2552 2552 end = len(q.applied)
2553 2553 for i, statusentry in enumerate(q.applied):
2554 2554 if statusentry.node in rootnodes:
2555 2555 # if one of the stripped roots is an applied
2556 2556 # patch, only part of the queue is stripped
2557 2557 start = i
2558 2558 break
2559 2559 del q.applied[start:end]
2560 2560 q.save_dirty()
2561 2561
2562 2562 revs = list(rootnodes)
2563 2563 if update and opts.get('keep'):
2564 2564 wlock = repo.wlock()
2565 2565 try:
2566 2566 urev = repo.mq.qparents(repo, revs[0])
2567 2567 repo.dirstate.rebuild(urev, repo[urev].manifest())
2568 2568 repo.dirstate.write()
2569 2569 update = False
2570 2570 finally:
2571 2571 wlock.release()
2572 2572
2573 2573 repo.mq.strip(repo, revs, backup=backup, update=update,
2574 2574 force=opts.get('force'))
2575 2575 return 0
2576 2576
2577 2577 def select(ui, repo, *args, **opts):
2578 2578 '''set or print guarded patches to push
2579 2579
2580 2580 Use the :hg:`qguard` command to set or print guards on a patch, then use
2581 2581 qselect to tell mq which guards to use. A patch will be pushed if
2582 2582 it has no guards or any positive guards match the currently
2583 2583 selected guard, but will not be pushed if any negative guards
2584 2584 match the current guard. For example::
2585 2585
2586 2586 qguard foo.patch -stable (negative guard)
2587 2587 qguard bar.patch +stable (positive guard)
2588 2588 qselect stable
2589 2589
2590 2590 This activates the "stable" guard. mq will skip foo.patch (because
2591 2591 it has a negative match) but push bar.patch (because it has a
2592 2592 positive match).
2593 2593
2594 2594 With no arguments, prints the currently active guards.
2595 2595 With one argument, sets the active guard.
2596 2596
2597 2597 Use -n/--none to deactivate guards (no other arguments needed).
2598 2598 When no guards are active, patches with positive guards are
2599 2599 skipped and patches with negative guards are pushed.
2600 2600
2601 2601 qselect can change the guards on applied patches. It does not pop
2602 2602 guarded patches by default. Use --pop to pop back to the last
2603 2603 applied patch that is not guarded. Use --reapply (which implies
2604 2604 --pop) to push back to the current patch afterwards, but skip
2605 2605 guarded patches.
2606 2606
2607 2607 Use -s/--series to print a list of all guards in the series file
2608 2608 (no other arguments needed). Use -v for more information.
2609 2609
2610 2610 Returns 0 on success.'''
2611 2611
2612 2612 q = repo.mq
2613 2613 guards = q.active()
2614 2614 if args or opts.get('none'):
2615 2615 old_unapplied = q.unapplied(repo)
2616 2616 old_guarded = [i for i in xrange(len(q.applied)) if
2617 2617 not q.pushable(i)[0]]
2618 2618 q.set_active(args)
2619 2619 q.save_dirty()
2620 2620 if not args:
2621 2621 ui.status(_('guards deactivated\n'))
2622 2622 if not opts.get('pop') and not opts.get('reapply'):
2623 2623 unapplied = q.unapplied(repo)
2624 2624 guarded = [i for i in xrange(len(q.applied))
2625 2625 if not q.pushable(i)[0]]
2626 2626 if len(unapplied) != len(old_unapplied):
2627 2627 ui.status(_('number of unguarded, unapplied patches has '
2628 2628 'changed from %d to %d\n') %
2629 2629 (len(old_unapplied), len(unapplied)))
2630 2630 if len(guarded) != len(old_guarded):
2631 2631 ui.status(_('number of guarded, applied patches has changed '
2632 2632 'from %d to %d\n') %
2633 2633 (len(old_guarded), len(guarded)))
2634 2634 elif opts.get('series'):
2635 2635 guards = {}
2636 2636 noguards = 0
2637 2637 for gs in q.series_guards:
2638 2638 if not gs:
2639 2639 noguards += 1
2640 2640 for g in gs:
2641 2641 guards.setdefault(g, 0)
2642 2642 guards[g] += 1
2643 2643 if ui.verbose:
2644 2644 guards['NONE'] = noguards
2645 2645 guards = guards.items()
2646 2646 guards.sort(key=lambda x: x[0][1:])
2647 2647 if guards:
2648 2648 ui.note(_('guards in series file:\n'))
2649 2649 for guard, count in guards:
2650 2650 ui.note('%2d ' % count)
2651 2651 ui.write(guard, '\n')
2652 2652 else:
2653 2653 ui.note(_('no guards in series file\n'))
2654 2654 else:
2655 2655 if guards:
2656 2656 ui.note(_('active guards:\n'))
2657 2657 for g in guards:
2658 2658 ui.write(g, '\n')
2659 2659 else:
2660 2660 ui.write(_('no active guards\n'))
2661 2661 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
2662 2662 popped = False
2663 2663 if opts.get('pop') or opts.get('reapply'):
2664 2664 for i in xrange(len(q.applied)):
2665 2665 pushable, reason = q.pushable(i)
2666 2666 if not pushable:
2667 2667 ui.status(_('popping guarded patches\n'))
2668 2668 popped = True
2669 2669 if i == 0:
2670 2670 q.pop(repo, all=True)
2671 2671 else:
2672 2672 q.pop(repo, i - 1)
2673 2673 break
2674 2674 if popped:
2675 2675 try:
2676 2676 if reapply:
2677 2677 ui.status(_('reapplying unguarded patches\n'))
2678 2678 q.push(repo, reapply)
2679 2679 finally:
2680 2680 q.save_dirty()
2681 2681
2682 2682 def finish(ui, repo, *revrange, **opts):
2683 2683 """move applied patches into repository history
2684 2684
2685 2685 Finishes the specified revisions (corresponding to applied
2686 2686 patches) by moving them out of mq control into regular repository
2687 2687 history.
2688 2688
2689 2689 Accepts a revision range or the -a/--applied option. If --applied
2690 2690 is specified, all applied mq revisions are removed from mq
2691 2691 control. Otherwise, the given revisions must be at the base of the
2692 2692 stack of applied patches.
2693 2693
2694 2694 This can be especially useful if your changes have been applied to
2695 2695 an upstream repository, or if you are about to push your changes
2696 2696 to upstream.
2697 2697
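For example, to move every applied patch into regular history once it
has been accepted upstream::

hg qfinish --applied
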
2698 2698 Returns 0 on success.
2699 2699 """
2700 2700 if not opts.get('applied') and not revrange:
2701 2701 raise util.Abort(_('no revisions specified'))
2702 2702 elif opts.get('applied'):
2703 2703 revrange = ('qbase::qtip',) + revrange
2704 2704
2705 2705 q = repo.mq
2706 2706 if not q.applied:
2707 2707 ui.status(_('no patches applied\n'))
2708 2708 return 0
2709 2709
2710 2710 revs = cmdutil.revrange(repo, revrange)
2711 2711 q.finish(repo, revs)
2712 2712 q.save_dirty()
2713 2713 return 0
2714 2714
2715 2715 def qqueue(ui, repo, name=None, **opts):
2716 2716 '''manage multiple patch queues
2717 2717
2718 2718 Supports switching between different patch queues, as well as creating
2719 2719 new patch queues and deleting existing ones.
2720 2720
2721 2721 Omitting a queue name or specifying -l/--list will show you the registered
2722 2722 queues - by default the "normal" patches queue is registered. The currently
2723 2723 active queue will be marked with "(active)".
2724 2724
2725 2725 To create a new queue, use -c/--create. The queue is automatically made
2726 2726 active, except when there are applied patches from the currently
2727 2727 active queue in the repository; in that case the new queue will only
2728 2728 be created, and switching to it will fail.
2729 2729
2730 2730 To delete an existing queue, use --delete. You cannot delete the currently
2731 2731 active queue.
2732 2732
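For example, to create and switch to a new queue, switch back to the
default queue, and then remove the extra queue again (the queue name
is illustrative)::

hg qqueue --create refactor
hg qqueue patches
hg qqueue --delete refactor
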
2733 2733 Returns 0 on success.
2734 2734 '''
2735 2735
2736 2736 q = repo.mq
2737 2737
2738 2738 _defaultqueue = 'patches'
2739 2739 _allqueues = 'patches.queues'
2740 2740 _activequeue = 'patches.queue'
2741 2741
2742 2742 def _getcurrent():
2743 2743 cur = os.path.basename(q.path)
2744 2744 if cur.startswith('patches-'):
2745 2745 cur = cur[8:]
2746 2746 return cur
2747 2747
2748 2748 def _noqueues():
2749 2749 try:
2750 2750 fh = repo.opener(_allqueues, 'r')
2751 2751 fh.close()
2752 2752 except IOError:
2753 2753 return True
2754 2754
2755 2755 return False
2756 2756
2757 2757 def _getqueues():
2758 2758 current = _getcurrent()
2759 2759
2760 2760 try:
2761 2761 fh = repo.opener(_allqueues, 'r')
2762 2762 queues = [queue.strip() for queue in fh if queue.strip()]
2763 2763 if current not in queues:
2764 2764 queues.append(current)
2765 2765 except IOError:
2766 2766 queues = [_defaultqueue]
2767 2767
2768 2768 return sorted(queues)
2769 2769
2770 2770 def _setactive(name):
2771 2771 if q.applied:
2772 2772 raise util.Abort(_('patches applied - cannot set new queue active'))
2773 2773 _setactivenocheck(name)
2774 2774
2775 2775 def _setactivenocheck(name):
2776 2776 fh = repo.opener(_activequeue, 'w')
2777 2777 if name != 'patches':
2778 2778 fh.write(name)
2779 2779 fh.close()
2780 2780
2781 2781 def _addqueue(name):
2782 2782 fh = repo.opener(_allqueues, 'a')
2783 2783 fh.write('%s\n' % (name,))
2784 2784 fh.close()
2785 2785
2786 2786 def _queuedir(name):
2787 2787 if name == 'patches':
2788 2788 return repo.join('patches')
2789 2789 else:
2790 2790 return repo.join('patches-' + name)
2791 2791
2792 2792 def _validname(name):
2793 2793 for n in name:
2794 2794 if n in ':\\/.':
2795 2795 return False
2796 2796 return True
2797 2797
2798 2798 def _delete(name):
2799 2799 if name not in existing:
2800 2800 raise util.Abort(_('cannot delete queue that does not exist'))
2801 2801
2802 2802 current = _getcurrent()
2803 2803
2804 2804 if name == current:
2805 2805 raise util.Abort(_('cannot delete currently active queue'))
2806 2806
2807 2807 fh = repo.opener('patches.queues.new', 'w')
2808 2808 for queue in existing:
2809 2809 if queue == name:
2810 2810 continue
2811 2811 fh.write('%s\n' % (queue,))
2812 2812 fh.close()
2813 2813 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2814 2814
2815 2815 if not name or opts.get('list'):
2816 2816 current = _getcurrent()
2817 2817 for queue in _getqueues():
2818 2818 ui.write('%s' % (queue,))
2819 2819 if queue == current and not ui.quiet:
2820 2820 ui.write(_(' (active)\n'))
2821 2821 else:
2822 2822 ui.write('\n')
2823 2823 return
2824 2824
2825 2825 if not _validname(name):
2826 2826 raise util.Abort(
2827 2827 _('invalid queue name, may not contain the characters ":\\/."'))
2828 2828
2829 2829 existing = _getqueues()
2830 2830
2831 2831 if opts.get('create'):
2832 2832 if name in existing:
2833 2833 raise util.Abort(_('queue "%s" already exists') % name)
2834 2834 if _noqueues():
2835 2835 _addqueue(_defaultqueue)
2836 2836 _addqueue(name)
2837 2837 _setactive(name)
2838 2838 elif opts.get('rename'):
2839 2839 current = _getcurrent()
2840 2840 if name == current:
2841 2841 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
2842 2842 if name in existing:
2843 2843 raise util.Abort(_('queue "%s" already exists') % name)
2844 2844
2845 2845 olddir = _queuedir(current)
2846 2846 newdir = _queuedir(name)
2847 2847
2848 2848 if os.path.exists(newdir):
2849 2849 raise util.Abort(_('non-queue directory "%s" already exists') %
2850 2850 newdir)
2851 2851
2852 2852 fh = repo.opener('patches.queues.new', 'w')
2853 2853 for queue in existing:
2854 2854 if queue == current:
2855 2855 fh.write('%s\n' % (name,))
2856 2856 if os.path.exists(olddir):
2857 2857 util.rename(olddir, newdir)
2858 2858 else:
2859 2859 fh.write('%s\n' % (queue,))
2860 2860 fh.close()
2861 2861 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2862 2862 _setactivenocheck(name)
2863 2863 elif opts.get('delete'):
2864 2864 _delete(name)
2865 2865 elif opts.get('purge'):
2866 2866 if name in existing:
2867 2867 _delete(name)
2868 2868 qdir = _queuedir(name)
2869 2869 if os.path.exists(qdir):
2870 2870 shutil.rmtree(qdir)
2871 2871 else:
2872 2872 if name not in existing:
2873 2873 raise util.Abort(_('use --create to create a new queue'))
2874 2874 _setactive(name)
2875 2875
2876 2876 def reposetup(ui, repo):
2877 2877 class mqrepo(repo.__class__):
2878 2878 @util.propertycache
2879 2879 def mq(self):
2880 2880 return queue(self.ui, self.join(""))
2881 2881
2882 2882 def abort_if_wdir_patched(self, errmsg, force=False):
2883 2883 if self.mq.applied and not force:
2884 2884 parent = self.dirstate.parents()[0]
2885 2885 if parent in [s.node for s in self.mq.applied]:
2886 2886 raise util.Abort(errmsg)
2887 2887
2888 2888 def commit(self, text="", user=None, date=None, match=None,
2889 2889 force=False, editor=False, extra={}):
2890 2890 self.abort_if_wdir_patched(
2891 2891 _('cannot commit over an applied mq patch'),
2892 2892 force)
2893 2893
2894 2894 return super(mqrepo, self).commit(text, user, date, match, force,
2895 2895 editor, extra)
2896 2896
2897 2897 def push(self, remote, force=False, revs=None, newbranch=False):
2898 2898 if self.mq.applied and not force:
2899 2899 haspatches = True
2900 2900 if revs:
2901 2901 # Assume applied patches have no non-patch descendants
2902 2902 # and are not on remote already. If they appear in the
2903 2903 # set of resolved 'revs', bail out.
2904 2904 applied = set(e.node for e in self.mq.applied)
2905 2905 haspatches = bool([n for n in revs if n in applied])
2906 2906 if haspatches:
2907 2907 raise util.Abort(_('source has mq patches applied'))
2908 2908 return super(mqrepo, self).push(remote, force, revs, newbranch)
2909 2909
2910 2910 def _findtags(self):
2911 2911 '''augment tags from base class with patch tags'''
2912 2912 result = super(mqrepo, self)._findtags()
2913 2913
2914 2914 q = self.mq
2915 2915 if not q.applied:
2916 2916 return result
2917 2917
2918 2918 mqtags = [(patch.node, patch.name) for patch in q.applied]
2919 2919
2920 2920 if mqtags[-1][0] not in self.changelog.nodemap:
2921 2921 self.ui.warn(_('mq status file refers to unknown node %s\n')
2922 2922 % short(mqtags[-1][0]))
2923 2923 return result
2924 2924
2925 2925 mqtags.append((mqtags[-1][0], 'qtip'))
2926 2926 mqtags.append((mqtags[0][0], 'qbase'))
2927 2927 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2928 2928 tags = result[0]
2929 2929 for patch in mqtags:
2930 2930 if patch[1] in tags:
2931 2931 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2932 2932 % patch[1])
2933 2933 else:
2934 2934 tags[patch[1]] = patch[0]
2935 2935
2936 2936 return result
2937 2937
2938 2938 def _branchtags(self, partial, lrev):
2939 2939 q = self.mq
2940 2940 if not q.applied:
2941 2941 return super(mqrepo, self)._branchtags(partial, lrev)
2942 2942
2943 2943 cl = self.changelog
2944 2944 qbasenode = q.applied[0].node
2945 2945 if qbasenode not in cl.nodemap:
2946 2946 self.ui.warn(_('mq status file refers to unknown node %s\n')
2947 2947 % short(qbasenode))
2948 2948 return super(mqrepo, self)._branchtags(partial, lrev)
2949 2949
2950 2950 qbase = cl.rev(qbasenode)
2951 2951 start = lrev + 1
2952 2952 if start < qbase:
2953 2953 # update the cache (excluding the patches) and save it
2954 2954 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2955 2955 self._updatebranchcache(partial, ctxgen)
2956 2956 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2957 2957 start = qbase
2958 2958 # if start = qbase, the cache is as updated as it should be.
2959 2959 # if start > qbase, the cache includes (part of) the patches.
2960 2960 # we might as well use it, but we won't save it.
2961 2961
2962 2962 # update the cache up to the tip
2963 2963 ctxgen = (self[r] for r in xrange(start, len(cl)))
2964 2964 self._updatebranchcache(partial, ctxgen)
2965 2965
2966 2966 return partial
2967 2967
2968 2968 if repo.local():
2969 2969 repo.__class__ = mqrepo
2970 2970
2971 2971 def mqimport(orig, ui, repo, *args, **kwargs):
2972 2972 if (hasattr(repo, 'abort_if_wdir_patched')
2973 2973 and not kwargs.get('no_commit', False)):
2974 2974 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2975 2975 kwargs.get('force'))
2976 2976 return orig(ui, repo, *args, **kwargs)
2977 2977
2978 2978 def mqinit(orig, ui, *args, **kwargs):
2979 2979 mq = kwargs.pop('mq', None)
2980 2980
2981 2981 if not mq:
2982 2982 return orig(ui, *args, **kwargs)
2983 2983
2984 2984 if args:
2985 2985 repopath = args[0]
2986 2986 if not hg.islocal(repopath):
2987 2987 raise util.Abort(_('only a local queue repository '
2988 2988 'may be initialized'))
2989 2989 else:
2990 2990 repopath = cmdutil.findrepo(os.getcwd())
2991 2991 if not repopath:
2992 2992 raise util.Abort(_('there is no Mercurial repository here '
2993 2993 '(.hg not found)'))
2994 2994 repo = hg.repository(ui, repopath)
2995 2995 return qinit(ui, repo, True)
2996 2996
2997 2997 def mqcommand(orig, ui, repo, *args, **kwargs):
2998 2998 """Add --mq option to operate on patch repository instead of main"""
2999 2999
3000 3000 # some commands do not like getting unknown options
3001 3001 mq = kwargs.pop('mq', None)
3002 3002
3003 3003 if not mq:
3004 3004 return orig(ui, repo, *args, **kwargs)
3005 3005
3006 3006 q = repo.mq
3007 3007 r = q.qrepo()
3008 3008 if not r:
3009 3009 raise util.Abort(_('no queue repository'))
3010 3010 return orig(r.ui, r, *args, **kwargs)
3011 3011
3012 3012 def summary(orig, ui, repo, *args, **kwargs):
3013 3013 r = orig(ui, repo, *args, **kwargs)
3014 3014 q = repo.mq
3015 3015 m = []
3016 3016 a, u = len(q.applied), len(q.unapplied(repo))
3017 3017 if a:
3018 3018 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3019 3019 if u:
3020 3020 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3021 3021 if m:
3022 3022 ui.write("mq: %s\n" % ', '.join(m))
3023 3023 else:
3024 3024 ui.note(_("mq: (empty queue)\n"))
3025 3025 return r
3026 3026
3027 3027 def uisetup(ui):
3028 3028 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3029 3029
3030 3030 extensions.wrapcommand(commands.table, 'import', mqimport)
3031 3031 extensions.wrapcommand(commands.table, 'summary', summary)
3032 3032
3033 3033 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3034 3034 entry[1].extend(mqopt)
3035 3035
3036 3036 nowrap = set(commands.norepo.split(" ") + ['qrecord'])
3037 3037
3038 3038 def dotable(cmdtable):
3039 3039 for cmd in cmdtable.keys():
3040 3040 cmd = cmdutil.parsealiases(cmd)[0]
3041 3041 if cmd in nowrap:
3042 3042 continue
3043 3043 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3044 3044 entry[1].extend(mqopt)
3045 3045
3046 3046 dotable(commands.table)
3047 3047
3048 3048 for extname, extmodule in extensions.extensions():
3049 3049 if extmodule.__file__ != __file__:
3050 3050 dotable(getattr(extmodule, 'cmdtable', {}))
3051 3051
3052 3052 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3053 3053
3054 3054 cmdtable = {
3055 3055 "qapplied":
3056 3056 (applied,
3057 3057 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
3058 3058 _('hg qapplied [-1] [-s] [PATCH]')),
3059 3059 "qclone":
3060 3060 (clone,
3061 3061 [('', 'pull', None, _('use pull protocol to copy metadata')),
3062 3062 ('U', 'noupdate', None, _('do not update the new working directories')),
3063 3063 ('', 'uncompressed', None,
3064 3064 _('use uncompressed transfer (fast over LAN)')),
3065 3065 ('p', 'patches', '',
3066 3066 _('location of source patch repository'), _('REPO')),
3067 3067 ] + commands.remoteopts,
3068 3068 _('hg qclone [OPTION]... SOURCE [DEST]')),
3069 3069 "qcommit|qci":
3070 3070 (commit,
3071 3071 commands.table["^commit|ci"][1],
3072 3072 _('hg qcommit [OPTION]... [FILE]...')),
3073 3073 "^qdiff":
3074 3074 (diff,
3075 3075 commands.diffopts + commands.diffopts2 + commands.walkopts,
3076 3076 _('hg qdiff [OPTION]... [FILE]...')),
3077 3077 "qdelete|qremove|qrm":
3078 3078 (delete,
3079 3079 [('k', 'keep', None, _('keep patch file')),
3080 3080 ('r', 'rev', [],
3081 3081 _('stop managing a revision (DEPRECATED)'), _('REV'))],
3082 3082 _('hg qdelete [-k] [PATCH]...')),
3083 3083 'qfold':
3084 3084 (fold,
3085 3085 [('e', 'edit', None, _('edit patch header')),
3086 3086 ('k', 'keep', None, _('keep folded patch files')),
3087 3087 ] + commands.commitopts,
3088 3088 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
3089 3089 'qgoto':
3090 3090 (goto,
3091 3091 [('f', 'force', None, _('overwrite any local changes'))],
3092 3092 _('hg qgoto [OPTION]... PATCH')),
3093 3093 'qguard':
3094 3094 (guard,
3095 3095 [('l', 'list', None, _('list all patches and guards')),
3096 3096 ('n', 'none', None, _('drop all guards'))],
3097 3097 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
3098 3098 'qheader': (header, [], _('hg qheader [PATCH]')),
3099 3099 "qimport":
3100 3100 (qimport,
3101 3101 [('e', 'existing', None, _('import file in patch directory')),
3102 3102 ('n', 'name', '',
3103 3103 _('name of patch file'), _('NAME')),
3104 3104 ('f', 'force', None, _('overwrite existing files')),
3105 3105 ('r', 'rev', [],
3106 3106 _('place existing revisions under mq control'), _('REV')),
3107 3107 ('g', 'git', None, _('use git extended diff format')),
3108 3108 ('P', 'push', None, _('qpush after importing'))],
3109 3109 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
3110 3110 "^qinit":
3111 3111 (init,
3112 3112 [('c', 'create-repo', None, _('create queue repository'))],
3113 3113 _('hg qinit [-c]')),
3114 3114 "^qnew":
3115 3115 (new,
3116 3116 [('e', 'edit', None, _('edit commit message')),
3117 3117 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
3118 3118 ('g', 'git', None, _('use git extended diff format')),
3119 3119 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
3120 3120 ('u', 'user', '',
3121 3121 _('add "From: <USER>" to patch'), _('USER')),
3122 3122 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
3123 3123 ('d', 'date', '',
3124 3124 _('add "Date: <DATE>" to patch'), _('DATE'))
3125 3125 ] + commands.walkopts + commands.commitopts,
3126 3126 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
3127 3127 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
3128 3128 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
3129 3129 "^qpop":
3130 3130 (pop,
3131 3131 [('a', 'all', None, _('pop all patches')),
3132 3132 ('n', 'name', '',
3133 3133 _('queue name to pop (DEPRECATED)'), _('NAME')),
3134 3134 ('f', 'force', None, _('forget any local changes to patched files'))],
3135 3135 _('hg qpop [-a] [-f] [PATCH | INDEX]')),
3136 3136 "^qpush":
3137 3137 (push,
3138 3138 [('f', 'force', None, _('apply on top of local changes')),
3139 3139 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
3140 3140 ('l', 'list', None, _('list patch name in commit text')),
3141 3141 ('a', 'all', None, _('apply all patches')),
3142 3142 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
3143 3143 ('n', 'name', '',
3144 3144 _('merge queue name (DEPRECATED)'), _('NAME')),
3145 3145 ('', 'move', None, _('reorder patch series and apply only the patch'))],
3146 3146 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
3147 3147 "^qrefresh":
3148 3148 (refresh,
3149 3149 [('e', 'edit', None, _('edit commit message')),
3150 3150 ('g', 'git', None, _('use git extended diff format')),
3151 3151 ('s', 'short', None,
3152 3152 _('refresh only files already in the patch and specified files')),
3153 3153 ('U', 'currentuser', None,
3154 3154 _('add/update author field in patch with current user')),
3155 3155 ('u', 'user', '',
3156 3156 _('add/update author field in patch with given user'), _('USER')),
3157 3157 ('D', 'currentdate', None,
3158 3158 _('add/update date field in patch with current date')),
3159 3159 ('d', 'date', '',
3160 3160 _('add/update date field in patch with given date'), _('DATE'))
3161 3161 ] + commands.walkopts + commands.commitopts,
3162 3162 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
3163 3163 'qrename|qmv':
3164 3164 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
3165 3165 "qrestore":
3166 3166 (restore,
3167 3167 [('d', 'delete', None, _('delete save entry')),
3168 3168 ('u', 'update', None, _('update queue working directory'))],
3169 3169 _('hg qrestore [-d] [-u] REV')),
3170 3170 "qsave":
3171 3171 (save,
3172 3172 [('c', 'copy', None, _('copy patch directory')),
3173 3173 ('n', 'name', '',
3174 3174 _('copy directory name'), _('NAME')),
3175 3175 ('e', 'empty', None, _('clear queue status file')),
3176 3176 ('f', 'force', None, _('force copy'))] + commands.commitopts,
3177 3177 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
3178 3178 "qselect":
3179 3179 (select,
3180 3180 [('n', 'none', None, _('disable all guards')),
3181 3181 ('s', 'series', None, _('list all guards in series file')),
3182 3182 ('', 'pop', None, _('pop to before first guarded applied patch')),
3183 3183 ('', 'reapply', None, _('pop, then reapply patches'))],
3184 3184 _('hg qselect [OPTION]... [GUARD]...')),
3185 3185 "qseries":
3186 3186 (series,
3187 3187 [('m', 'missing', None, _('print patches not in series')),
3188 3188 ] + seriesopts,
3189 3189 _('hg qseries [-ms]')),
3190 3190 "strip":
3191 3191 (strip,
3192 3192 [('f', 'force', None, _('force removal of changesets even if the '
3193 3193 'working directory has uncommitted changes')),
3194 3194 ('b', 'backup', None, _('bundle only changesets with local revision'
3195 3195 ' number greater than REV which are not'
3196 3196 ' descendants of REV (DEPRECATED)')),
3197 3197 ('n', 'no-backup', None, _('no backups')),
3198 3198 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
3199 3199 ('k', 'keep', None, _("do not modify working copy during strip"))],
3200 3200 _('hg strip [-k] [-f] [-n] REV...')),
3201 3201 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3202 3202 "qunapplied":
3203 3203 (unapplied,
3204 3204 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3205 3205 _('hg qunapplied [-1] [-s] [PATCH]')),
3206 3206 "qfinish":
3207 3207 (finish,
3208 3208 [('a', 'applied', None, _('finish all applied changesets'))],
3209 3209 _('hg qfinish [-a] [REV]...')),
3210 3210 'qqueue':
3211 3211 (qqueue,
3212 3212 [
3213 3213 ('l', 'list', False, _('list all available queues')),
3214 3214 ('c', 'create', False, _('create new queue')),
3215 3215 ('', 'rename', False, _('rename active queue')),
3216 3216 ('', 'delete', False, _('delete reference to queue')),
3217 3217 ('', 'purge', False, _('delete queue, and remove patch dir')),
3218 3218 ],
3219 3219 _('[OPTION] [QUEUE]')),
3220 3220 }
3221 3221
3222 3222 colortable = {'qguard.negative': 'red',
3223 3223 'qguard.positive': 'yellow',
3224 3224 'qguard.unguarded': 'green',
3225 3225 'qseries.applied': 'blue bold underline',
3226 3226 'qseries.guarded': 'black bold',
3227 3227 'qseries.missing': 'red bold',
3228 3228 'qseries.unapplied': 'black bold'}
@@ -1,569 +1,569 b''
1 1 # record.py
2 2 #
3 3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''commands to interactively select changes for commit/qrefresh'''
9 9
10 10 from mercurial.i18n import gettext, _
11 11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
12 12 from mercurial import util
13 13 import copy, cStringIO, errno, os, re, tempfile
14 14
15 15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
16 16
17 17 def scanpatch(fp):
18 18 """like patch.iterhunks, but yield different events
19 19
20 20 - ('file', [header_lines + fromfile + tofile])
21 21 - ('context', [context_lines])
22 22 - ('hunk', [hunk_lines])
23 23 - ('range', (-start,len, +start,len, diffp))
24 24 """
25 25 lr = patch.linereader(fp)
26 26
27 27 def scanwhile(first, p):
28 28 """scan lr while predicate holds"""
29 29 lines = [first]
30 30 while True:
31 31 line = lr.readline()
32 32 if not line:
33 33 break
34 34 if p(line):
35 35 lines.append(line)
36 36 else:
37 37 lr.push(line)
38 38 break
39 39 return lines
40 40
41 41 while True:
42 42 line = lr.readline()
43 43 if not line:
44 44 break
45 45 if line.startswith('diff --git a/'):
46 46 def notheader(line):
47 47 s = line.split(None, 1)
48 48 return not s or s[0] not in ('---', 'diff')
49 49 header = scanwhile(line, notheader)
50 50 fromfile = lr.readline()
51 51 if fromfile.startswith('---'):
52 52 tofile = lr.readline()
53 53 header += [fromfile, tofile]
54 54 else:
55 55 lr.push(fromfile)
56 56 yield 'file', header
57 57 elif line[0] == ' ':
58 58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
59 59 elif line[0] in '-+':
60 60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
61 61 else:
62 62 m = lines_re.match(line)
63 63 if m:
64 64 yield 'range', m.groups()
65 65 else:
66 66 raise patch.PatchError('unknown patch content: %r' % line)
67 67
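# A minimal sketch of feeding scanpatch() an in-memory diff; the sample text
# and file name below are illustrative, not taken from this changeset.
#
#   import cStringIO
#   sample = ("diff --git a/foo b/foo\n"
#             "--- a/foo\n"
#             "+++ b/foo\n"
#             "@@ -1,2 +1,2 @@\n"
#             " unchanged\n"
#             "-old\n"
#             "+new\n")
#   for event, data in scanpatch(cStringIO.StringIO(sample)):
#       print event
#
# This prints 'file', 'range', 'context', 'hunk' in that order, which is the
# event stream that parsepatch() below consumes.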
68 68 class header(object):
69 69 """patch header
70 70
71 71 XXX shouldn't we move this to mercurial/patch.py?
72 72 """
73 73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
74 74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
75 75 pretty_re = re.compile('(?:new file|deleted file) ')
76 76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
77 77
78 78 def __init__(self, header):
79 79 self.header = header
80 80 self.hunks = []
81 81
82 82 def binary(self):
83 83 for h in self.header:
84 84 if h.startswith('index '):
85 85 return True
86 86
87 87 def pretty(self, fp):
88 88 for h in self.header:
89 89 if h.startswith('index '):
90 90 fp.write(_('this modifies a binary file (all or nothing)\n'))
91 91 break
92 92 if self.pretty_re.match(h):
93 93 fp.write(h)
94 94 if self.binary():
95 95 fp.write(_('this is a binary file\n'))
96 96 break
97 97 if h.startswith('---'):
98 98 fp.write(_('%d hunks, %d lines changed\n') %
99 99 (len(self.hunks),
100 100 sum([max(h.added, h.removed) for h in self.hunks])))
101 101 break
102 102 fp.write(h)
103 103
104 104 def write(self, fp):
105 105 fp.write(''.join(self.header))
106 106
107 107 def allhunks(self):
108 108 for h in self.header:
109 109 if self.allhunks_re.match(h):
110 110 return True
111 111
112 112 def files(self):
113 113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
114 114 if fromfile == tofile:
115 115 return [fromfile]
116 116 return [fromfile, tofile]
117 117
118 118 def filename(self):
119 119 return self.files()[-1]
120 120
121 121 def __repr__(self):
122 122 return '<header %s>' % (' '.join(map(repr, self.files())))
123 123
124 124 def special(self):
125 125 for h in self.header:
126 126 if self.special_re.match(h):
127 127 return True
128 128
129 129 def countchanges(hunk):
130 130 """hunk -> (n+,n-)"""
131 131 add = len([h for h in hunk if h[0] == '+'])
132 132 rem = len([h for h in hunk if h[0] == '-'])
133 133 return add, rem
134 134
135 135 class hunk(object):
136 136 """patch hunk
137 137
138 138 XXX shouldn't we merge this with patch.hunk?
139 139 """
140 140 maxcontext = 3
141 141
142 142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
143 143 def trimcontext(number, lines):
144 144 delta = len(lines) - self.maxcontext
145 145 if False and delta > 0:
146 146 return number + delta, lines[:self.maxcontext]
147 147 return number, lines
148 148
149 149 self.header = header
150 150 self.fromline, self.before = trimcontext(fromline, before)
151 151 self.toline, self.after = trimcontext(toline, after)
152 152 self.proc = proc
153 153 self.hunk = hunk
154 154 self.added, self.removed = countchanges(self.hunk)
155 155
156 156 def write(self, fp):
157 157 delta = len(self.before) + len(self.after)
158 158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
159 159 delta -= 1
160 160 fromlen = delta + self.removed
161 161 tolen = delta + self.added
162 162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
163 163 (self.fromline, fromlen, self.toline, tolen,
164 164 self.proc and (' ' + self.proc)))
165 165 fp.write(''.join(self.before + self.hunk + self.after))
166 166
167 167 pretty = write
168 168
169 169 def filename(self):
170 170 return self.header.filename()
171 171
172 172 def __repr__(self):
173 173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
174 174
175 175 def parsepatch(fp):
176 176 """patch -> [] of hunks """
177 177 class parser(object):
178 178 """patch parsing state machine"""
179 179 def __init__(self):
180 180 self.fromline = 0
181 181 self.toline = 0
182 182 self.proc = ''
183 183 self.header = None
184 184 self.context = []
185 185 self.before = []
186 186 self.hunk = []
187 187 self.stream = []
188 188
189 189 def addrange(self, limits):
190 190 fromstart, fromend, tostart, toend, proc = limits
191 191 self.fromline = int(fromstart)
192 192 self.toline = int(tostart)
193 193 self.proc = proc
194 194
195 195 def addcontext(self, context):
196 196 if self.hunk:
197 197 h = hunk(self.header, self.fromline, self.toline, self.proc,
198 198 self.before, self.hunk, context)
199 199 self.header.hunks.append(h)
200 200 self.stream.append(h)
201 201 self.fromline += len(self.before) + h.removed
202 202 self.toline += len(self.before) + h.added
203 203 self.before = []
204 204 self.hunk = []
205 205 self.proc = ''
206 206 self.context = context
207 207
208 208 def addhunk(self, hunk):
209 209 if self.context:
210 210 self.before = self.context
211 211 self.context = []
212 212 self.hunk = hunk
213 213
214 214 def newfile(self, hdr):
215 215 self.addcontext([])
216 216 h = header(hdr)
217 217 self.stream.append(h)
218 218 self.header = h
219 219
220 220 def finished(self):
221 221 self.addcontext([])
222 222 return self.stream
223 223
224 224 transitions = {
225 225 'file': {'context': addcontext,
226 226 'file': newfile,
227 227 'hunk': addhunk,
228 228 'range': addrange},
229 229 'context': {'file': newfile,
230 230 'hunk': addhunk,
231 231 'range': addrange},
232 232 'hunk': {'context': addcontext,
233 233 'file': newfile,
234 234 'range': addrange},
235 235 'range': {'context': addcontext,
236 236 'hunk': addhunk},
237 237 }
238 238
239 239 p = parser()
240 240
241 241 state = 'context'
242 242 for newstate, data in scanpatch(fp):
243 243 try:
244 244 p.transitions[state][newstate](p, data)
245 245 except KeyError:
246 246 raise patch.PatchError('unhandled transition: %s -> %s' %
247 247 (state, newstate))
248 248 state = newstate
249 249 return p.finished()
250 250
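# Roughly, parsepatch() turns the scanpatch() event stream into a flat list
# of header and hunk objects. An illustrative (hypothetical) driver, reusing
# the sample diff sketched above:
#
#   fp = cStringIO.StringIO(sample)
#   for item in parsepatch(fp):
#       if isinstance(item, header):
#           print 'file:', item.filename()
#       else:
#           print '  hunk @', item.fromline
#
# Each header carries its hunks both in the flat list and in header.hunks,
# which is what filterpatch() below relies on.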
251 251 def filterpatch(ui, chunks):
252 252 """Interactively filter patch chunks into applied-only chunks"""
253 253 chunks = list(chunks)
254 254 chunks.reverse()
255 255 seen = set()
256 256 def consumefile():
257 257 """fetch next portion from chunks until a 'header' is seen
258 258 NB: header == new-file mark
259 259 """
260 260 consumed = []
261 261 while chunks:
262 262 if isinstance(chunks[-1], header):
263 263 break
264 264 else:
265 265 consumed.append(chunks.pop())
266 266 return consumed
267 267
268 268 resp_all = [None] # these two are changed from inside prompt,
269 269 resp_file = [None] # so they can't be ordinary local variables
270 270 applied = {} # 'filename' -> [] of chunks
271 271 def prompt(query):
272 272 """prompt query, and process base inputs
273 273
274 274 - y/n for the rest of file
275 275 - y/n for the rest
276 276 - ? (help)
277 277 - q (quit)
278 278
279 279 Returns True/False and sets resp_all and resp_file as
280 280 appropriate.
281 281 """
282 282 if resp_all[0] is not None:
283 283 return resp_all[0]
284 284 if resp_file[0] is not None:
285 285 return resp_file[0]
286 286 while True:
287 287 resps = _('[Ynsfdaq?]')
288 288 choices = (_('&Yes, record this change'),
289 289 _('&No, skip this change'),
290 290 _('&Skip remaining changes to this file'),
291 291 _('Record remaining changes to this &file'),
292 292 _('&Done, skip remaining changes and files'),
293 293 _('Record &all changes to all remaining files'),
294 294 _('&Quit, recording no changes'),
295 295 _('&?'))
296 296 r = ui.promptchoice("%s %s" % (query, resps), choices)
297 297 ui.write("\n")
298 298 if r == 7: # ?
299 299 doc = gettext(record.__doc__)
300 300 c = doc.find('::') + 2
301 301 for l in doc[c:].splitlines():
302 302 if l.startswith(' '):
303 303 ui.write(l.strip(), '\n')
304 304 continue
305 305 elif r == 0: # yes
306 306 ret = True
307 307 elif r == 1: # no
308 308 ret = False
309 309 elif r == 2: # Skip
310 310 ret = resp_file[0] = False
311 311 elif r == 3: # file (Record remaining)
312 312 ret = resp_file[0] = True
313 313 elif r == 4: # done, skip remaining
314 314 ret = resp_all[0] = False
315 315 elif r == 5: # all
316 316 ret = resp_all[0] = True
317 317 elif r == 6: # quit
318 318 raise util.Abort(_('user quit'))
319 319 return ret
320 320 pos, total = 0, len(chunks) - 1
321 321 while chunks:
322 322 pos = total - len(chunks) + 1
323 323 chunk = chunks.pop()
324 324 if isinstance(chunk, header):
325 325 # new-file mark
326 326 resp_file = [None]
327 327 fixoffset = 0
328 328 hdr = ''.join(chunk.header)
329 329 if hdr in seen:
330 330 consumefile()
331 331 continue
332 332 seen.add(hdr)
333 333 if resp_all[0] is None:
334 334 chunk.pretty(ui)
335 335 r = prompt(_('examine changes to %s?') %
336 336 _(' and ').join(map(repr, chunk.files())))
337 337 if r:
338 338 applied[chunk.filename()] = [chunk]
339 339 if chunk.allhunks():
340 340 applied[chunk.filename()] += consumefile()
341 341 else:
342 342 consumefile()
343 343 else:
344 344 # new hunk
345 345 if resp_file[0] is None and resp_all[0] is None:
346 346 chunk.pretty(ui)
347 r = total == 1 and prompt(_('record this change to %r?') %
348 chunk.filename()) \
349 or prompt(_('record change %d/%d to %r?') %
350 (pos, total, chunk.filename()))
347 r = (total == 1
348 and prompt(_('record this change to %r?') % chunk.filename())
349 or prompt(_('record change %d/%d to %r?') %
350 (pos, total, chunk.filename())))
351 351 if r:
352 352 if fixoffset:
353 353 chunk = copy.copy(chunk)
354 354 chunk.toline += fixoffset
355 355 applied[chunk.filename()].append(chunk)
356 356 else:
357 357 fixoffset += chunk.removed - chunk.added
358 358 return sum([h for h in applied.itervalues()
359 359 if h[0].special() or len(h) > 1], [])
360 360
361 361 def record(ui, repo, *pats, **opts):
362 362 '''interactively select changes to commit
363 363
364 364 If a list of files is omitted, all changes reported by :hg:`status`
365 365 will be candidates for recording.
366 366
367 367 See :hg:`help dates` for a list of formats valid for -d/--date.
368 368
369 369 You will be prompted for whether to record changes to each
370 370 modified file, and for files with multiple changes, for each
371 371 change to use. For each query, the following responses are
372 372 possible::
373 373
374 374 y - record this change
375 375 n - skip this change
376 376
377 377 s - skip remaining changes to this file
378 378 f - record remaining changes to this file
379 379
380 380 d - done, skip remaining changes and files
381 381 a - record all changes to all remaining files
382 382 q - quit, recording no changes
383 383
384 384 ? - display help
385 385
386 386 This command is not available when committing a merge.'''
387 387
388 388 dorecord(ui, repo, commands.commit, *pats, **opts)
389 389
390 390
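# A typical interactive session might look like this (file name, hunk and
# answers are illustrative, not taken from this changeset):
#
#   $ hg record -m "tweak foo"
#   diff --git a/foo b/foo
#   1 hunks, 1 lines changed
#   examine changes to 'foo'? [Ynsfdaq?] y
#   @@ -1,2 +1,2 @@
#    unchanged
#   -old
#   +new
#   record this change to 'foo'? [Ynsfdaq?] y
#
# Only the hunks answered 'y' end up in the commit; everything else stays as
# local modifications in the working directory.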
391 391 def qrecord(ui, repo, patch, *pats, **opts):
392 392 '''interactively record a new patch
393 393
394 394 See :hg:`help qnew` & :hg:`help record` for more information and
395 395 usage.
396 396 '''
397 397
398 398 try:
399 399 mq = extensions.find('mq')
400 400 except KeyError:
401 401 raise util.Abort(_("'mq' extension not loaded"))
402 402
403 403 def committomq(ui, repo, *pats, **opts):
404 404 mq.new(ui, repo, patch, *pats, **opts)
405 405
406 406 opts = opts.copy()
407 407 opts['force'] = True # always 'qnew -f'
408 408 dorecord(ui, repo, committomq, *pats, **opts)
409 409
410 410
411 411 def dorecord(ui, repo, commitfunc, *pats, **opts):
412 412 if not ui.interactive():
413 413 raise util.Abort(_('running non-interactively, use commit instead'))
414 414
415 415 def recordfunc(ui, repo, message, match, opts):
416 416 """This is generic record driver.
417 417
418 418 Its job is to interactively filter local changes, and accordingly
419 419 prepare the working directory so that the job can be delegated to a
420 420 non-interactive commit command such as 'commit' or 'qrefresh'.
421 421
422 422 After the actual job is done by the non-interactive command, the
423 423 working directory state is restored to the original.
424 424
425 425 In the end we'll record interesting changes, and everything else will be
426 426 left in place, so the user can continue his work.
427 427 """
428 428
429 429 merge = len(repo[None].parents()) > 1
430 430 if merge:
431 431 raise util.Abort(_('cannot partially commit a merge '
432 432 '(use "hg commit" instead)'))
433 433
434 434 changes = repo.status(match=match)[:3]
435 435 diffopts = mdiff.diffopts(git=True, nodates=True)
436 436 chunks = patch.diff(repo, changes=changes, opts=diffopts)
437 437 fp = cStringIO.StringIO()
438 438 fp.write(''.join(chunks))
439 439 fp.seek(0)
440 440
441 441 # 1. filter the patch, so we have the intending-to-apply subset of it
442 442 chunks = filterpatch(ui, parsepatch(fp))
443 443 del fp
444 444
445 445 contenders = set()
446 446 for h in chunks:
447 447 try:
448 448 contenders.update(set(h.files()))
449 449 except AttributeError:
450 450 pass
451 451
452 452 changed = changes[0] + changes[1] + changes[2]
453 453 newfiles = [f for f in changed if f in contenders]
454 454 if not newfiles:
455 455 ui.status(_('no changes to record\n'))
456 456 return 0
457 457
458 458 modified = set(changes[0])
459 459
460 460 # 2. backup changed files, so we can restore them in the end
461 461 backups = {}
462 462 backupdir = repo.join('record-backups')
463 463 try:
464 464 os.mkdir(backupdir)
465 465 except OSError, err:
466 466 if err.errno != errno.EEXIST:
467 467 raise
468 468 try:
469 469 # backup continues
470 470 for f in newfiles:
471 471 if f not in modified:
472 472 continue
473 473 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
474 474 dir=backupdir)
475 475 os.close(fd)
476 476 ui.debug('backup %r as %r\n' % (f, tmpname))
477 477 util.copyfile(repo.wjoin(f), tmpname)
478 478 backups[f] = tmpname
479 479
480 480 fp = cStringIO.StringIO()
481 481 for c in chunks:
482 482 if c.filename() in backups:
483 483 c.write(fp)
484 484 dopatch = fp.tell()
485 485 fp.seek(0)
486 486
487 487 # 3a. apply filtered patch to clean repo (clean)
488 488 if backups:
489 489 hg.revert(repo, repo.dirstate.parents()[0],
490 490 lambda key: key in backups)
491 491
492 492 # 3b. (apply)
493 493 if dopatch:
494 494 try:
495 495 ui.debug('applying patch\n')
496 496 ui.debug(fp.getvalue())
497 497 pfiles = {}
498 498 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
499 499 eolmode=None)
500 500 cmdutil.updatedir(ui, repo, pfiles)
501 501 except patch.PatchError, err:
502 502 raise util.Abort(str(err))
503 503 del fp
504 504
505 505 # 4. We prepared working directory according to filtered patch.
506 506 # Now is the time to delegate the job to commit/qrefresh or the like!
507 507
508 508 # it is important to first chdir to repo root -- we'll call a
509 509 # highlevel command with list of pathnames relative to repo root
510 510 cwd = os.getcwd()
511 511 os.chdir(repo.root)
512 512 try:
513 513 commitfunc(ui, repo, *newfiles, **opts)
514 514 finally:
515 515 os.chdir(cwd)
516 516
517 517 return 0
518 518 finally:
519 519 # 5. finally restore backed-up files
520 520 try:
521 521 for realname, tmpname in backups.iteritems():
522 522 ui.debug('restoring %r to %r\n' % (tmpname, realname))
523 523 util.copyfile(tmpname, repo.wjoin(realname))
524 524 os.unlink(tmpname)
525 525 os.rmdir(backupdir)
526 526 except OSError:
527 527 pass
528 528
529 529 # wrap ui.write so diff output can be labeled/colorized
530 530 def wrapwrite(orig, *args, **kw):
531 531 label = kw.pop('label', '')
532 532 for chunk, l in patch.difflabel(lambda: args):
533 533 orig(chunk, label=label + l)
534 534 oldwrite = ui.write
535 535 extensions.wrapfunction(ui, 'write', wrapwrite)
536 536 try:
537 537 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
538 538 finally:
539 539 ui.write = oldwrite
540 540
541 541 cmdtable = {
542 542 "record":
543 543 (record,
544 544
545 545 # add commit options
546 546 commands.table['^commit|ci'][1],
547 547
548 548 _('hg record [OPTION]... [FILE]...')),
549 549 }
550 550
551 551
552 552 def uisetup(ui):
553 553 try:
554 554 mq = extensions.find('mq')
555 555 except KeyError:
556 556 return
557 557
558 558 qcmdtable = {
559 559 "qrecord":
560 560 (qrecord,
561 561
562 562 # add qnew options, except '--force'
563 563 [opt for opt in mq.cmdtable['^qnew'][1] if opt[1] != 'force'],
564 564
565 565 _('hg qrecord [OPTION]... PATCH [FILE]...')),
566 566 }
567 567
568 568 cmdtable.update(qcmdtable)
569 569
@@ -1,1917 +1,1917 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import lock, transaction, store, encoding
13 13 import util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 import url as urlmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 propertycache = util.propertycache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 24 supportedformats = set(('revlogv1', 'parentdelta'))
25 25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 26 'dotencode'))
27 27
28 28 def __init__(self, baseui, path=None, create=0):
29 29 repo.repository.__init__(self)
30 30 self.root = os.path.realpath(util.expandpath(path))
31 31 self.path = os.path.join(self.root, ".hg")
32 32 self.origroot = path
33 33 self.auditor = util.path_auditor(self.root, self._checknested)
34 34 self.opener = util.opener(self.path)
35 35 self.wopener = util.opener(self.root)
36 36 self.baseui = baseui
37 37 self.ui = baseui.copy()
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 extensions.loadall(self.ui)
42 42 except IOError:
43 43 pass
44 44
45 45 if not os.path.isdir(self.path):
46 46 if create:
47 47 if not os.path.exists(path):
48 48 util.makedirs(path)
49 49 os.mkdir(self.path)
50 50 requirements = ["revlogv1"]
51 51 if self.ui.configbool('format', 'usestore', True):
52 52 os.mkdir(os.path.join(self.path, "store"))
53 53 requirements.append("store")
54 54 if self.ui.configbool('format', 'usefncache', True):
55 55 requirements.append("fncache")
56 56 if self.ui.configbool('format', 'dotencode', True):
57 57 requirements.append('dotencode')
58 58 # create an invalid changelog
59 59 self.opener("00changelog.i", "a").write(
60 60 '\0\0\0\2' # represents revlogv2
61 61 ' dummy changelog to prevent using the old repo layout'
62 62 )
63 63 if self.ui.configbool('format', 'parentdelta', False):
64 64 requirements.append("parentdelta")
65 65 else:
66 66 raise error.RepoError(_("repository %s not found") % path)
67 67 elif create:
68 68 raise error.RepoError(_("repository %s already exists") % path)
69 69 else:
70 70 # find requirements
71 71 requirements = set()
72 72 try:
73 73 requirements = set(self.opener("requires").read().splitlines())
74 74 except IOError, inst:
75 75 if inst.errno != errno.ENOENT:
76 76 raise
77 77 for r in requirements - self.supported:
78 78 raise error.RepoError(_("requirement '%s' not supported") % r)
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener("sharedpath").read())
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100 # These two define the set of tags for this repository. _tags
101 101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 102 # 'local'. (Global tags are defined by .hgtags across all
103 103 # heads, and local tags are defined in .hg/localtags.) They
104 104 # constitute the in-memory cache of tags.
105 105 self._tags = None
106 106 self._tagtypes = None
107 107
108 108 self._branchcache = None
109 109 self._branchcachetip = None
110 110 self.nodetagscache = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 def _applyrequirements(self, requirements):
116 116 self.requirements = requirements
117 117 self.sopener.options = {}
118 118 if 'parentdelta' in requirements:
119 119 self.sopener.options['parentdelta'] = 1
120 120
121 121 def _writerequirements(self):
122 122 reqfile = self.opener("requires", "w")
123 123 for r in self.requirements:
124 124 reqfile.write("%s\n" % r)
125 125 reqfile.close()
126 126
127 127 def _checknested(self, path):
128 128 """Determine if path is a legal nested repository."""
129 129 if not path.startswith(self.root):
130 130 return False
131 131 subpath = path[len(self.root) + 1:]
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = os.sep.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == subpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164
165 165 @propertycache
166 166 def changelog(self):
167 167 c = changelog.changelog(self.sopener)
168 168 if 'HG_PENDING' in os.environ:
169 169 p = os.environ['HG_PENDING']
170 170 if p.startswith(self.root):
171 171 c.readpending('00changelog.i.a')
172 172 self.sopener.options['defversion'] = c.version
173 173 return c
174 174
175 175 @propertycache
176 176 def manifest(self):
177 177 return manifest.manifest(self.sopener)
178 178
179 179 @propertycache
180 180 def dirstate(self):
181 181 warned = [0]
182 182 def validate(node):
183 183 try:
184 184 r = self.changelog.rev(node)
185 185 return node
186 186 except error.LookupError:
187 187 if not warned[0]:
188 188 warned[0] = True
189 189 self.ui.warn(_("warning: ignoring unknown"
190 190 " working parent %s!\n") % short(node))
191 191 return nullid
192 192
193 193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194 194
195 195 def __getitem__(self, changeid):
196 196 if changeid is None:
197 197 return context.workingctx(self)
198 198 return context.changectx(self, changeid)
199 199
200 200 def __contains__(self, changeid):
201 201 try:
202 202 return bool(self.lookup(changeid))
203 203 except error.RepoLookupError:
204 204 return False
205 205
206 206 def __nonzero__(self):
207 207 return True
208 208
209 209 def __len__(self):
210 210 return len(self.changelog)
211 211
212 212 def __iter__(self):
213 213 for i in xrange(len(self)):
214 214 yield i
215 215
216 216 def url(self):
217 217 return 'file:' + self.root
218 218
219 219 def hook(self, name, throw=False, **args):
220 220 return hook.hook(self.ui, self, name, throw, **args)
221 221
222 222 tag_disallowed = ':\r\n'
223 223
224 224 def _tag(self, names, node, message, local, user, date, extra={}):
225 225 if isinstance(names, str):
226 226 allchars = names
227 227 names = (names,)
228 228 else:
229 229 allchars = ''.join(names)
230 230 for c in self.tag_disallowed:
231 231 if c in allchars:
232 232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233 233
234 234 branches = self.branchmap()
235 235 for name in names:
236 236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 237 local=local)
238 238 if name in branches:
239 239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 240 " branch name\n") % name)
241 241
242 242 def writetags(fp, names, munge, prevtags):
243 243 fp.seek(0, 2)
244 244 if prevtags and prevtags[-1] != '\n':
245 245 fp.write('\n')
246 246 for name in names:
247 247 m = munge and munge(name) or name
248 248 if self._tagtypes and name in self._tagtypes:
249 249 old = self._tags.get(name, nullid)
250 250 fp.write('%s %s\n' % (hex(old), m))
251 251 fp.write('%s %s\n' % (hex(node), m))
252 252 fp.close()
253 253
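# The helper above appends one "<40-hex-node> <tagname>" line per tag to
# .hgtags (or to .hg/localtags for local tags); when an existing tag is being
# moved it first writes a line for the old node so the tag history is kept,
# e.g. (hypothetical values):
#
#   0123456789abcdef0123456789abcdef01234567 v1.0
#   89abcdef0123456789abcdef0123456789abcdef v1.0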
254 254 prevtags = ''
255 255 if local:
256 256 try:
257 257 fp = self.opener('localtags', 'r+')
258 258 except IOError:
259 259 fp = self.opener('localtags', 'a')
260 260 else:
261 261 prevtags = fp.read()
262 262
263 263 # local tags are stored in the current charset
264 264 writetags(fp, names, None, prevtags)
265 265 for name in names:
266 266 self.hook('tag', node=hex(node), tag=name, local=local)
267 267 return
268 268
269 269 try:
270 270 fp = self.wfile('.hgtags', 'rb+')
271 271 except IOError:
272 272 fp = self.wfile('.hgtags', 'ab')
273 273 else:
274 274 prevtags = fp.read()
275 275
276 276 # committed tags are stored in UTF-8
277 277 writetags(fp, names, encoding.fromlocal, prevtags)
278 278
279 279 if '.hgtags' not in self.dirstate:
280 280 self[None].add(['.hgtags'])
281 281
282 282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284 284
285 285 for name in names:
286 286 self.hook('tag', node=hex(node), tag=name, local=local)
287 287
288 288 return tagnode
289 289
290 290 def tag(self, names, node, message, local, user, date):
291 291 '''tag a revision with one or more symbolic names.
292 292
293 293 names is a list of strings or, when adding a single tag, names may be a
294 294 string.
295 295
296 296 if local is True, the tags are stored in a per-repository file.
297 297 otherwise, they are stored in the .hgtags file, and a new
298 298 changeset is committed with the change.
299 299
300 300 keyword arguments:
301 301
302 302 local: whether to store tags in non-version-controlled file
303 303 (default False)
304 304
305 305 message: commit message to use if committing
306 306
307 307 user: name of user to use if committing
308 308
309 309 date: date tuple to use if committing'''
310 310
311 311 for x in self.status()[:5]:
312 312 if '.hgtags' in x:
313 313 raise util.Abort(_('working copy of .hgtags is changed '
314 314 '(please commit .hgtags manually)'))
315 315
316 316 self.tags() # instantiate the cache
317 317 self._tag(names, node, message, local, user, date)
318 318
319 319 def tags(self):
320 320 '''return a mapping of tag to node'''
321 321 if self._tags is None:
322 322 (self._tags, self._tagtypes) = self._findtags()
323 323
324 324 return self._tags
325 325
326 326 def _findtags(self):
327 327 '''Do the hard work of finding tags. Return a pair of dicts
328 328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 329 maps tag name to a string like \'global\' or \'local\'.
330 330 Subclasses or extensions are free to add their own tags, but
331 331 should be aware that the returned dicts will be retained for the
332 332 duration of the localrepo object.'''
333 333
334 334 # XXX what tagtype should subclasses/extensions use? Currently
335 335 # mq and bookmarks add tags, but do not set the tagtype at all.
336 336 # Should each extension invent its own tag type? Should there
337 337 # be one tagtype for all such "virtual" tags? Or is the status
338 338 # quo fine?
339 339
340 340 alltags = {} # map tag name to (node, hist)
341 341 tagtypes = {}
342 342
343 343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345 345
346 346 # Build the return dicts. Have to re-encode tag names because
347 347 # the tags module always uses UTF-8 (in order not to lose info
348 348 # writing to the cache), but the rest of Mercurial wants them in
349 349 # local encoding.
350 350 tags = {}
351 351 for (name, (node, hist)) in alltags.iteritems():
352 352 if node != nullid:
353 353 tags[encoding.tolocal(name)] = node
354 354 tags['tip'] = self.changelog.tip()
355 355 tagtypes = dict([(encoding.tolocal(name), value)
356 356 for (name, value) in tagtypes.iteritems()])
357 357 return (tags, tagtypes)
358 358
359 359 def tagtype(self, tagname):
360 360 '''
361 361 return the type of the given tag. result can be:
362 362
363 363 'local' : a local tag
364 364 'global' : a global tag
365 365 None : tag does not exist
366 366 '''
367 367
368 368 self.tags()
369 369
370 370 return self._tagtypes.get(tagname)
371 371
372 372 def tagslist(self):
373 373 '''return a list of tags ordered by revision'''
374 374 l = []
375 375 for t, n in self.tags().iteritems():
376 376 try:
377 377 r = self.changelog.rev(n)
378 378 except:
379 379 r = -2 # sort to the beginning of the list if unknown
380 380 l.append((r, t, n))
381 381 return [(t, n) for r, t, n in sorted(l)]
382 382
383 383 def nodetags(self, node):
384 384 '''return the tags associated with a node'''
385 385 if not self.nodetagscache:
386 386 self.nodetagscache = {}
387 387 for t, n in self.tags().iteritems():
388 388 self.nodetagscache.setdefault(n, []).append(t)
389 389 for tags in self.nodetagscache.itervalues():
390 390 tags.sort()
391 391 return self.nodetagscache.get(node, [])
392 392
393 393 def _branchtags(self, partial, lrev):
394 394 # TODO: rename this function?
395 395 tiprev = len(self) - 1
396 396 if lrev != tiprev:
397 397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 398 self._updatebranchcache(partial, ctxgen)
399 399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400 400
401 401 return partial
402 402
403 403 def updatebranchcache(self):
404 404 tip = self.changelog.tip()
405 405 if self._branchcache is not None and self._branchcachetip == tip:
406 406 return self._branchcache
407 407
408 408 oldtip = self._branchcachetip
409 409 self._branchcachetip = tip
410 410 if oldtip is None or oldtip not in self.changelog.nodemap:
411 411 partial, last, lrev = self._readbranchcache()
412 412 else:
413 413 lrev = self.changelog.rev(oldtip)
414 414 partial = self._branchcache
415 415
416 416 self._branchtags(partial, lrev)
417 417 # this private cache holds all heads (not just tips)
418 418 self._branchcache = partial
419 419
420 420 def branchmap(self):
421 421 '''returns a dictionary {branch: [branchheads]}'''
422 422 self.updatebranchcache()
423 423 return self._branchcache
424 424
425 425 def branchtags(self):
426 426 '''return a dict where branch names map to the tipmost head of
427 427 the branch, open heads come before closed'''
428 428 bt = {}
429 429 for bn, heads in self.branchmap().iteritems():
430 430 tip = heads[-1]
431 431 for h in reversed(heads):
432 432 if 'close' not in self.changelog.read(h)[5]:
433 433 tip = h
434 434 break
435 435 bt[bn] = tip
436 436 return bt
437 437
438 438 def _readbranchcache(self):
439 439 partial = {}
440 440 try:
441 441 f = self.opener("branchheads.cache")
442 442 lines = f.read().split('\n')
443 443 f.close()
444 444 except (IOError, OSError):
445 445 return {}, nullid, nullrev
446 446
447 447 try:
448 448 last, lrev = lines.pop(0).split(" ", 1)
449 449 last, lrev = bin(last), int(lrev)
450 450 if lrev >= len(self) or self[lrev].node() != last:
451 451 # invalidate the cache
452 452 raise ValueError('invalidating branch cache (tip differs)')
453 453 for l in lines:
454 454 if not l:
455 455 continue
456 456 node, label = l.split(" ", 1)
457 457 label = encoding.tolocal(label.strip())
458 458 partial.setdefault(label, []).append(bin(node))
459 459 except KeyboardInterrupt:
460 460 raise
461 461 except Exception, inst:
462 462 if self.ui.debugflag:
463 463 self.ui.warn(str(inst), '\n')
464 464 partial, last, lrev = {}, nullid, nullrev
465 465 return partial, last, lrev
466 466
467 467 def _writebranchcache(self, branches, tip, tiprev):
468 468 try:
469 469 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 470 f.write("%s %s\n" % (hex(tip), tiprev))
471 471 for label, nodes in branches.iteritems():
472 472 for node in nodes:
473 473 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
474 474 f.rename()
475 475 except (IOError, OSError):
476 476 pass
477 477
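# On disk, the .hg/branchheads.cache file written above looks like this
# (hypothetical values):
#
#   1f0dee641bb7258c56bd60e93edfa2405381c41e 42
#   a7c1559b7bba3e5566034527e4e9d0ad75f88a93 default
#   e38ba6f5b7e0c1a73b1f3416c2dd38e2a1e7f09b stable
#
# The first line is the tip node and revision that _readbranchcache() uses to
# validate the cache; each following line maps a branch head node to its
# branch name.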
478 478 def _updatebranchcache(self, partial, ctxgen):
479 479 # collect new branch entries
480 480 newbranches = {}
481 481 for c in ctxgen:
482 482 newbranches.setdefault(c.branch(), []).append(c.node())
483 483 # if older branchheads are reachable from new ones, they aren't
484 484 # really branchheads. Note checking parents is insufficient:
485 485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
486 486 for branch, newnodes in newbranches.iteritems():
487 487 bheads = partial.setdefault(branch, [])
488 488 bheads.extend(newnodes)
489 489 if len(bheads) <= 1:
490 490 continue
491 491 # starting from tip means fewer passes over reachable
492 492 while newnodes:
493 493 latest = newnodes.pop()
494 494 if latest not in bheads:
495 495 continue
496 496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
497 497 reachable = self.changelog.reachable(latest, minbhrev)
498 498 reachable.remove(latest)
499 499 bheads = [b for b in bheads if b not in reachable]
500 500 partial[branch] = bheads
501 501
502 502 def lookup(self, key):
503 503 if isinstance(key, int):
504 504 return self.changelog.node(key)
505 505 elif key == '.':
506 506 return self.dirstate.parents()[0]
507 507 elif key == 'null':
508 508 return nullid
509 509 elif key == 'tip':
510 510 return self.changelog.tip()
511 511 n = self.changelog._match(key)
512 512 if n:
513 513 return n
514 514 if key in self.tags():
515 515 return self.tags()[key]
516 516 if key in self.branchtags():
517 517 return self.branchtags()[key]
518 518 n = self.changelog._partialmatch(key)
519 519 if n:
520 520 return n
521 521
522 522 # can't find key, check if it might have come from damaged dirstate
523 523 if key in self.dirstate.parents():
524 524 raise error.Abort(_("working directory has unknown parent '%s'!")
525 525 % short(key))
526 526 try:
527 527 if len(key) == 20:
528 528 key = hex(key)
529 529 except:
530 530 pass
531 531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
532 532
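# lookup() tries, in order: an integer revision, '.', 'null', 'tip', an exact
# node match, a tag name, a branch name, and finally an unambiguous node-hex
# prefix. For example (hypothetical repository and prefix):
#
#   repo.lookup(0)           # first revision
#   repo.lookup('tip')       # current tip
#   repo.lookup('1f0dee64')  # unambiguous short node prefix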
533 533 def lookupbranch(self, key, remote=None):
534 534 repo = remote or self
535 535 if key in repo.branchmap():
536 536 return key
537 537
538 538 repo = (remote and remote.local()) and remote or self
539 539 return repo[key].branch()
540 540
541 541 def local(self):
542 542 return True
543 543
544 544 def join(self, f):
545 545 return os.path.join(self.path, f)
546 546
547 547 def wjoin(self, f):
548 548 return os.path.join(self.root, f)
549 549
550 550 def file(self, f):
551 551 if f[0] == '/':
552 552 f = f[1:]
553 553 return filelog.filelog(self.sopener, f)
554 554
555 555 def changectx(self, changeid):
556 556 return self[changeid]
557 557
558 558 def parents(self, changeid=None):
559 559 '''get list of changectxs for parents of changeid'''
560 560 return self[changeid].parents()
561 561
562 562 def filectx(self, path, changeid=None, fileid=None):
563 563 """changeid can be a changeset revision, node, or tag.
564 564 fileid can be a file revision or node."""
565 565 return context.filectx(self, path, changeid, fileid)
566 566
567 567 def getcwd(self):
568 568 return self.dirstate.getcwd()
569 569
570 570 def pathto(self, f, cwd=None):
571 571 return self.dirstate.pathto(f, cwd)
572 572
573 573 def wfile(self, f, mode='r'):
574 574 return self.wopener(f, mode)
575 575
576 576 def _link(self, f):
577 577 return os.path.islink(self.wjoin(f))
578 578
579 579 def _loadfilter(self, filter):
580 580 if filter not in self.filterpats:
581 581 l = []
582 582 for pat, cmd in self.ui.configitems(filter):
583 583 if cmd == '!':
584 584 continue
585 585 mf = matchmod.match(self.root, '', [pat])
586 586 fn = None
587 587 params = cmd
588 588 for name, filterfn in self._datafilters.iteritems():
589 589 if cmd.startswith(name):
590 590 fn = filterfn
591 591 params = cmd[len(name):].lstrip()
592 592 break
593 593 if not fn:
594 594 fn = lambda s, c, **kwargs: util.filter(s, c)
595 595 # Wrap old filters not supporting keyword arguments
596 596 if not inspect.getargspec(fn)[2]:
597 597 oldfn = fn
598 598 fn = lambda s, c, **kwargs: oldfn(s, c)
599 599 l.append((mf, fn, params))
600 600 self.filterpats[filter] = l
601 601 return self.filterpats[filter]
602 602
603 603 def _filter(self, filterpats, filename, data):
604 604 for mf, fn, cmd in filterpats:
605 605 if mf(filename):
606 606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
607 607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
608 608 break
609 609
610 610 return data
611 611
612 612 @propertycache
613 613 def _encodefilterpats(self):
614 614 return self._loadfilter('encode')
615 615
616 616 @propertycache
617 617 def _decodefilterpats(self):
618 618 return self._loadfilter('decode')
619 619
620 620 def adddatafilter(self, name, filter):
621 621 self._datafilters[name] = filter
622 622
623 623 def wread(self, filename):
624 624 if self._link(filename):
625 625 data = os.readlink(self.wjoin(filename))
626 626 else:
627 627 data = self.wopener(filename, 'r').read()
628 628 return self._filter(self._encodefilterpats, filename, data)
629 629
630 630 def wwrite(self, filename, data, flags):
631 631 data = self._filter(self._decodefilterpats, filename, data)
632 632 try:
633 633 os.unlink(self.wjoin(filename))
634 634 except OSError:
635 635 pass
636 636 if 'l' in flags:
637 637 self.wopener.symlink(data, filename)
638 638 else:
639 639 self.wopener(filename, 'w').write(data)
640 640 if 'x' in flags:
641 641 util.set_flags(self.wjoin(filename), False, True)
642 642
643 643 def wwritedata(self, filename, data):
644 644 return self._filter(self._decodefilterpats, filename, data)
645 645
646 646 def transaction(self, desc):
647 647 tr = self._transref and self._transref() or None
648 648 if tr and tr.running():
649 649 return tr.nest()
650 650
651 651 # abort here if the journal already exists
652 652 if os.path.exists(self.sjoin("journal")):
653 653 raise error.RepoError(
654 654 _("abandoned transaction found - run hg recover"))
655 655
656 656 # save dirstate for rollback
657 657 try:
658 658 ds = self.opener("dirstate").read()
659 659 except IOError:
660 660 ds = ""
661 661 self.opener("journal.dirstate", "w").write(ds)
662 662 self.opener("journal.branch", "w").write(
663 663 encoding.fromlocal(self.dirstate.branch()))
664 664 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
665 665
666 666 renames = [(self.sjoin("journal"), self.sjoin("undo")),
667 667 (self.join("journal.dirstate"), self.join("undo.dirstate")),
668 668 (self.join("journal.branch"), self.join("undo.branch")),
669 669 (self.join("journal.desc"), self.join("undo.desc"))]
670 670 tr = transaction.transaction(self.ui.warn, self.sopener,
671 671 self.sjoin("journal"),
672 672 aftertrans(renames),
673 673 self.store.createmode)
674 674 self._transref = weakref.ref(tr)
675 675 return tr
676 676
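# When the transaction above closes successfully, aftertrans(renames) moves
# the journal.* files to undo.* (e.g. .hg/store/journal -> .hg/store/undo and
# .hg/journal.dirstate -> .hg/undo.dirstate), which is exactly the state that
# rollback() below restores from.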
677 677 def recover(self):
678 678 lock = self.lock()
679 679 try:
680 680 if os.path.exists(self.sjoin("journal")):
681 681 self.ui.status(_("rolling back interrupted transaction\n"))
682 682 transaction.rollback(self.sopener, self.sjoin("journal"),
683 683 self.ui.warn)
684 684 self.invalidate()
685 685 return True
686 686 else:
687 687 self.ui.warn(_("no interrupted transaction available\n"))
688 688 return False
689 689 finally:
690 690 lock.release()
691 691
692 692 def rollback(self, dryrun=False):
693 693 wlock = lock = None
694 694 try:
695 695 wlock = self.wlock()
696 696 lock = self.lock()
697 697 if os.path.exists(self.sjoin("undo")):
698 698 try:
699 699 args = self.opener("undo.desc", "r").read().splitlines()
700 700 if len(args) >= 3 and self.ui.verbose:
701 701 desc = _("rolling back to revision %s"
702 702 " (undo %s: %s)\n") % (
703 703 int(args[0]) - 1, args[1], args[2])
704 704 elif len(args) >= 2:
705 705 desc = _("rolling back to revision %s (undo %s)\n") % (
706 706 int(args[0]) - 1, args[1])
707 707 except IOError:
708 708 desc = _("rolling back unknown transaction\n")
709 709 self.ui.status(desc)
710 710 if dryrun:
711 711 return
712 712 transaction.rollback(self.sopener, self.sjoin("undo"),
713 713 self.ui.warn)
714 714 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
715 715 try:
716 716 branch = self.opener("undo.branch").read()
717 717 self.dirstate.setbranch(branch)
718 718 except IOError:
719 719 self.ui.warn(_("Named branch could not be reset, "
720 720 "current branch still is: %s\n")
721 721 % self.dirstate.branch())
722 722 self.invalidate()
723 723 self.dirstate.invalidate()
724 724 self.destroyed()
725 725 else:
726 726 self.ui.warn(_("no rollback information available\n"))
727 727 return 1
728 728 finally:
729 729 release(lock, wlock)
730 730
731 731 def invalidatecaches(self):
732 732 self._tags = None
733 733 self._tagtypes = None
734 734 self.nodetagscache = None
735 735 self._branchcache = None # in UTF-8
736 736 self._branchcachetip = None
737 737
738 738 def invalidate(self):
739 739 for a in "changelog manifest".split():
740 740 if a in self.__dict__:
741 741 delattr(self, a)
742 742 self.invalidatecaches()
743 743
744 744 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
745 745 try:
746 746 l = lock.lock(lockname, 0, releasefn, desc=desc)
747 747 except error.LockHeld, inst:
748 748 if not wait:
749 749 raise
750 750 self.ui.warn(_("waiting for lock on %s held by %r\n") %
751 751 (desc, inst.locker))
752 752 # default to 600 seconds timeout
753 753 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
754 754 releasefn, desc=desc)
755 755 if acquirefn:
756 756 acquirefn()
757 757 return l
758 758
759 759 def lock(self, wait=True):
760 760 '''Lock the repository store (.hg/store) and return a weak reference
761 761 to the lock. Use this before modifying the store (e.g. committing or
762 762 stripping). If you are opening a transaction, get a lock as well.'''
763 763 l = self._lockref and self._lockref()
764 764 if l is not None and l.held:
765 765 l.lock()
766 766 return l
767 767
768 768 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
769 769 _('repository %s') % self.origroot)
770 770 self._lockref = weakref.ref(l)
771 771 return l
772 772
773 773 def wlock(self, wait=True):
774 774 '''Lock the non-store parts of the repository (everything under
775 775 .hg except .hg/store) and return a weak reference to the lock.
776 776 Use this before modifying files in .hg.'''
777 777 l = self._wlockref and self._wlockref()
778 778 if l is not None and l.held:
779 779 l.lock()
780 780 return l
781 781
782 782 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
783 783 self.dirstate.invalidate, _('working directory of %s') %
784 784 self.origroot)
785 785 self._wlockref = weakref.ref(l)
786 786 return l
787 787
788 788 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
789 789 """
790 790 commit an individual file as part of a larger transaction
791 791 """
792 792
793 793 fname = fctx.path()
794 794 text = fctx.data()
795 795 flog = self.file(fname)
796 796 fparent1 = manifest1.get(fname, nullid)
797 797 fparent2 = fparent2o = manifest2.get(fname, nullid)
798 798
799 799 meta = {}
800 800 copy = fctx.renamed()
801 801 if copy and copy[0] != fname:
802 802 # Mark the new revision of this file as a copy of another
803 803 # file. This copy data will effectively act as a parent
804 804 # of this new revision. If this is a merge, the first
805 805 # parent will be the nullid (meaning "look up the copy data")
806 806 # and the second one will be the other parent. For example:
807 807 #
808 808 # 0 --- 1 --- 3 rev1 changes file foo
809 809 # \ / rev2 renames foo to bar and changes it
810 810 # \- 2 -/ rev3 should have bar with all changes and
811 811 # should record that bar descends from
812 812 # bar in rev2 and foo in rev1
813 813 #
814 814 # this allows this merge to succeed:
815 815 #
816 816 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
817 817 # \ / merging rev3 and rev4 should use bar@rev2
818 818 # \- 2 --- 4 as the merge base
819 819 #
820 820
821 821 cfname = copy[0]
822 822 crev = manifest1.get(cfname)
823 823 newfparent = fparent2
824 824
825 825 if manifest2: # branch merge
826 826 if fparent2 == nullid or crev is None: # copied on remote side
827 827 if cfname in manifest2:
828 828 crev = manifest2[cfname]
829 829 newfparent = fparent1
830 830
831 831 # find source in nearest ancestor if we've lost track
832 832 if not crev:
833 833 self.ui.debug(" %s: searching for copy revision for %s\n" %
834 834 (fname, cfname))
835 835 for ancestor in self[None].ancestors():
836 836 if cfname in ancestor:
837 837 crev = ancestor[cfname].filenode()
838 838 break
839 839
840 840 if crev:
841 841 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
842 842 meta["copy"] = cfname
843 843 meta["copyrev"] = hex(crev)
844 844 fparent1, fparent2 = nullid, newfparent
845 845 else:
846 846 self.ui.warn(_("warning: can't find ancestor for '%s' "
847 847 "copied from '%s'!\n") % (fname, cfname))
848 848
849 849 elif fparent2 != nullid:
850 850 # is one parent an ancestor of the other?
851 851 fparentancestor = flog.ancestor(fparent1, fparent2)
852 852 if fparentancestor == fparent1:
853 853 fparent1, fparent2 = fparent2, nullid
854 854 elif fparentancestor == fparent2:
855 855 fparent2 = nullid
856 856
857 857 # is the file changed?
858 858 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
859 859 changelist.append(fname)
860 860 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
861 861
862 862 # are just the flags changed during merge?
863 863 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
864 864 changelist.append(fname)
865 865
866 866 return fparent1
867 867
868 868 def commit(self, text="", user=None, date=None, match=None, force=False,
869 869 editor=False, extra={}):
870 870 """Add a new revision to current repository.
871 871
872 872 Revision information is gathered from the working directory;
873 873 match can be used to filter the committed files. If editor is
874 874 supplied, it is called to get a commit message.
875 875 """
876 876
877 877 def fail(f, msg):
878 878 raise util.Abort('%s: %s' % (f, msg))
879 879
880 880 if not match:
881 881 match = matchmod.always(self.root, '')
882 882
883 883 if not force:
884 884 vdirs = []
885 885 match.dir = vdirs.append
886 886 match.bad = fail
887 887
888 888 wlock = self.wlock()
889 889 try:
890 890 wctx = self[None]
891 891 merge = len(wctx.parents()) > 1
892 892
893 893 if (not force and merge and match and
894 894 (match.files() or match.anypats())):
895 895 raise util.Abort(_('cannot partially commit a merge '
896 896 '(do not specify files or patterns)'))
897 897
898 898 changes = self.status(match=match, clean=force)
899 899 if force:
900 900 changes[0].extend(changes[6]) # mq may commit unchanged files
901 901
902 902 # check subrepos
903 903 subs = []
904 904 removedsubs = set()
905 905 for p in wctx.parents():
906 906 removedsubs.update(s for s in p.substate if match(s))
907 907 for s in wctx.substate:
908 908 removedsubs.discard(s)
909 909 if match(s) and wctx.sub(s).dirty():
910 910 subs.append(s)
911 911 if (subs or removedsubs):
912 912 if (not match('.hgsub') and
913 913 '.hgsub' in (wctx.modified() + wctx.added())):
914 914 raise util.Abort(_("can't commit subrepos without .hgsub"))
915 915 if '.hgsubstate' not in changes[0]:
916 916 changes[0].insert(0, '.hgsubstate')
917 917
918 918 # make sure all explicit patterns are matched
919 919 if not force and match.files():
920 920 matched = set(changes[0] + changes[1] + changes[2])
921 921
922 922 for f in match.files():
923 923 if f == '.' or f in matched or f in wctx.substate:
924 924 continue
925 925 if f in changes[3]: # missing
926 926 fail(f, _('file not found!'))
927 927 if f in vdirs: # visited directory
928 928 d = f + '/'
929 929 for mf in matched:
930 930 if mf.startswith(d):
931 931 break
932 932 else:
933 933 fail(f, _("no match under directory!"))
934 934 elif f not in self.dirstate:
935 935 fail(f, _("file not tracked!"))
936 936
937 937 if (not force and not extra.get("close") and not merge
938 938 and not (changes[0] or changes[1] or changes[2])
939 939 and wctx.branch() == wctx.p1().branch()):
940 940 return None
941 941
942 942 ms = mergemod.mergestate(self)
943 943 for f in changes[0]:
944 944 if f in ms and ms[f] == 'u':
945 945 raise util.Abort(_("unresolved merge conflicts "
946 946 "(see hg resolve)"))
947 947
948 948 cctx = context.workingctx(self, text, user, date, extra, changes)
949 949 if editor:
950 950 cctx._text = editor(self, cctx, subs)
951 951 edited = (text != cctx._text)
952 952
953 953 # commit subs
954 954 if subs or removedsubs:
955 955 state = wctx.substate.copy()
956 956 for s in sorted(subs):
957 957 sub = wctx.sub(s)
958 958 self.ui.status(_('committing subrepository %s\n') %
959 959 subrepo.subrelpath(sub))
960 960 sr = sub.commit(cctx._text, user, date)
961 961 state[s] = (state[s][0], sr)
962 962 subrepo.writestate(self, state)
963 963
964 964 # Save commit message in case this transaction gets rolled back
965 965 # (e.g. by a pretxncommit hook). Leave the content alone on
966 966 # the assumption that the user will use the same editor again.
967 967 msgfile = self.opener('last-message.txt', 'wb')
968 968 msgfile.write(cctx._text)
969 969 msgfile.close()
970 970
971 971 p1, p2 = self.dirstate.parents()
972 972 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
973 973 try:
974 974 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
975 975 ret = self.commitctx(cctx, True)
976 976 except:
977 977 if edited:
978 978 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
979 979 self.ui.write(
980 980 _('note: commit message saved in %s\n') % msgfn)
981 981 raise
982 982
983 983 # update dirstate and mergestate
984 984 for f in changes[0] + changes[1]:
985 985 self.dirstate.normal(f)
986 986 for f in changes[2]:
987 987 self.dirstate.forget(f)
988 988 self.dirstate.setparents(ret)
989 989 ms.reset()
990 990 finally:
991 991 wlock.release()
992 992
993 993 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
994 994 return ret
995 995
996 996 def commitctx(self, ctx, error=False):
997 997 """Add a new revision to current repository.
998 998 Revision information is passed via the context argument.
999 999 """
1000 1000
1001 1001 tr = lock = None
1002 1002 removed = list(ctx.removed())
1003 1003 p1, p2 = ctx.p1(), ctx.p2()
1004 1004 m1 = p1.manifest().copy()
1005 1005 m2 = p2.manifest()
1006 1006 user = ctx.user()
1007 1007
1008 1008 lock = self.lock()
1009 1009 try:
1010 1010 tr = self.transaction("commit")
1011 1011 trp = weakref.proxy(tr)
1012 1012
1013 1013 # check in files
1014 1014 new = {}
1015 1015 changed = []
1016 1016 linkrev = len(self)
1017 1017 for f in sorted(ctx.modified() + ctx.added()):
1018 1018 self.ui.note(f + "\n")
1019 1019 try:
1020 1020 fctx = ctx[f]
1021 1021 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1022 1022 changed)
1023 1023 m1.set(f, fctx.flags())
1024 1024 except OSError, inst:
1025 1025 self.ui.warn(_("trouble committing %s!\n") % f)
1026 1026 raise
1027 1027 except IOError, inst:
1028 1028 errcode = getattr(inst, 'errno', errno.ENOENT)
1029 1029 if error or errcode and errcode != errno.ENOENT:
1030 1030 self.ui.warn(_("trouble committing %s!\n") % f)
1031 1031 raise
1032 1032 else:
1033 1033 removed.append(f)
1034 1034
1035 1035 # update manifest
1036 1036 m1.update(new)
1037 1037 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1038 1038 drop = [f for f in removed if f in m1]
1039 1039 for f in drop:
1040 1040 del m1[f]
1041 1041 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1042 1042 p2.manifestnode(), (new, drop))
1043 1043
1044 1044 # update changelog
1045 1045 self.changelog.delayupdate()
1046 1046 n = self.changelog.add(mn, changed + removed, ctx.description(),
1047 1047 trp, p1.node(), p2.node(),
1048 1048 user, ctx.date(), ctx.extra().copy())
1049 1049 p = lambda: self.changelog.writepending() and self.root or ""
1050 1050 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1051 1051 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1052 1052 parent2=xp2, pending=p)
1053 1053 self.changelog.finalize(trp)
1054 1054 tr.close()
1055 1055
1056 1056 if self._branchcache:
1057 1057 self.updatebranchcache()
1058 1058 return n
1059 1059 finally:
1060 1060 if tr:
1061 1061 tr.release()
1062 1062 lock.release()
1063 1063
1064 1064 def destroyed(self):
1065 1065 '''Inform the repository that nodes have been destroyed.
1066 1066 Intended for use by strip and rollback, so there's a common
1067 1067 place for anything that has to be done after destroying history.'''
1068 1068 # XXX it might be nice if we could take the list of destroyed
1069 1069 # nodes, but I don't see an easy way for rollback() to do that
1070 1070
1071 1071 # Ensure the persistent tag cache is updated. Doing it now
1072 1072 # means that the tag cache only has to worry about destroyed
1073 1073 # heads immediately after a strip/rollback. That in turn
1074 1074 # guarantees that "cachetip == currenttip" (comparing both rev
1075 1075 # and node) always means no nodes have been added or destroyed.
1076 1076
1077 1077 # XXX this is suboptimal when qrefresh'ing: we strip the current
1078 1078 # head, refresh the tag cache, then immediately add a new head.
1079 1079 # But I think doing it this way is necessary for the "instant
1080 1080 # tag cache retrieval" case to work.
1081 1081 self.invalidatecaches()
1082 1082
1083 1083 def walk(self, match, node=None):
1084 1084 '''
1085 1085 walk recursively through the directory tree or a given
1086 1086 changeset, finding all files matched by the match
1087 1087 function
1088 1088 '''
1089 1089 return self[node].walk(match)
1090 1090
1091 1091 def status(self, node1='.', node2=None, match=None,
1092 1092 ignored=False, clean=False, unknown=False,
1093 1093 listsubrepos=False):
1094 1094 """return status of files between two nodes or node and working directory
1095 1095
1096 1096 If node1 is None, use the first dirstate parent instead.
1097 1097 If node2 is None, compare node1 with working directory.
1098 1098 """
1099 1099
1100 1100 def mfmatches(ctx):
1101 1101 mf = ctx.manifest().copy()
1102 1102 for fn in mf.keys():
1103 1103 if not match(fn):
1104 1104 del mf[fn]
1105 1105 return mf
1106 1106
1107 1107 if isinstance(node1, context.changectx):
1108 1108 ctx1 = node1
1109 1109 else:
1110 1110 ctx1 = self[node1]
1111 1111 if isinstance(node2, context.changectx):
1112 1112 ctx2 = node2
1113 1113 else:
1114 1114 ctx2 = self[node2]
1115 1115
1116 1116 working = ctx2.rev() is None
1117 1117 parentworking = working and ctx1 == self['.']
1118 1118 match = match or matchmod.always(self.root, self.getcwd())
1119 1119 listignored, listclean, listunknown = ignored, clean, unknown
1120 1120
1121 1121 # load earliest manifest first for caching reasons
1122 1122 if not working and ctx2.rev() < ctx1.rev():
1123 1123 ctx2.manifest()
1124 1124
1125 1125 if not parentworking:
1126 1126 def bad(f, msg):
1127 1127 if f not in ctx1:
1128 1128 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1129 1129 match.bad = bad
1130 1130
1131 1131 if working: # we need to scan the working dir
1132 1132 subrepos = []
1133 1133 if '.hgsub' in self.dirstate:
1134 1134 subrepos = ctx1.substate.keys()
1135 1135 s = self.dirstate.status(match, subrepos, listignored,
1136 1136 listclean, listunknown)
1137 1137 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1138 1138
1139 1139 # check for any possibly clean files
1140 1140 if parentworking and cmp:
1141 1141 fixup = []
1142 1142 # do a full compare of any files that might have changed
1143 1143 for f in sorted(cmp):
1144 1144 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1145 1145 or ctx1[f].cmp(ctx2[f])):
1146 1146 modified.append(f)
1147 1147 else:
1148 1148 fixup.append(f)
1149 1149
1150 1150 # update dirstate for files that are actually clean
1151 1151 if fixup:
1152 1152 if listclean:
1153 1153 clean += fixup
1154 1154
1155 1155 try:
1156 1156 # updating the dirstate is optional
1157 1157 # so we don't wait on the lock
1158 1158 wlock = self.wlock(False)
1159 1159 try:
1160 1160 for f in fixup:
1161 1161 self.dirstate.normal(f)
1162 1162 finally:
1163 1163 wlock.release()
1164 1164 except error.LockError:
1165 1165 pass
1166 1166
1167 1167 if not parentworking:
1168 1168 mf1 = mfmatches(ctx1)
1169 1169 if working:
1170 1170 # we are comparing working dir against non-parent
1171 1171 # generate a pseudo-manifest for the working dir
1172 1172 mf2 = mfmatches(self['.'])
1173 1173 for f in cmp + modified + added:
1174 1174 mf2[f] = None
1175 1175 mf2.set(f, ctx2.flags(f))
1176 1176 for f in removed:
1177 1177 if f in mf2:
1178 1178 del mf2[f]
1179 1179 else:
1180 1180 # we are comparing two revisions
1181 1181 deleted, unknown, ignored = [], [], []
1182 1182 mf2 = mfmatches(ctx2)
1183 1183
1184 1184 modified, added, clean = [], [], []
1185 1185 for fn in mf2:
1186 1186 if fn in mf1:
1187 1187 if (mf1.flags(fn) != mf2.flags(fn) or
1188 1188 (mf1[fn] != mf2[fn] and
1189 1189 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1190 1190 modified.append(fn)
1191 1191 elif listclean:
1192 1192 clean.append(fn)
1193 1193 del mf1[fn]
1194 1194 else:
1195 1195 added.append(fn)
1196 1196 removed = mf1.keys()
1197 1197
1198 1198 r = modified, added, removed, deleted, unknown, ignored, clean
1199 1199
1200 1200 if listsubrepos:
1201 1201 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1202 1202 if working:
1203 1203 rev2 = None
1204 1204 else:
1205 1205 rev2 = ctx2.substate[subpath][1]
1206 1206 try:
1207 1207 submatch = matchmod.narrowmatcher(subpath, match)
1208 1208 s = sub.status(rev2, match=submatch, ignored=listignored,
1209 1209 clean=listclean, unknown=listunknown,
1210 1210 listsubrepos=True)
1211 1211 for rfiles, sfiles in zip(r, s):
1212 1212 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1213 1213 except error.LookupError:
1214 1214 self.ui.status(_("skipping missing subrepository: %s\n")
1215 1215 % subpath)
1216 1216
1217 1217 [l.sort() for l in r]
1218 1218 return r
1219 1219
1220 1220 def heads(self, start=None):
1221 1221 heads = self.changelog.heads(start)
1222 1222 # sort the output in rev descending order
1223 1223 return sorted(heads, key=self.changelog.rev, reverse=True)
1224 1224
1225 1225 def branchheads(self, branch=None, start=None, closed=False):
1226 1226 '''return a (possibly filtered) list of heads for the given branch
1227 1227
1228 1228 Heads are returned in topological order, from newest to oldest.
1229 1229 If branch is None, use the dirstate branch.
1230 1230 If start is not None, return only heads reachable from start.
1231 1231 If closed is True, return heads that are marked as closed as well.
1232 1232 '''
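# Illustrative use (hypothetical values): repo.branchheads('default')
# returns the open heads of the 'default' branch, newest first, while
# repo.branchheads('default', closed=True) also includes heads that have
# been marked as closed.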
1233 1233 if branch is None:
1234 1234 branch = self[None].branch()
1235 1235 branches = self.branchmap()
1236 1236 if branch not in branches:
1237 1237 return []
1238 1238 # the cache returns heads ordered lowest to highest
1239 1239 bheads = list(reversed(branches[branch]))
1240 1240 if start is not None:
1241 1241 # filter out the heads that cannot be reached from startrev
1242 1242 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1243 1243 bheads = [h for h in bheads if h in fbheads]
1244 1244 if not closed:
1245 1245 bheads = [h for h in bheads if
1246 1246 ('close' not in self.changelog.read(h)[5])]
1247 1247 return bheads
1248 1248
1249 1249 def branches(self, nodes):
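# Roughly: for each given node, follow first parents until a merge
# changeset or the root is reached, and record a (start, stop, p1-of-stop,
# p2-of-stop) tuple. The loop below is authoritative.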
1250 1250 if not nodes:
1251 1251 nodes = [self.changelog.tip()]
1252 1252 b = []
1253 1253 for n in nodes:
1254 1254 t = n
1255 1255 while 1:
1256 1256 p = self.changelog.parents(n)
1257 1257 if p[1] != nullid or p[0] == nullid:
1258 1258 b.append((t, n, p[0], p[1]))
1259 1259 break
1260 1260 n = p[0]
1261 1261 return b
1262 1262
1263 1263 def between(self, pairs):
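# For each (top, bottom) pair, walk first parents from top towards bottom
# and record the nodes seen at exponentially growing distances
# (1, 2, 4, 8, ... steps from top). A rough sketch of one walk, assuming a
# linear history n0..n10 with hypothetical node names:
#   between([(n10, n0)]) -> [[n9, n8, n6, n2]]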
1264 1264 r = []
1265 1265
1266 1266 for top, bottom in pairs:
1267 1267 n, l, i = top, [], 0
1268 1268 f = 1
1269 1269
1270 1270 while n != bottom and n != nullid:
1271 1271 p = self.changelog.parents(n)[0]
1272 1272 if i == f:
1273 1273 l.append(n)
1274 1274 f = f * 2
1275 1275 n = p
1276 1276 i += 1
1277 1277
1278 1278 r.append(l)
1279 1279
1280 1280 return r
1281 1281
1282 1282 def pull(self, remote, heads=None, force=False):
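# Returns 0 when the remote has nothing we are missing; otherwise returns
# the head-count value produced by addchangegroup() (see below).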
1283 1283 lock = self.lock()
1284 1284 try:
1285 1285 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1286 1286 force=force)
1287 1287 common, fetch, rheads = tmp
1288 1288 if not fetch:
1289 1289 self.ui.status(_("no changes found\n"))
1290 1290 return 0
1291 1291
1292 1292 if heads is None and fetch == [nullid]:
1293 1293 self.ui.status(_("requesting all changes\n"))
1294 1294 elif heads is None and remote.capable('changegroupsubset'):
1295 1295 # issue1320, avoid a race if remote changed after discovery
1296 1296 heads = rheads
1297 1297
1298 1298 if heads is None:
1299 1299 cg = remote.changegroup(fetch, 'pull')
1300 1300 else:
1301 1301 if not remote.capable('changegroupsubset'):
1302 1302 raise util.Abort(_("partial pull cannot be done because "
1303 1303 "other repository doesn't support "
1304 1304 "changegroupsubset."))
1305 1305 cg = remote.changegroupsubset(fetch, heads, 'pull')
1306 1306 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1307 1307 finally:
1308 1308 lock.release()
1309 1309
1310 1310 def push(self, remote, force=False, revs=None, newbranch=False):
1311 1311 '''Push outgoing changesets (limited by revs) from the current
1312 1312 repository to remote. Return an integer:
1313 1313 - 0 means HTTP error *or* nothing to push
1314 1314 - 1 means we pushed and remote head count is unchanged *or*
1315 1315 we have outgoing changesets but refused to push
1316 1316 - other values as described by addchangegroup()
1317 1317 '''
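# Illustrative caller-side reading of the return value (hypothetical
# caller, not part of this module):
#   ret = repo.push(remote)
#   ret == 0  -> HTTP error, or there was nothing to push
#   ret == 1  -> pushed with remote head count unchanged, or push refused
#   otherwise -> head-count delta as documented by addchangegroup() below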
1318 1318 # there are two ways to push to remote repo:
1319 1319 #
1320 1320 # addchangegroup assumes local user can lock remote
1321 1321 # repo (local filesystem, old ssh servers).
1322 1322 #
1323 1323 # unbundle assumes local user cannot lock remote repo (new ssh
1324 1324 # servers, http servers).
1325 1325
1326 1326 lock = None
1327 1327 unbundle = remote.capable('unbundle')
1328 1328 if not unbundle:
1329 1329 lock = remote.lock()
1330 1330 try:
1331 1331 ret = discovery.prepush(self, remote, force, revs, newbranch)
1332 1332 if ret[0] is None:
1333 1333 # and here we return 0 for "nothing to push" or 1 for
1334 1334 # "something to push but I refuse"
1335 1335 return ret[1]
1336 1336
1337 1337 cg, remote_heads = ret
1338 1338 if unbundle:
1339 1339 # local repo finds heads on server, finds out what revs it must
1340 1340 # push. Once revs are transferred, if the server finds it has
1341 1341 # different heads (someone else won the commit/push race), the server
1342 1342 # aborts.
1343 1343 if force:
1344 1344 remote_heads = ['force']
1345 1345 # ssh: return remote's addchangegroup()
1346 1346 # http: return remote's addchangegroup() or 0 for error
1347 1347 return remote.unbundle(cg, remote_heads, 'push')
1348 1348 else:
1349 1349 # we return an integer indicating remote head count change
1350 1350 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1351 1351 finally:
1352 1352 if lock is not None:
1353 1353 lock.release()
1354 1354
1355 1355 def changegroupinfo(self, nodes, source):
1356 1356 if self.ui.verbose or source == 'bundle':
1357 1357 self.ui.status(_("%d changesets found\n") % len(nodes))
1358 1358 if self.ui.debugflag:
1359 1359 self.ui.debug("list of changesets:\n")
1360 1360 for node in nodes:
1361 1361 self.ui.debug("%s\n" % hex(node))
1362 1362
1363 1363 def changegroupsubset(self, bases, heads, source, extranodes=None):
1364 1364 """Compute a changegroup consisting of all the nodes that are
1365 1365 descendants of any of the bases and ancestors of any of the heads.
1366 1366 Return a chunkbuffer object whose read() method will return
1367 1367 successive changegroup chunks.
1368 1368
1369 1369 It is fairly complex as determining which filenodes and which
1370 1370 manifest nodes need to be included for the changeset to be complete
1371 1371 is non-trivial.
1372 1372
1373 1373 Another wrinkle is doing the reverse, figuring out which changeset in
1374 1374 the changegroup a particular filenode or manifestnode belongs to.
1375 1375
1376 1376 The caller can specify some nodes that must be included in the
1377 1377 changegroup using the extranodes argument. It should be a dict
1378 1378 where the keys are the filenames (or 1 for the manifest), and the
1379 1379 values are lists of (node, linknode) tuples, where node is a wanted
1380 1380 node and linknode is the changelog node that should be transmitted as
1381 1381 the linkrev.
1382 1382 """
1383 1383
1384 1384 # Set up some initial variables
1385 1385 # Make it easy to refer to self.changelog
1386 1386 cl = self.changelog
1387 1387 # Compute the list of changesets in this changegroup.
1388 1388 # Some bases may turn out to be superfluous, and some heads may be
1389 1389 # too. nodesbetween will return the minimal set of bases and heads
1390 1390 # necessary to re-create the changegroup.
1391 1391 if not bases:
1392 1392 bases = [nullid]
1393 1393 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1394 1394
1395 1395 if extranodes is None:
1396 1396 # can we go through the fast path ?
1397 1397 heads.sort()
1398 1398 allheads = self.heads()
1399 1399 allheads.sort()
1400 1400 if heads == allheads:
1401 1401 return self._changegroup(msng_cl_lst, source)
1402 1402
1403 1403 # slow path
1404 1404 self.hook('preoutgoing', throw=True, source=source)
1405 1405
1406 1406 self.changegroupinfo(msng_cl_lst, source)
1407 1407
1408 1408 # We assume that all ancestors of bases are known
1409 1409 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1410 1410
1411 1411 # Make it easy to refer to self.manifest
1412 1412 mnfst = self.manifest
1413 1413 # We don't know which manifests are missing yet
1414 1414 msng_mnfst_set = {}
1415 1415 # Nor do we know which filenodes are missing.
1416 1416 msng_filenode_set = {}
1417 1417
1418 1418 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1419 1419 junk = None
1420 1420
1421 1421 # A changeset always belongs to itself, so the changenode lookup
1422 1422 # function for a changenode is identity.
1423 1423 def identity(x):
1424 1424 return x
1425 1425
1426 1426 # A function generating function that sets up the initial environment
1427 1427 # the inner function.
1428 1428 def filenode_collector(changedfiles):
1429 1429 # This gathers information from each manifestnode included in the
1430 1430 # changegroup about which filenodes the manifest node references
1431 1431 # so we can include those in the changegroup too.
1432 1432 #
1433 1433 # It also remembers which changenode each filenode belongs to. It
1434 1434 # does this by assuming a filenode belongs to the same changenode as
1435 1435 # the first manifest that references it.
1436 1436 def collect_msng_filenodes(mnfstnode):
1437 1437 r = mnfst.rev(mnfstnode)
1438 1438 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1439 1439 # If the previous rev is one of the parents,
1440 1440 # we only need to see a diff.
1441 1441 deltamf = mnfst.readdelta(mnfstnode)
1442 1442 # For each line in the delta
1443 1443 for f, fnode in deltamf.iteritems():
1444 1444 # And if the file is in the list of files we care
1445 1445 # about.
1446 1446 if f in changedfiles:
1447 1447 # Get the changenode this manifest belongs to
1448 1448 clnode = msng_mnfst_set[mnfstnode]
1449 1449 # Create the set of filenodes for the file if
1450 1450 # there isn't one already.
1451 1451 ndset = msng_filenode_set.setdefault(f, {})
1452 1452 # And set the filenode's changelog node to the
1453 1453 # manifest's if it hasn't been set already.
1454 1454 ndset.setdefault(fnode, clnode)
1455 1455 else:
1456 1456 # Otherwise we need a full manifest.
1457 1457 m = mnfst.read(mnfstnode)
1458 1458 # For every file we care about.
1459 1459 for f in changedfiles:
1460 1460 fnode = m.get(f, None)
1461 1461 # If it's in the manifest
1462 1462 if fnode is not None:
1463 1463 # See comments above.
1464 1464 clnode = msng_mnfst_set[mnfstnode]
1465 1465 ndset = msng_filenode_set.setdefault(f, {})
1466 1466 ndset.setdefault(fnode, clnode)
1467 1467 return collect_msng_filenodes
1468 1468
1469 1469 # If we determine that a particular file or manifest node must be a
1470 1470 # node that the recipient of the changegroup will already have, we can
1471 1471 # also assume the recipient will have all the parents. This function
1472 1472 # prunes them from the set of missing nodes.
1473 1473 def prune(revlog, missingnodes):
1474 1474 hasset = set()
1475 1475 # If a 'missing' filenode thinks it belongs to a changenode we
1476 1476 # assume the recipient must have, then the recipient must have
1477 1477 # that filenode.
1478 1478 for n in missingnodes:
1479 1479 clrev = revlog.linkrev(revlog.rev(n))
1480 1480 if clrev in commonrevs:
1481 1481 hasset.add(n)
1482 1482 for n in hasset:
1483 1483 missingnodes.pop(n, None)
1484 1484 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1485 1485 missingnodes.pop(revlog.node(r), None)
1486 1486
1487 1487 # Add the nodes that were explicitly requested.
1488 1488 def add_extra_nodes(name, nodes):
1489 1489 if not extranodes or name not in extranodes:
1490 1490 return
1491 1491
1492 1492 for node, linknode in extranodes[name]:
1493 1493 if node not in nodes:
1494 1494 nodes[node] = linknode
1495 1495
1496 1496 # Now that we have all these utility functions to help out and
1497 1497 # logically divide up the task, generate the group.
1498 1498 def gengroup():
1499 1499 # The set of changed files starts empty.
1500 1500 changedfiles = set()
1501 1501 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1502 1502
1503 1503 # Create a changenode group generator that will call our functions
1504 1504 # back to lookup the owning changenode and collect information.
1505 1505 group = cl.group(msng_cl_lst, identity, collect)
1506 1506 for cnt, chnk in enumerate(group):
1507 1507 yield chnk
1508 1508 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1509 1509 self.ui.progress(_('bundling changes'), None)
1510 1510
1511 1511 prune(mnfst, msng_mnfst_set)
1512 1512 add_extra_nodes(1, msng_mnfst_set)
1513 1513 msng_mnfst_lst = msng_mnfst_set.keys()
1514 1514 # Sort the manifestnodes by revision number.
1515 1515 msng_mnfst_lst.sort(key=mnfst.rev)
1516 1516 # Create a generator for the manifestnodes that calls our lookup
1517 1517 # and data collection functions back.
1518 1518 group = mnfst.group(msng_mnfst_lst,
1519 1519 lambda mnode: msng_mnfst_set[mnode],
1520 1520 filenode_collector(changedfiles))
1521 1521 for cnt, chnk in enumerate(group):
1522 1522 yield chnk
1523 1523 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1524 1524 self.ui.progress(_('bundling manifests'), None)
1525 1525
1526 1526 # These are no longer needed, dereference and toss the memory for
1527 1527 # them.
1528 1528 msng_mnfst_lst = None
1529 1529 msng_mnfst_set.clear()
1530 1530
1531 1531 if extranodes:
1532 1532 for fname in extranodes:
1533 1533 if isinstance(fname, int):
1534 1534 continue
1535 1535 msng_filenode_set.setdefault(fname, {})
1536 1536 changedfiles.add(fname)
1537 1537 # Go through all our files in order sorted by name.
1538 1538 cnt = 0
1539 1539 for fname in sorted(changedfiles):
1540 1540 filerevlog = self.file(fname)
1541 1541 if not len(filerevlog):
1542 1542 raise util.Abort(_("empty or missing revlog for %s") % fname)
1543 1543 # Toss out the filenodes that the recipient isn't really
1544 1544 # missing.
1545 1545 missingfnodes = msng_filenode_set.pop(fname, {})
1546 1546 prune(filerevlog, missingfnodes)
1547 1547 add_extra_nodes(fname, missingfnodes)
1548 1548 # If any filenodes are left, generate the group for them,
1549 1549 # otherwise don't bother.
1550 1550 if missingfnodes:
1551 1551 yield changegroup.chunkheader(len(fname))
1552 1552 yield fname
1553 1553 # Sort the filenodes by their revision # (topological order)
1554 1554 nodeiter = list(missingfnodes)
1555 1555 nodeiter.sort(key=filerevlog.rev)
1556 1556 # Create a group generator and only pass in a changenode
1557 1557 # lookup function as we need to collect no information
1558 1558 # from filenodes.
1559 1559 group = filerevlog.group(nodeiter,
1560 1560 lambda fnode: missingfnodes[fnode])
1561 1561 for chnk in group:
1562 1562 self.ui.progress(
1563 1563 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1564 1564 cnt += 1
1565 1565 yield chnk
1566 1566 # Signal that no more groups are left.
1567 1567 yield changegroup.closechunk()
1568 1568 self.ui.progress(_('bundling files'), None)
1569 1569
1570 1570 if msng_cl_lst:
1571 1571 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1572 1572
1573 1573 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1574 1574
1575 1575 def changegroup(self, basenodes, source):
1576 1576 # to avoid a race we use changegroupsubset() (issue1320)
1577 1577 return self.changegroupsubset(basenodes, self.heads(), source)
1578 1578
1579 1579 def _changegroup(self, nodes, source):
1580 1580 """Compute the changegroup of all nodes that we have that a recipient
1581 1581 doesn't. Return a chunkbuffer object whose read() method will return
1582 1582 successive changegroup chunks.
1583 1583
1584 1584 This is much easier than the previous function as we can assume that
1585 1585 the recipient has any changenode we aren't sending them.
1586 1586
1587 1587 nodes is the set of nodes to send"""
1588 1588
1589 1589 self.hook('preoutgoing', throw=True, source=source)
1590 1590
1591 1591 cl = self.changelog
1592 1592 revset = set([cl.rev(n) for n in nodes])
1593 1593 self.changegroupinfo(nodes, source)
1594 1594
1595 1595 def identity(x):
1596 1596 return x
1597 1597
1598 1598 def gennodelst(log):
1599 1599 for r in log:
1600 1600 if log.linkrev(r) in revset:
1601 1601 yield log.node(r)
1602 1602
1603 1603 def lookuplinkrev_func(revlog):
1604 1604 def lookuplinkrev(n):
1605 1605 return cl.node(revlog.linkrev(revlog.rev(n)))
1606 1606 return lookuplinkrev
1607 1607
1608 1608 def gengroup():
1609 1609 '''yield a sequence of changegroup chunks (strings)'''
1610 1610 # construct a list of all changed files
1611 1611 changedfiles = set()
1612 1612 mmfs = {}
1613 1613 collect = changegroup.collector(cl, mmfs, changedfiles)
1614 1614
1615 1615 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1616 1616 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1617 1617 yield chnk
1618 1618 self.ui.progress(_('bundling changes'), None)
1619 1619
1620 1620 mnfst = self.manifest
1621 1621 nodeiter = gennodelst(mnfst)
1622 1622 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1623 1623 lookuplinkrev_func(mnfst))):
1624 1624 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1625 1625 yield chnk
1626 1626 self.ui.progress(_('bundling manifests'), None)
1627 1627
1628 1628 cnt = 0
1629 1629 for fname in sorted(changedfiles):
1630 1630 filerevlog = self.file(fname)
1631 1631 if not len(filerevlog):
1632 1632 raise util.Abort(_("empty or missing revlog for %s") % fname)
1633 1633 nodeiter = gennodelst(filerevlog)
1634 1634 nodeiter = list(nodeiter)
1635 1635 if nodeiter:
1636 1636 yield changegroup.chunkheader(len(fname))
1637 1637 yield fname
1638 1638 lookup = lookuplinkrev_func(filerevlog)
1639 1639 for chnk in filerevlog.group(nodeiter, lookup):
1640 1640 self.ui.progress(
1641 1641 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1642 1642 cnt += 1
1643 1643 yield chnk
1644 1644 self.ui.progress(_('bundling files'), None)
1645 1645
1646 1646 yield changegroup.closechunk()
1647 1647
1648 1648 if nodes:
1649 1649 self.hook('outgoing', node=hex(nodes[0]), source=source)
1650 1650
1651 1651 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1652 1652
1653 1653 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1654 1654 """Add the changegroup returned by source.read() to this repo.
1655 1655 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1656 1656 the URL of the repo where this changegroup is coming from.
1657 1657
1658 1658 Return an integer summarizing the change to this repo:
1659 1659 - nothing changed or no source: 0
1660 1660 - more heads than before: 1+added heads (2..n)
1661 1661 - fewer heads than before: -1-removed heads (-2..-n)
1662 1662 - number of heads stays the same: 1
1663 1663 """
1664 1664 def csmap(x):
1665 1665 self.ui.debug("add changeset %s\n" % short(x))
1666 1666 return len(cl)
1667 1667
1668 1668 def revmap(x):
1669 1669 return cl.rev(x)
1670 1670
1671 1671 if not source:
1672 1672 return 0
1673 1673
1674 1674 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1675 1675
1676 1676 changesets = files = revisions = 0
1677 1677 efiles = set()
1678 1678
1679 1679 # write changelog data to temp files so concurrent readers will not see
1680 1680 # inconsistent view
1681 1681 cl = self.changelog
1682 1682 cl.delayupdate()
1683 1683 oldheads = len(cl.heads())
1684 1684
1685 1685 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1686 1686 try:
1687 1687 trp = weakref.proxy(tr)
1688 1688 # pull off the changeset group
1689 1689 self.ui.status(_("adding changesets\n"))
1690 1690 clstart = len(cl)
1691 1691 class prog(object):
1692 1692 step = _('changesets')
1693 1693 count = 1
1694 1694 ui = self.ui
1695 1695 total = None
1696 1696 def __call__(self):
1697 1697 self.ui.progress(self.step, self.count, unit=_('chunks'),
1698 1698 total=self.total)
1699 1699 self.count += 1
1700 1700 pr = prog()
1701 1701 source.callback = pr
1702 1702
1703 1703 if (cl.addgroup(source, csmap, trp) is None
1704 1704 and not emptyok):
1705 1705 raise util.Abort(_("received changelog group is empty"))
1706 1706 clend = len(cl)
1707 1707 changesets = clend - clstart
1708 1708 for c in xrange(clstart, clend):
1709 1709 efiles.update(self[c].files())
1710 1710 efiles = len(efiles)
1711 1711 self.ui.progress(_('changesets'), None)
1712 1712
1713 1713 # pull off the manifest group
1714 1714 self.ui.status(_("adding manifests\n"))
1715 1715 pr.step = _('manifests')
1716 1716 pr.count = 1
1717 1717 pr.total = changesets # manifests <= changesets
1718 1718 # no need to check for empty manifest group here:
1719 1719 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1720 1720 # no new manifest will be created and the manifest group will
1721 1721 # be empty during the pull
1722 1722 self.manifest.addgroup(source, revmap, trp)
1723 1723 self.ui.progress(_('manifests'), None)
1724 1724
1725 1725 needfiles = {}
1726 1726 if self.ui.configbool('server', 'validate', default=False):
1727 1727 # validate incoming csets have their manifests
1728 1728 for cset in xrange(clstart, clend):
1729 1729 mfest = self.changelog.read(self.changelog.node(cset))[0]
1730 1730 mfest = self.manifest.readdelta(mfest)
1731 1731 # store file nodes we must see
1732 1732 for f, n in mfest.iteritems():
1733 1733 needfiles.setdefault(f, set()).add(n)
1734 1734
1735 1735 # process the files
1736 1736 self.ui.status(_("adding file changes\n"))
1737 1737 pr.step = 'files'
1738 1738 pr.count = 1
1739 1739 pr.total = efiles
1740 1740 source.callback = None
1741 1741
1742 1742 while 1:
1743 1743 f = source.chunk()
1744 1744 if not f:
1745 1745 break
1746 1746 self.ui.debug("adding %s revisions\n" % f)
1747 1747 pr()
1748 1748 fl = self.file(f)
1749 1749 o = len(fl)
1750 1750 if fl.addgroup(source, revmap, trp) is None:
1751 1751 raise util.Abort(_("received file revlog group is empty"))
1752 1752 revisions += len(fl) - o
1753 1753 files += 1
1754 1754 if f in needfiles:
1755 1755 needs = needfiles[f]
1756 1756 for new in xrange(o, len(fl)):
1757 1757 n = fl.node(new)
1758 1758 if n in needs:
1759 1759 needs.remove(n)
1760 1760 if not needs:
1761 1761 del needfiles[f]
1762 1762 self.ui.progress(_('files'), None)
1763 1763
1764 1764 for f, needs in needfiles.iteritems():
1765 1765 fl = self.file(f)
1766 1766 for n in needs:
1767 1767 try:
1768 1768 fl.rev(n)
1769 1769 except error.LookupError:
1770 1770 raise util.Abort(
1771 1771 _('missing file data for %s:%s - run hg verify') %
1772 1772 (f, hex(n)))
1773 1773
1774 1774 newheads = len(cl.heads())
1775 1775 heads = ""
1776 1776 if oldheads and newheads != oldheads:
1777 1777 heads = _(" (%+d heads)") % (newheads - oldheads)
1778 1778
1779 1779 self.ui.status(_("added %d changesets"
1780 1780 " with %d changes to %d files%s\n")
1781 1781 % (changesets, revisions, files, heads))
1782 1782
1783 1783 if changesets > 0:
1784 1784 p = lambda: cl.writepending() and self.root or ""
1785 1785 self.hook('pretxnchangegroup', throw=True,
1786 1786 node=hex(cl.node(clstart)), source=srctype,
1787 1787 url=url, pending=p)
1788 1788
1789 1789 # make changelog see real files again
1790 1790 cl.finalize(trp)
1791 1791
1792 1792 tr.close()
1793 1793 finally:
1794 1794 tr.release()
1795 1795 if lock:
1796 1796 lock.release()
1797 1797
1798 1798 if changesets > 0:
1799 1799 # forcefully update the on-disk branch cache
1800 1800 self.ui.debug("updating the branch cache\n")
1801 1801 self.updatebranchcache()
1802 1802 self.hook("changegroup", node=hex(cl.node(clstart)),
1803 1803 source=srctype, url=url)
1804 1804
1805 1805 for i in xrange(clstart, clend):
1806 1806 self.hook("incoming", node=hex(cl.node(i)),
1807 1807 source=srctype, url=url)
1808 1808
1809 1809 # never return 0 here:
1810 1810 if newheads < oldheads:
1811 1811 return newheads - oldheads - 1
1812 1812 else:
1813 1813 return newheads - oldheads + 1
1814 1814
1815 1815
1816 1816 def stream_in(self, remote, requirements):
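# Summary of the stream_out wire format as it is consumed below (the
# parsing code is authoritative):
#   line 1: integer status - 0 ok, 1 operation forbidden, 2 remote lock failed
#   line 2: '<total_files> <total_bytes>'
#   then, per file: '<name>\0<size>\n' followed by exactly <size> bytes of data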
1817 1817 fp = remote.stream_out()
1818 1818 l = fp.readline()
1819 1819 try:
1820 1820 resp = int(l)
1821 1821 except ValueError:
1822 1822 raise error.ResponseError(
1823 1823 _('Unexpected response from remote server:'), l)
1824 1824 if resp == 1:
1825 1825 raise util.Abort(_('operation forbidden by server'))
1826 1826 elif resp == 2:
1827 1827 raise util.Abort(_('locking the remote repository failed'))
1828 1828 elif resp != 0:
1829 1829 raise util.Abort(_('the server sent an unknown error code'))
1830 1830 self.ui.status(_('streaming all changes\n'))
1831 1831 l = fp.readline()
1832 1832 try:
1833 1833 total_files, total_bytes = map(int, l.split(' ', 1))
1834 1834 except (ValueError, TypeError):
1835 1835 raise error.ResponseError(
1836 1836 _('Unexpected response from remote server:'), l)
1837 1837 self.ui.status(_('%d files to transfer, %s of data\n') %
1838 1838 (total_files, util.bytecount(total_bytes)))
1839 1839 start = time.time()
1840 1840 for i in xrange(total_files):
1841 1841 # XXX doesn't support '\n' or '\r' in filenames
1842 1842 l = fp.readline()
1843 1843 try:
1844 1844 name, size = l.split('\0', 1)
1845 1845 size = int(size)
1846 1846 except (ValueError, TypeError):
1847 1847 raise error.ResponseError(
1848 1848 _('Unexpected response from remote server:'), l)
1849 1849 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1850 1850 # for backwards compat, name was partially encoded
1851 1851 ofp = self.sopener(store.decodedir(name), 'w')
1852 1852 for chunk in util.filechunkiter(fp, limit=size):
1853 1853 ofp.write(chunk)
1854 1854 ofp.close()
1855 1855 elapsed = time.time() - start
1856 1856 if elapsed <= 0:
1857 1857 elapsed = 0.001
1858 1858 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1859 1859 (util.bytecount(total_bytes), elapsed,
1860 1860 util.bytecount(total_bytes / elapsed)))
1861 1861
1862 1862 # new requirements = old non-format requirements + new format-related
1863 1863 # requirements from the streamed-in repository
1864 1864 requirements.update(set(self.requirements) - self.supportedformats)
1865 1865 self._applyrequirements(requirements)
1866 1866 self._writerequirements()
1867 1867
1868 1868 self.invalidate()
1869 1869 return len(self.heads()) + 1
1870 1870
1871 1871 def clone(self, remote, heads=[], stream=False):
1872 1872 '''clone remote repository.
1873 1873
1874 1874 keyword arguments:
1875 1875 heads: list of revs to clone (forces use of pull)
1876 1876 stream: use streaming clone if possible'''
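# Decision sketch: a streaming clone is only attempted when no heads are
# requested and the remote advertises either the legacy 'stream' capability
# (revlogv1) or a 'streamreqs' set we fully support; otherwise we fall back
# to a regular pull.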
1877 1877
1878 1878 # now, all clients that can request uncompressed clones can
1879 1879 # read repo formats supported by all servers that can serve
1880 1880 # them.
1881 1881
1882 1882 # if revlog format changes, client will have to check version
1883 1883 # and format flags on "stream" capability, and use
1884 1884 # uncompressed only if compatible.
1885 1885
1886 1886 if stream and not heads:
1887 1887 # 'stream' means remote revlog format is revlogv1 only
1888 1888 if remote.capable('stream'):
1889 1889 return self.stream_in(remote, set(('revlogv1',)))
1890 1890 # otherwise, 'streamreqs' contains the remote revlog format
1891 1891 streamreqs = remote.capable('streamreqs')
1892 1892 if streamreqs:
1893 1893 streamreqs = set(streamreqs.split(','))
1894 1894 # if we support it, stream in and adjust our requirements
1895 1895 if not streamreqs - self.supportedformats:
1896 1896 return self.stream_in(remote, streamreqs)
1897 1897 return self.pull(remote, heads)
1898 1898
1899 1899 def pushkey(self, namespace, key, old, new):
1900 1900 return pushkey.push(self, namespace, key, old, new)
1901 1901
1902 1902 def listkeys(self, namespace):
1903 1903 return pushkey.list(self, namespace)
1904 1904
1905 1905 # used to avoid circular references so destructors work
1906 1906 def aftertrans(files):
1907 1907 renamefiles = [tuple(t) for t in files]
1908 1908 def a():
1909 1909 for src, dest in renamefiles:
1910 1910 util.rename(src, dest)
1911 1911 return a
1912 1912
1913 1913 def instance(ui, path, create):
1914 1914 return localrepository(ui, util.drop_scheme('file', path), create)
1915 1915
1916 1916 def islocal(path):
1917 1917 return True