##// END OF EJS Templates
subrepo: split non-core functions to new module...
Author: Yuya Nishihara
r36026:55e8efa2 default
parent child Browse files
Show More

Note: the requested changes are too large and the content shown below has been truncated. Use "Show full diff" to view the complete changeset.

@@ -1,3658 +1,3658
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help COMMAND` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65 from __future__ import absolute_import, print_function
66 66
67 67 import errno
68 68 import os
69 69 import re
70 70 import shutil
71 71 from mercurial.i18n import _
72 72 from mercurial.node import (
73 73 bin,
74 74 hex,
75 75 nullid,
76 76 nullrev,
77 77 short,
78 78 )
79 79 from mercurial import (
80 80 cmdutil,
81 81 commands,
82 82 dirstateguard,
83 83 encoding,
84 84 error,
85 85 extensions,
86 86 hg,
87 87 localrepo,
88 88 lock as lockmod,
89 89 logcmdutil,
90 90 patch as patchmod,
91 91 phases,
92 92 pycompat,
93 93 registrar,
94 94 revsetlang,
95 95 scmutil,
96 96 smartset,
97 subrepo,
97 subrepoutil,
98 98 util,
99 99 vfs as vfsmod,
100 100 )
101 101
release = lockmod.release
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Declare the configuration knobs this extension reads; semantics are
# described in the module docstring above.
configitem('mq', 'git',
           default='auto',
           )
configitem('mq', 'keepchanges',
           default=False,
           )
configitem('mq', 'plain',
           default=False,
           )
configitem('mq', 'secret',
           default=False,
           )

# force load strip extension formerly included in mq and import some utility
try:
    stripext = extensions.find('strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui(object):
        def debug(self, msg):
            pass
    stripext = extensions.load(dummyui(), 'strip', '')

strip = stripext.strip
checksubstate = stripext.checksubstate
checklocalchanges = stripext.checklocalchanges


# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
148 148
class statusentry(object):
    """One record of the applied-patch status file.

    Pairs the changeset node of an applied patch with the patch name;
    serialized as '<hex-node>:<name>'.
    """
    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __bytes__(self):
        return hex(self.node) + ':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)
158 158
# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
#   '# HG changeset patch',
    '# User ',
    '# Date ',
    '# ',
    '# Branch ',
    '# Node ID ',
    '# Parent ', # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
PLAINHEADERS = {
    'from': 0,
    'date': 1,
    'subject': 2,
}

def inserthgheader(lines, header, value):
    """Assuming lines contains a HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    # raises ValueError when the '# HG changeset patch' marker is absent
    scan = lines.index('# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    insertat = len(lines)
    while scan < len(lines):
        current = lines[scan]
        if not current.startswith('# '):
            # end of the header block: insert no later than here
            insertat = min(insertat, scan)
            break
        # every '# ' line matches at least the bare '# ' entry of HGHEADERS
        matched = next(k for k, prefix in enumerate(HGHEADERS)
                       if current.startswith(prefix))
        if matched == newindex:
            # header already present: overwrite it in place
            lines[scan] = header + value
            return lines
        if matched > newindex:
            # first header sorting after ours marks the insertion point, but
            # keep scanning in case our header itself shows up further down
            insertat = min(insertat, scan)
        scan += 1
    lines.insert(insertat, header + value)
    return lines
217 217
def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    targetprio = PLAINHEADERS[header.lower()]
    insertat = len(lines)
    for pos, line in enumerate(lines):
        if ':' not in line:
            # end of the header section; make sure a blank line separates
            # headers from the body before inserting
            if line:
                lines.insert(pos, '')
            insertat = min(insertat, pos)
            break
        existing = line.split(':', 1)[0].strip().lower()
        # unknown headers sort after the one we are inserting
        prio = PLAINHEADERS.get(existing, targetprio + 1)
        if prio == targetprio:
            # same header already present: overwrite in place
            lines[pos] = '%s: %s' % (header, value)
            return lines
        if prio > targetprio:
            insertat = min(insertat, pos)
    lines.insert(insertat, '%s: %s' % (header, value))
    return lines
254 254
class patchheader(object):
    """Parsed header of an mq patch file.

    Understands both 'hg export' style headers ('# HG changeset patch'
    blocks) and plain mail-style headers (From:/Date:/Subject:), and
    records where the diff itself starts.
    """
    def __init__(self, pf, plainmode=False):
        # pf: path to the patch file; plainmode: prefer mail-style headers
        # when adding new ones later
        def eatdiff(lines):
            # drop trailing diff-leader lines
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in open(pf, 'rb'):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # found the start of the diff proper; stop header parsing
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip() # handle double trailing space
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    # first non-header line ends the hgpatch block
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            # comments collects every header/message line verbatim
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        # plain (mail-style) mode when requested, or when the header carries
        # Date:/From: tags without an HG changeset marker
        self.plainmode = (plainmode or
                          '# HG changeset patch' not in self.comments and
                          any(c.startswith('Date: ') or
                              c.startswith('From: ')
                              for c in self.comments))

    def setuser(self, user):
        # Update or insert the user header; when no HG header block exists,
        # fall back to a plain header (plainmode) or create the HG block.
        try:
            inserthgheader(self.comments, '# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'From', user)
            else:
                tmp = ['# HG changeset patch', '# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        # Same strategy as setuser, for the date header.
        try:
            inserthgheader(self.comments, '# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'Date', date)
            else:
                tmp = ['# HG changeset patch', '# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        # Parent has no plain-header equivalent; only the HG block form.
        try:
            inserthgheader(self.comments, '# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = ['# HG changeset patch', '# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        # Replace the commit message, keeping the header comments intact.
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                # keep a separating blank line before the body in plain mode
                self.comments.append('')
            self.comments.append(message)

    def __bytes__(self):
        # Serialized header: comments followed by a blank separator line.
        s = '\n'.join(self.comments).rstrip()
        if not s:
            return ''
        return s + '\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            # remove each message line from comments, scanning forward
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
423 423
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    if phase is None and repo.ui.configbool('mq', 'secret'):
        # default new mq changesets to the secret phase when configured
        phase = phases.secret
    overrides = {('ui', 'allowemptycommit'): True}
    if phase is not None:
        overrides[('phases', 'new-commit')] = phase
    with repo.ui.configoverride(overrides, 'mq'):
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
440 440
class AbortNoCleanup(error.Abort):
    """Abort that preserves the work done so far.

    apply() catches this, closes the transaction and saves dirty queue
    state before re-raising, instead of rolling everything back.
    """
    pass
443 443
444 444 class queue(object):
    def __init__(self, ui, baseui, path, patchdir=None):
        """Initialize a patch queue rooted at path (normally .hg).

        patchdir, when given, overrides the active queue's patch directory.
        """
        self.basepath = path
        # 'patches.queue' names the active queue; empty or missing means the
        # default queue in <path>/patches
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = vfsmod.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        gitmode = ui.config('mq', 'git').lower()
        boolmode = util.parsebool(gitmode)
        if boolmode is not None:
            if boolmode:
                gitmode = 'yes'
            else:
                gitmode = 'no'
        self.gitmode = gitmode
        # deprecated config: mq.plain
        self.plainmode = ui.configbool('mq', 'plain')
        self.checkapplied = True
481 481
    @util.propertycache
    def applied(self):
        """List of statusentry objects parsed from the status file."""
        def parselines(lines):
            for l in lines:
                entry = l.split(':', 1)
                if len(entry) > 1:
                    n, name = entry
                    yield statusentry(bin(n), name)
                elif l.strip():
                    self.ui.warn(_('malformated mq status line: %s\n') % entry)
                # else we ignore empty lines
        try:
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        except IOError as e:
            if e.errno == errno.ENOENT:
                # no status file means no patches applied
                return []
            raise
500 500
501 501 @util.propertycache
502 502 def fullseries(self):
503 503 try:
504 504 return self.opener.read(self.seriespath).splitlines()
505 505 except IOError as e:
506 506 if e.errno == errno.ENOENT:
507 507 return []
508 508 raise
509 509
    @util.propertycache
    def series(self):
        # parseseries assigns self.series directly, shadowing this
        # propertycache, so the recursive-looking return reads the freshly
        # parsed instance attribute
        self.parseseries()
        return self.series
514 514
    @util.propertycache
    def seriesguards(self):
        # like 'series': parseseries assigns self.seriesguards itself
        self.parseseries()
        return self.seriesguards
519 519
520 520 def invalidate(self):
521 521 for a in 'applied fullseries series seriesguards'.split():
522 522 if a in self.__dict__:
523 523 delattr(self, a)
524 524 self.applieddirty = False
525 525 self.seriesdirty = False
526 526 self.guardsdirty = False
527 527 self.activeguards = None
528 528
529 529 def diffopts(self, opts=None, patchfn=None, plain=False):
530 530 """Return diff options tweaked for this mq use, possibly upgrading to
531 531 git format, and possibly plain and without lossy options."""
532 532 diffopts = patchmod.difffeatureopts(self.ui, opts,
533 533 git=True, whitespace=not plain, formatchanging=not plain)
534 534 if self.gitmode == 'auto':
535 535 diffopts.upgrade = True
536 536 elif self.gitmode == 'keep':
537 537 pass
538 538 elif self.gitmode in ('yes', 'no'):
539 539 diffopts.git = self.gitmode == 'yes'
540 540 else:
541 541 raise error.Abort(_('mq.git option can be auto/keep/yes/no'
542 542 ' got %s') % self.gitmode)
543 543 if patchfn:
544 544 diffopts = self.patchopts(diffopts, patchfn)
545 545 return diffopts
546 546
547 547 def patchopts(self, diffopts, *patches):
548 548 """Return a copy of input diff options with git set to true if
549 549 referenced patch is a git patch and should be preserved as such.
550 550 """
551 551 diffopts = diffopts.copy()
552 552 if not diffopts.git and self.gitmode == 'keep':
553 553 for patchfn in patches:
554 554 patchf = self.opener(patchfn, 'r')
555 555 # if the patch was a git patch, refresh it as a git patch
556 556 for line in patchf:
557 557 if line.startswith('diff --git'):
558 558 diffopts.git = True
559 559 break
560 560 patchf.close()
561 561 return diffopts
562 562
    def join(self, *p):
        """Join path components below the queue's patch directory."""
        return os.path.join(self.path, *p)
565 565
566 566 def findseries(self, patch):
567 567 def matchpatch(l):
568 568 l = l.split('#', 1)[0]
569 569 return l.strip() == patch
570 570 for index, l in enumerate(self.fullseries):
571 571 if matchpatch(l):
572 572 return index
573 573 return None
574 574
    # matches guard suffixes of a series entry, e.g. ' #+guard' or '#-no'
    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        """Split fullseries into patch names and per-patch guard lists."""
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise error.Abort(_('%s appears more than once in %s') %
                                      (patch, self.join(self.seriespath)))
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))
597 597
598 598 def checkguard(self, guard):
599 599 if not guard:
600 600 return _('guard cannot be an empty string')
601 601 bad_chars = '# \t\r\n\f'
602 602 first = guard[0]
603 603 if first in '-+':
604 604 return (_('guard %r starts with invalid character: %r') %
605 605 (guard, first))
606 606 for c in bad_chars:
607 607 if c in guard:
608 608 return _('invalid character in guard %r: %r') % (guard, c)
609 609
610 610 def setactive(self, guards):
611 611 for guard in guards:
612 612 bad = self.checkguard(guard)
613 613 if bad:
614 614 raise error.Abort(bad)
615 615 guards = sorted(set(guards))
616 616 self.ui.debug('active guards: %s\n' % ' '.join(guards))
617 617 self.activeguards = guards
618 618 self.guardsdirty = True
619 619
    def active(self):
        """Return the active guards, reading and validating the guards file
        on first use (invalid guards are warned about and skipped)."""
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    # report but keep going: a bad guard is simply ignored
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards
637 637
638 638 def setguards(self, idx, guards):
639 639 for g in guards:
640 640 if len(g) < 2:
641 641 raise error.Abort(_('guard %r too short') % g)
642 642 if g[0] not in '-+':
643 643 raise error.Abort(_('guard %r starts with invalid char') % g)
644 644 bad = self.checkguard(g[1:])
645 645 if bad:
646 646 raise error.Abort(bad)
647 647 drop = self.guard_re.sub('', self.fullseries[idx])
648 648 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
649 649 self.parseseries()
650 650 self.seriesdirty = True
651 651
652 652 def pushable(self, idx):
653 653 if isinstance(idx, str):
654 654 idx = self.series.index(idx)
655 655 patchguards = self.seriesguards[idx]
656 656 if not patchguards:
657 657 return True, None
658 658 guards = self.active()
659 659 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
660 660 if exactneg:
661 661 return False, repr(exactneg[0])
662 662 pos = [g for g in patchguards if g[0] == '+']
663 663 exactpos = [g for g in pos if g[1:] in guards]
664 664 if pos:
665 665 if exactpos:
666 666 return True, repr(exactpos[0])
667 667 return False, ' '.join(map(repr, pos))
668 668 return True, ''
669 669
    def explainpushable(self, idx, all_patches=False):
        """Tell the user why patch idx is or is not pushable.

        With all_patches, allowed patches are reported too (via ui.write);
        otherwise only skipped patches are reported (via ui.warn), and only
        in verbose mode.
        """
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
698 698
    def savedirty(self):
        """Write dirty state (status, series, guards) back to disk and
        schedule newly created patch files for addition to the queue repo."""
        def writelist(items, path):
            fp = self.opener(path, 'wb')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(bytes, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                # only add files not already tracked in the queue repo
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
719 719
720 720 def removeundo(self, repo):
721 721 undo = repo.sjoin('undo')
722 722 if not os.path.exists(undo):
723 723 return
724 724 try:
725 725 os.unlink(undo)
726 726 except OSError as inst:
727 727 self.ui.warn(_('error removing undo: %s\n') % str(inst))
728 728
    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        # copy=True keeps the working copy in place (e.g. --keep-changes);
        # otherwise the file is moved aside to its .orig path
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                self.ui.note(_('saving current version of %s as %s\n') %
                             (f, scmutil.origpath(self.ui, repo, f)))

                absorig = scmutil.origpath(self.ui, repo, absf)
                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)
742 742
    def printdiff(self, repo, diffopts, node1, node2=None, files=None,
                  fp=None, changes=None, opts=None):
        """Write a diff (or a diffstat when opts['stat'] is set) between
        node1 and node2, restricted to files, to fp or the ui."""
        if opts is None:
            opts = {}
        stat = opts.get('stat')
        m = scmutil.match(repo[node1], files, opts)
        logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
                                  changes, stat, fp)
751 751
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        """Apply one patch from mergeq on top of head, merging with rev when
        a plain apply fails. Returns (error, newnode)."""
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_("update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_("unable to read %s") % patch)

        # regenerate the patch file from the merged result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
790 790
    def qparents(self, repo, rev=None):
        """return the mq handled parent or p1

        In some case where mq get himself in being the parent of a merge the
        appropriate parent may be p2.
        (eg: an in progress merge started with mq disabled)

        If no parent are managed by mq, p1 is returned.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                # not a merge: p1 is the only candidate
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            # p2 is the mq-managed parent of this merge
            return p2
        return p1
811 811
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Merge each patch of series from mergeq into this queue.

        Returns (error, headnode)."""
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
850 850
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file'''
        # returns (success, touched-files, fuzz)
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception as inst:
            # deliberately broad: report the failure and let the caller
            # decide how to recover
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)
865 865
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        """Apply the given series under wlock/lock and a transaction.

        On AbortNoCleanup the transaction is committed rather than
        aborted, preserving the patches applied so far; see _apply for
        the meaning of the arguments and the return value.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                # keep what was applied: commit the transaction and state
                tr.close()
                self.savedirty()
                raise
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
894 894
def _apply(self, repo, series, list=False, update_status=True,
           strict=False, patchdir=None, merge=None, all_files=None,
           tobackup=None, keepchanges=False):
    """Apply each patch in *series* on top of the working directory.

    returns (error, hash)

    error = 1 for unable to read, 2 for patch failed, 3 for patch
    fuzz. tobackup is None or a set of files to backup before they
    are modified by a patch.
    """
    # TODO unify with commands.py
    if not patchdir:
        patchdir = self.path
    err = 0
    n = None  # node of the last commit created, returned to the caller
    for patchname in series:
        pushable, reason = self.pushable(patchname)
        if not pushable:
            self.explainpushable(patchname, all_patches=True)
            continue
        self.ui.status(_("applying %s\n") % patchname)
        pf = os.path.join(patchdir, patchname)

        try:
            ph = patchheader(self.join(patchname), self.plainmode)
        except IOError:
            self.ui.warn(_("unable to read %s\n") % patchname)
            err = 1
            break

        message = ph.message
        if not message:
            # The commit message should not be translated
            message = "imported patch %s\n" % patchname
        else:
            if list:
                # The commit message should not be translated
                message.append("\nimported patch %s" % patchname)
            message = '\n'.join(message)

        if ph.haspatch:
            if tobackup:
                # back up local modifications that the patch will touch
                touched = patchmod.changedfiles(self.ui, repo, pf)
                touched = set(touched) & tobackup
                if touched and keepchanges:
                    raise AbortNoCleanup(
                        _("conflicting local changes found"),
                        hint=_("did you forget to qrefresh?"))
                self.backup(repo, touched, copy=True)
                tobackup = tobackup - touched
            (patcherr, files, fuzz) = self.patch(repo, pf)
            if all_files is not None:
                all_files.update(files)
            patcherr = not patcherr
        else:
            self.ui.warn(_("patch %s is empty\n") % patchname)
            patcherr, files, fuzz = 0, [], 0

        if merge and files:
            # Mark as removed/merged and update dirstate parent info
            removed = []
            merged = []
            for f in files:
                if os.path.lexists(repo.wjoin(f)):
                    merged.append(f)
                else:
                    removed.append(f)
            with repo.dirstate.parentchange():
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
            p1, p2 = repo.dirstate.parents()
            repo.setparents(p1, merge)

        if all_files and '.hgsubstate' in all_files:
            # FIX: the diff hunk left both the old subrepo.submerge and
            # the new subrepoutil.submerge calls in place; keep only the
            # current subrepoutil API (subrepo non-core functions were
            # split out into the subrepoutil module).
            wctx = repo[None]
            pctx = repo['.']
            overwrite = False
            mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
                                                  overwrite)
            files += mergedsubstate.keys()

        match = scmutil.matchfiles(repo, files or [])
        oldtip = repo['tip']
        n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                      force=True)
        if repo['tip'] == oldtip:
            raise error.Abort(_("qpush exactly duplicates child changeset"))
        if n is None:
            raise error.Abort(_("repository commit failed"))

        if update_status:
            self.applied.append(statusentry(n, patchname))

        if patcherr:
            self.ui.warn(_("patch failed, rejects left in working "
                           "directory\n"))
            err = 2
            break

        if fuzz and strict:
            self.ui.warn(_("fuzz found when applying patch, stopping\n"))
            err = 3
            break
    return (err, n)
1000 1000
def _cleanup(self, patches, numrevs, keep=False):
    """Drop *patches* from the series and return finished nodes.

    Unless ``keep`` is set, the on-disk patch files are deleted and
    forgotten in the patch repository. The first ``numrevs`` applied
    entries are treated as finished (now regular changesets) and their
    nodes are returned.
    """
    if not keep:
        # remove the patch files themselves, forgiving a missing file
        mqrepo = self.qrepo()
        if mqrepo:
            mqrepo[None].forget(patches)
        for name in patches:
            try:
                os.unlink(self.join(name))
            except OSError as inst:
                if inst.errno != errno.ENOENT:
                    raise

    qfinished = []
    if numrevs:
        # the leading applied entries have become permanent changesets
        qfinished = self.applied[:numrevs]
        del self.applied[:numrevs]
        self.applieddirty = True

    unknown = []

    # delete from the back so earlier indexes stay valid
    for (i, name) in sorted([(self.findseries(p), p) for p in patches],
                            reverse=True):
        if i is None:
            unknown.append(name)
        else:
            del self.fullseries[i]

    if unknown:
        if numrevs:
            rev = dict((entry.name, entry.node) for entry in qfinished)
            for name in unknown:
                msg = _('revision %s refers to unknown patches: %s\n')
                self.ui.warn(msg % (short(rev[name]), name))
        else:
            msg = _('unknown patches: %s\n')
            raise error.Abort(''.join(msg % name for name in unknown))

    self.parseseries()
    self.seriesdirty = True
    return [entry.node for entry in qfinished]
1041 1041
def _revpatches(self, repo, revs):
    """Map applied revision numbers to their patch names.

    ``revs`` must be a sorted prefix of the applied stack; aborts when a
    revision is below the first applied patch or does not line up with
    the applied entries. Warns when a patch would be finalized with its
    auto-generated commit message.
    """
    firstrev = repo[self.applied[0].node].rev()
    patches = []
    for i, rev in enumerate(revs):

        if rev < firstrev:
            raise error.Abort(_('revision %d is not managed') % rev)

        ctx = repo[rev]
        base = self.applied[i].node
        if ctx.node() != base:
            msg = _('cannot delete revision %d above applied patches')
            raise error.Abort(msg % rev)

        patch = self.applied[i].name
        # flag patches that still carry a placeholder commit message
        for fmt in ('[mq]: %s', 'imported patch %s'):
            if ctx.description() == fmt % patch:
                msg = _('patch %s finalized without changeset message\n')
                repo.ui.status(msg % patch)
                break

        patches.append(patch)
    return patches
1065 1065
def finish(self, repo, revs):
    """Turn the applied revisions in *revs* into regular changesets.

    Moves the finished changesets out of the secret phase when the
    ``mq.secret`` option is in effect.
    """
    # Manually trigger phase computation to ensure phasedefaults is
    # executed before we remove the patches.
    repo._phasecache
    patches = self._revpatches(repo, sorted(revs))
    qfinished = self._cleanup(patches, len(patches))
    if qfinished and repo.ui.configbool('mq', 'secret'):
        # only use this logic when the secret option is added
        oldqbase = repo[qfinished[0]]
        tphase = phases.newcommitphase(repo.ui)
        if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
            with repo.transaction('qfinish') as tr:
                phases.advanceboundary(repo, tr, tphase, qfinished)
1079 1079
def delete(self, repo, patches, opts):
    """Delete patches by name and/or by revision (qdelete).

    Aborts on applied patches or names missing from the series file;
    ``opts['keep']`` preserves the patch files on disk.
    """
    if not patches and not opts.get('rev'):
        raise error.Abort(_('qdelete requires at least one revision or '
                            'patch name'))

    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise error.Abort(_("cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise error.Abort(_("patch %s not in series file") % patch)
        # keep the list duplicate-free while preserving order
        if patch not in realpatches:
            realpatches.append(patch)

    numrevs = 0
    if opts.get('rev'):
        if not self.applied:
            raise error.Abort(_('no patches applied'))
        revs = scmutil.revrange(repo, opts.get('rev'))
        revs.sort()
        revpatches = self._revpatches(repo, revs)
        realpatches += revpatches
        numrevs = len(revpatches)

    self._cleanup(realpatches, numrevs, opts.get('keep'))
1107 1107
def checktoppatch(self, repo):
    '''check that working directory is at qtip'''
    if not self.applied:
        # nothing applied: nothing to verify
        return None, None
    top = self.applied[-1].node
    patch = self.applied[-1].name
    if repo.dirstate.p1() != top:
        raise error.Abort(_("working directory revision is not qtip"))
    return top, patch
1117 1117
def putsubstate2changes(self, substatestate, changes):
    """Record '.hgsubstate' in the proper status column of *changes*.

    ``changes`` is a status-style list ([modified, added, removed, ...]);
    ``substatestate`` is the dirstate code of '.hgsubstate'. No-op when
    the file is already listed in one of the first three columns.
    """
    for files in changes[:3]:
        if '.hgsubstate' in files:
            return # already listed up
    # not yet listed up: pick the column from the dirstate code
    if substatestate in 'a?':
        column = 1          # added (or untracked-yet)
    elif substatestate in 'r':
        column = 2          # removed
    else:
        column = 0          # modified
    changes[column].append('.hgsubstate')
1129 1129
def checklocalchanges(self, repo, force=False, refresh=True):
    """Abort on uncommitted local changes (delegates to module helper).

    ``refresh`` selects the ", qrefresh first" flavour of the message.
    """
    excsuffix = ''
    if refresh:
        excsuffix = ', qrefresh first'
        # plain versions for i18n tool to detect them
        _("local changes found, qrefresh first")
        _("local changed subrepos found, qrefresh first")
    return checklocalchanges(repo, force, excsuffix)
1138 1138
1139 1139 _reserved = ('series', 'status', 'guards', '.', '..')
1140 1140 def checkreservedname(self, name):
1141 1141 if name in self._reserved:
1142 1142 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1143 1143 % name)
1144 1144 if name != name.strip():
1145 1145 # whitespace is stripped by parseseries()
1146 1146 raise error.Abort(_('patch name cannot begin or end with '
1147 1147 'whitespace'))
1148 1148 for prefix in ('.hg', '.mq'):
1149 1149 if name.startswith(prefix):
1150 1150 raise error.Abort(_('patch name cannot begin with "%s"')
1151 1151 % prefix)
1152 1152 for c in ('#', ':', '\r', '\n'):
1153 1153 if c in name:
1154 1154 raise error.Abort(_('%r cannot be used in the name of a patch')
1155 1155 % c)
1156 1156
def checkpatchname(self, name, force=False):
    """Validate *name* and, unless *force*, refuse to clobber a file."""
    self.checkreservedname(name)
    target = self.join(name)
    if not force and os.path.exists(target):
        if os.path.isdir(target):
            raise error.Abort(_('"%s" already exists as a directory')
                              % name)
        else:
            raise error.Abort(_('patch "%s" already exists') % name)
1165 1165
def makepatchname(self, title, fallbackname):
    """Return a suitable filename for title, adding a suffix to make
    it unique in the existing list"""
    # slugify the title; fall back when it is empty or reserved
    namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
    namebase = namebase[:75] # avoid too long name (issue5117)
    if namebase:
        try:
            self.checkreservedname(namebase)
        except error.Abort:
            namebase = fallbackname
    else:
        namebase = fallbackname
    name = namebase
    suffix = 0
    while True:
        if name not in self.fullseries:
            try:
                self.checkpatchname(name)
                return name
            except error.Abort:
                pass
        suffix += 1
        name = '%s__%s' % (namebase, suffix)
1190 1190
def checkkeepchanges(self, keepchanges, force):
    """Reject the mutually exclusive --force / --keep-changes combo."""
    if force and keepchanges:
        raise error.Abort(_('cannot use both --force and --keep-changes'))
1194 1194
def new(self, repo, patchfn, *pats, **opts):
    """Create a new patch *patchfn* from the current local changes (qnew).

    options:
    msg: a string or a no-argument function returning a string
    """
    msg = opts.get('msg')
    edit = opts.get('edit')
    editform = opts.get('editform', 'mq.qnew')
    user = opts.get('user')
    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
    if opts.get('checkname', True):
        self.checkpatchname(patchfn)
    inclsubs = checksubstate(repo)
    if inclsubs:
        substatestate = repo.dirstate['.hgsubstate']
    if opts.get('include') or opts.get('exclude') or pats:
        # detect missing files in pats
        def badfn(f, msg):
            if f != '.hgsubstate': # .hgsubstate is auto-created
                raise error.Abort('%s: %s' % (f, msg))
        match = scmutil.match(repo[None], pats, opts, badfn=badfn)
        changes = repo.status(match=match)
    else:
        changes = self.checklocalchanges(repo, force=True)
    commitfiles = list(inclsubs)
    for files in changes[:3]:
        commitfiles.extend(files)
    match = scmutil.matchfiles(repo, commitfiles)
    if len(repo[None].parents()) > 1:
        raise error.Abort(_('cannot manage merge changesets'))
    self.checktoppatch(repo)
    insert = self.fullseriesend()
    with repo.wlock():
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
        except IOError as e:
            raise error.Abort(_('cannot write patch "%s": %s')
                              % (patchfn, encoding.strtolocal(e.strerror)))
        try:
            defaultmsg = "[mq]: %s" % patchfn
            editor = cmdutil.getcommiteditor(editform=editform)
            if edit:
                def finishdesc(desc):
                    # fall back to the default message on an empty edit
                    if desc.rstrip():
                        return desc
                    else:
                        return defaultmsg
                # i18n: this message is shown in editor with "HG: " prefix
                extramsg = _('Leave message empty to use default message.')
                editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
                commitmsg = msg
            else:
                commitmsg = msg or defaultmsg

            n = newcommit(repo, None, commitmsg, user, date, match=match,
                          force=True, editor=editor)
            if n is None:
                raise error.Abort(_("repo commit failed"))
            try:
                self.fullseries[insert:insert] = [patchfn]
                self.applied.append(statusentry(n, patchfn))
                self.parseseries()
                self.seriesdirty = True
                self.applieddirty = True
                nctx = repo[n]
                ph = patchheader(self.join(patchfn), self.plainmode)
                if user:
                    ph.setuser(user)
                if date:
                    ph.setdate('%s %s' % date)
                ph.setparent(hex(nctx.p1().node()))
                msg = nctx.description().strip()
                if msg == defaultmsg.strip():
                    msg = ''
                ph.setmessage(msg)
                p.write(bytes(ph))
                if commitfiles:
                    parent = self.qparents(repo, n)
                    if inclsubs:
                        self.putsubstate2changes(substatestate, changes)
                    chunks = patchmod.diff(repo, node1=parent, node2=n,
                                           changes=changes, opts=diffopts)
                    for chunk in chunks:
                        p.write(chunk)
                p.close()
                r = self.qrepo()
                if r:
                    r[None].add([patchfn])
            except: # re-raises
                repo.rollback()
                raise
        except Exception:
            # the commit failed: do not leave a stale patch file behind
            patchpath = self.join(patchfn)
            try:
                os.unlink(patchpath)
            except OSError:
                self.ui.warn(_('error unlinking %s\n') % patchpath)
            raise
    self.removeundo(repo)
1299 1299
def isapplied(self, patch):
    """returns (index, rev, patch)"""
    for idx, entry in enumerate(self.applied):
        if entry.name == patch:
            return (idx, entry.node, entry.name)
    return None
1306 1306
# if the exact patch name does not exist, we try a few
# variations. If strict is passed, we try only #1
#
# 1) a number (as string) to indicate an offset in the series file
# 2) a unique substring of the patch name was given
# 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve a user-supplied patch reference to a series entry."""
    def partialname(s):
        # exact name, else unique substring, else qtip/qbase aliases
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        if self.series and self.applied:
            if s == 'qtip':
                return self.series[self.seriesend(True) - 1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch in self.series:
        return patch

    if not os.path.isfile(self.join(patch)):
        try:
            sno = int(patch)
        except (ValueError, OverflowError):
            pass
        else:
            if -len(self.series) <= sno < len(self.series):
                return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            # "name-N": N patches before name in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # "name+N": N patches after name in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
    raise error.Abort(_("patch %s not in series") % patch)
1373 1373
def push(self, repo, patch=None, force=False, list=False, mergeq=None,
         all=False, move=False, exact=False, nobackup=False,
         keepchanges=False):
    """Apply the next patch(es) onto the working directory (qpush).

    Returns 0 on success/no-op, 1 on error; propagates the error code
    of the underlying apply for fuzz/reject situations.
    """
    self.checkkeepchanges(keepchanges, force)
    diffopts = self.diffopts()
    with repo.wlock():
        heads = []
        for hs in repo.branchmap().itervalues():
            heads.extend(hs)
        if not heads:
            heads = [nullid]
        if repo.dirstate.p1() not in heads and not exact:
            self.ui.status(_("(working directory not at a head)\n"))

        if not self.series:
            self.ui.warn(_('no patches in series\n'))
            return 0

        # Suppose our series file is: A B C and the current 'top'
        # patch is B. qpush C should be performed (moving forward)
        # qpush B is a NOP (no change) qpush A is an error (can't
        # go backwards with qpush)
        if patch:
            patch = self.lookup(patch)
            info = self.isapplied(patch)
            if info and info[0] >= len(self.applied) - 1:
                self.ui.warn(
                    _('qpush: %s is already at the top\n') % patch)
                return 0

            pushable, reason = self.pushable(patch)
            if pushable:
                if self.series.index(patch) < self.seriesend():
                    raise error.Abort(
                        _("cannot push to a previous patch: %s") % patch)
            else:
                if reason:
                    reason = _('guarded by %s') % reason
                else:
                    reason = _('no matching guards')
                self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                return 1
        elif all:
            patch = self.series[-1]
            if self.isapplied(patch):
                self.ui.warn(_('all patches are currently applied\n'))
                return 0

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent
        # qpush without an argument is an error (nothing to
        # apply). This allows a loop of "...while hg qpush..." to
        # work as it detects an error when done
        start = self.seriesend()
        if start == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force and not keepchanges:
            self.checklocalchanges(repo, refresh=self.applied)

        if exact:
            if keepchanges:
                raise error.Abort(
                    _("cannot use --exact and --keep-changes together"))
            if move:
                raise error.Abort(_('cannot use --exact and --move '
                                    'together'))
            if self.applied:
                raise error.Abort(_('cannot push --exact with applied '
                                    'patches'))
            root = self.series[start]
            target = patchheader(self.join(root), self.plainmode).parent
            if not target:
                raise error.Abort(
                    _("%s does not have a parent recorded") % root)
            if not repo[target] == repo['.']:
                hg.update(repo, target)

        if move:
            if not patch:
                raise error.Abort(_("please specify the patch to move"))
            for fullstart, rpn in enumerate(self.fullseries):
                # strip markers for patch guards
                if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                    break
            for i, rpn in enumerate(self.fullseries[fullstart:]):
                # strip markers for patch guards
                if self.guard_re.split(rpn, 1)[0] == patch:
                    break
            index = fullstart + i
            assert index < len(self.fullseries)
            fullpatch = self.fullseries[index]
            del self.fullseries[index]
            self.fullseries.insert(fullstart, fullpatch)
            self.parseseries()
            self.seriesdirty = True

        self.applieddirty = True
        if start > 0:
            self.checktoppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1

        tobackup = set()
        if (not nobackup and force) or keepchanges:
            status = self.checklocalchanges(repo, force=True)
            if keepchanges:
                tobackup.update(status.modified + status.added +
                                status.removed + status.deleted)
            else:
                tobackup.update(status.modified + status.added)

        s = self.series[start:end]
        all_files = set()
        try:
            if mergeq:
                ret = self.mergepatch(repo, mergeq, s, diffopts)
            else:
                ret = self.apply(repo, s, list, all_files=all_files,
                                 tobackup=tobackup, keepchanges=keepchanges)
        except AbortNoCleanup:
            raise
        except: # re-raises
            self.ui.warn(_('cleaning up working directory...\n'))
            cmdutil.revert(self.ui, repo, repo['.'],
                           repo.dirstate.parents(), no_backup=True)
            # only remove unknown files that we know we touched or
            # created while patching
            for f in all_files:
                if f not in repo.dirstate:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
            self.ui.warn(_('done\n'))
            raise

        if not self.applied:
            return ret[0]
        top = self.applied[-1].name
        if ret[0] and ret[0] > 1:
            msg = _("errors during apply, please fix and qrefresh %s\n")
            self.ui.write(msg % top)
        else:
            self.ui.write(_("now at: %s\n") % top)
        return ret[0]
1520 1520
def pop(self, repo, patch=None, force=False, update=True, all=False,
        nobackup=False, keepchanges=False):
    """Unapply patches down to (and including) *patch* (qpop)."""
    self.checkkeepchanges(keepchanges, force)
    with repo.wlock():
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
            if not info:
                raise error.Abort(_("patch %s is not applied") % patch)

        if not self.applied:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if all:
            start = 0
        elif patch:
            start = info[0] + 1
        else:
            start = len(self.applied) - 1

        if start >= len(self.applied):
            self.ui.warn(_("qpop: %s is already at the top\n") % patch)
            return

        if not update:
            # force a dirstate update anyway when a parent is being popped
            parents = repo.dirstate.parents()
            rr = [x.node for x in self.applied]
            for p in parents:
                if p in rr:
                    self.ui.warn(_("qpop: forcing dirstate update\n"))
                    update = True
        else:
            # only update when a working-directory parent is popped
            parents = [p.node() for p in repo[None].parents()]
            needupdate = False
            for entry in self.applied[start:]:
                if entry.node in parents:
                    needupdate = True
                    break
            update = needupdate

        tobackup = set()
        if update:
            s = self.checklocalchanges(repo, force=force or keepchanges)
            if force:
                if not nobackup:
                    tobackup.update(s.modified + s.added)
            elif keepchanges:
                tobackup.update(s.modified + s.added +
                                s.removed + s.deleted)

        self.applieddirty = True
        end = len(self.applied)
        rev = self.applied[start].node

        try:
            heads = repo.changelog.heads(rev)
        except error.LookupError:
            node = short(rev)
            raise error.Abort(_('trying to pop unknown node %s') % node)

        if heads != [self.applied[-1].node]:
            raise error.Abort(_("popping would remove a revision not "
                                "managed by this patch queue"))
        if not repo[self.applied[-1].node].mutable():
            raise error.Abort(
                _("popping would remove a public revision"),
                hint=_("see 'hg help phases' for details"))

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            qp = self.qparents(repo, rev)
            ctx = repo[qp]
            m, a, r, d = repo.status(qp, '.')[:4]
            if d:
                raise error.Abort(_("deletions found between repo revs"))

            tobackup = set(a + m + r) & tobackup
            if keepchanges and tobackup:
                raise error.Abort(_("local changes found, qrefresh first"))
            self.backup(repo, tobackup)
            with repo.dirstate.parentchange():
                for f in a:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
            repo.setparents(qp, nullid)
        for patch in reversed(self.applied[start:end]):
            self.ui.status(_("popping %s\n") % patch.name)
        del self.applied[start:end]
        strip(self.ui, repo, [rev], update=False, backup=False)
        for s, state in repo['.'].substate.items():
            repo['.'].sub(s).get(state)
        if self.applied:
            self.ui.write(_("now at: %s\n") % self.applied[-1].name)
        else:
            self.ui.write(_("patch queue now empty\n"))
1627 1627
def diff(self, repo, pats, opts):
    """Print the diff of the topmost applied patch (qdiff)."""
    top, patch = self.checktoppatch(repo)
    if not top:
        self.ui.write(_("no patches applied\n"))
        return
    qp = self.qparents(repo, top)
    # --reverse swaps the comparison direction
    node1, node2 = (None, qp) if opts.get('reverse') else (qp, None)
    diffopts = self.diffopts(opts, patch)
    self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1640 1640
1641 1641 def refresh(self, repo, pats=None, **opts):
1642 1642 if not self.applied:
1643 1643 self.ui.write(_("no patches applied\n"))
1644 1644 return 1
1645 1645 msg = opts.get('msg', '').rstrip()
1646 1646 edit = opts.get('edit')
1647 1647 editform = opts.get('editform', 'mq.qrefresh')
1648 1648 newuser = opts.get('user')
1649 1649 newdate = opts.get('date')
1650 1650 if newdate:
1651 1651 newdate = '%d %d' % util.parsedate(newdate)
1652 1652 wlock = repo.wlock()
1653 1653
1654 1654 try:
1655 1655 self.checktoppatch(repo)
1656 1656 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1657 1657 if repo.changelog.heads(top) != [top]:
1658 1658 raise error.Abort(_("cannot qrefresh a revision with children"))
1659 1659 if not repo[top].mutable():
1660 1660 raise error.Abort(_("cannot qrefresh public revision"),
1661 1661 hint=_("see 'hg help phases' for details"))
1662 1662
1663 1663 cparents = repo.changelog.parents(top)
1664 1664 patchparent = self.qparents(repo, top)
1665 1665
1666 1666 inclsubs = checksubstate(repo, hex(patchparent))
1667 1667 if inclsubs:
1668 1668 substatestate = repo.dirstate['.hgsubstate']
1669 1669
1670 1670 ph = patchheader(self.join(patchfn), self.plainmode)
1671 1671 diffopts = self.diffopts({'git': opts.get('git')}, patchfn,
1672 1672 plain=True)
1673 1673 if newuser:
1674 1674 ph.setuser(newuser)
1675 1675 if newdate:
1676 1676 ph.setdate(newdate)
1677 1677 ph.setparent(hex(patchparent))
1678 1678
1679 1679 # only commit new patch when write is complete
1680 1680 patchf = self.opener(patchfn, 'w', atomictemp=True)
1681 1681
1682 1682 # update the dirstate in place, strip off the qtip commit
1683 1683 # and then commit.
1684 1684 #
1685 1685 # this should really read:
1686 1686 # mm, dd, aa = repo.status(top, patchparent)[:3]
1687 1687 # but we do it backwards to take advantage of manifest/changelog
1688 1688 # caching against the next repo.status call
1689 1689 mm, aa, dd = repo.status(patchparent, top)[:3]
1690 1690 changes = repo.changelog.read(top)
1691 1691 man = repo.manifestlog[changes[0]].read()
1692 1692 aaa = aa[:]
1693 1693 match1 = scmutil.match(repo[None], pats, opts)
1694 1694 # in short mode, we only diff the files included in the
1695 1695 # patch already plus specified files
1696 1696 if opts.get('short'):
1697 1697 # if amending a patch, we start with existing
1698 1698 # files plus specified files - unfiltered
1699 1699 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1700 1700 # filter with include/exclude options
1701 1701 match1 = scmutil.match(repo[None], opts=opts)
1702 1702 else:
1703 1703 match = scmutil.matchall(repo)
1704 1704 m, a, r, d = repo.status(match=match)[:4]
1705 1705 mm = set(mm)
1706 1706 aa = set(aa)
1707 1707 dd = set(dd)
1708 1708
1709 1709 # we might end up with files that were added between
1710 1710 # qtip and the dirstate parent, but then changed in the
1711 1711 # local dirstate. in this case, we want them to only
1712 1712 # show up in the added section
1713 1713 for x in m:
1714 1714 if x not in aa:
1715 1715 mm.add(x)
1716 1716 # we might end up with files added by the local dirstate that
1717 1717 # were deleted by the patch. In this case, they should only
1718 1718 # show up in the changed section.
1719 1719 for x in a:
1720 1720 if x in dd:
1721 1721 dd.remove(x)
1722 1722 mm.add(x)
1723 1723 else:
1724 1724 aa.add(x)
1725 1725 # make sure any files deleted in the local dirstate
1726 1726 # are not in the add or change column of the patch
1727 1727 forget = []
1728 1728 for x in d + r:
1729 1729 if x in aa:
1730 1730 aa.remove(x)
1731 1731 forget.append(x)
1732 1732 continue
1733 1733 else:
1734 1734 mm.discard(x)
1735 1735 dd.add(x)
1736 1736
1737 1737 m = list(mm)
1738 1738 r = list(dd)
1739 1739 a = list(aa)
1740 1740
1741 1741 # create 'match' that includes the files to be recommitted.
1742 1742 # apply match1 via repo.status to ensure correct case handling.
1743 1743 cm, ca, cr, cd = repo.status(patchparent, match=match1)[:4]
1744 1744 allmatches = set(cm + ca + cr + cd)
1745 1745 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1746 1746
1747 1747 files = set(inclsubs)
1748 1748 for x in refreshchanges:
1749 1749 files.update(x)
1750 1750 match = scmutil.matchfiles(repo, files)
1751 1751
1752 1752 bmlist = repo[top].bookmarks()
1753 1753
1754 1754 dsguard = None
1755 1755 try:
1756 1756 dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
1757 1757 if diffopts.git or diffopts.upgrade:
1758 1758 copies = {}
1759 1759 for dst in a:
1760 1760 src = repo.dirstate.copied(dst)
1761 1761 # during qfold, the source file for copies may
1762 1762 # be removed. Treat this as a simple add.
1763 1763 if src is not None and src in repo.dirstate:
1764 1764 copies.setdefault(src, []).append(dst)
1765 1765 repo.dirstate.add(dst)
1766 1766 # remember the copies between patchparent and qtip
1767 1767 for dst in aaa:
1768 1768 f = repo.file(dst)
1769 1769 src = f.renamed(man[dst])
1770 1770 if src:
1771 1771 copies.setdefault(src[0], []).extend(
1772 1772 copies.get(dst, []))
1773 1773 if dst in a:
1774 1774 copies[src[0]].append(dst)
1775 1775 # we can't copy a file created by the patch itself
1776 1776 if dst in copies:
1777 1777 del copies[dst]
1778 1778 for src, dsts in copies.iteritems():
1779 1779 for dst in dsts:
1780 1780 repo.dirstate.copy(src, dst)
1781 1781 else:
1782 1782 for dst in a:
1783 1783 repo.dirstate.add(dst)
1784 1784 # Drop useless copy information
1785 1785 for f in list(repo.dirstate.copies()):
1786 1786 repo.dirstate.copy(None, f)
1787 1787 for f in r:
1788 1788 repo.dirstate.remove(f)
1789 1789 # if the patch excludes a modified file, mark that
1790 1790 # file with mtime=0 so status can see it.
1791 1791 mm = []
1792 1792 for i in xrange(len(m) - 1, -1, -1):
1793 1793 if not match1(m[i]):
1794 1794 mm.append(m[i])
1795 1795 del m[i]
1796 1796 for f in m:
1797 1797 repo.dirstate.normal(f)
1798 1798 for f in mm:
1799 1799 repo.dirstate.normallookup(f)
1800 1800 for f in forget:
1801 1801 repo.dirstate.drop(f)
1802 1802
1803 1803 user = ph.user or changes[1]
1804 1804
1805 1805 oldphase = repo[top].phase()
1806 1806
1807 1807 # assumes strip can roll itself back if interrupted
1808 1808 repo.setparents(*cparents)
1809 1809 self.applied.pop()
1810 1810 self.applieddirty = True
1811 1811 strip(self.ui, repo, [top], update=False, backup=False)
1812 1812 dsguard.close()
1813 1813 finally:
1814 1814 release(dsguard)
1815 1815
1816 1816 try:
1817 1817 # might be nice to attempt to roll back strip after this
1818 1818
1819 1819 defaultmsg = "[mq]: %s" % patchfn
1820 1820 editor = cmdutil.getcommiteditor(editform=editform)
1821 1821 if edit:
1822 1822 def finishdesc(desc):
1823 1823 if desc.rstrip():
1824 1824 ph.setmessage(desc)
1825 1825 return desc
1826 1826 return defaultmsg
1827 1827 # i18n: this message is shown in editor with "HG: " prefix
1828 1828 extramsg = _('Leave message empty to use default message.')
1829 1829 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1830 1830 extramsg=extramsg,
1831 1831 editform=editform)
1832 1832 message = msg or "\n".join(ph.message)
1833 1833 elif not msg:
1834 1834 if not ph.message:
1835 1835 message = defaultmsg
1836 1836 else:
1837 1837 message = "\n".join(ph.message)
1838 1838 else:
1839 1839 message = msg
1840 1840 ph.setmessage(msg)
1841 1841
1842 1842 # Ensure we create a new changeset in the same phase than
1843 1843 # the old one.
1844 1844 lock = tr = None
1845 1845 try:
1846 1846 lock = repo.lock()
1847 1847 tr = repo.transaction('mq')
1848 1848 n = newcommit(repo, oldphase, message, user, ph.date,
1849 1849 match=match, force=True, editor=editor)
1850 1850 # only write patch after a successful commit
1851 1851 c = [list(x) for x in refreshchanges]
1852 1852 if inclsubs:
1853 1853 self.putsubstate2changes(substatestate, c)
1854 1854 chunks = patchmod.diff(repo, patchparent,
1855 1855 changes=c, opts=diffopts)
1856 1856 comments = bytes(ph)
1857 1857 if comments:
1858 1858 patchf.write(comments)
1859 1859 for chunk in chunks:
1860 1860 patchf.write(chunk)
1861 1861 patchf.close()
1862 1862
1863 1863 marks = repo._bookmarks
1864 1864 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
1865 1865 tr.close()
1866 1866
1867 1867 self.applied.append(statusentry(n, patchfn))
1868 1868 finally:
1869 1869 lockmod.release(tr, lock)
1870 1870 except: # re-raises
1871 1871 ctx = repo[cparents[0]]
1872 1872 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1873 1873 self.savedirty()
1874 1874 self.ui.warn(_('qrefresh interrupted while patch was popped! '
1875 1875 '(revert --all, qpush to recover)\n'))
1876 1876 raise
1877 1877 finally:
1878 1878 wlock.release()
1879 1879 self.removeundo(repo)
1880 1880
1881 1881 def init(self, repo, create=False):
1882 1882 if not create and os.path.isdir(self.path):
1883 1883 raise error.Abort(_("patch queue directory already exists"))
1884 1884 try:
1885 1885 os.mkdir(self.path)
1886 1886 except OSError as inst:
1887 1887 if inst.errno != errno.EEXIST or not create:
1888 1888 raise
1889 1889 if create:
1890 1890 return self.qrepo(create=True)
1891 1891
1892 1892 def unapplied(self, repo, patch=None):
1893 1893 if patch and patch not in self.series:
1894 1894 raise error.Abort(_("patch %s is not in series file") % patch)
1895 1895 if not patch:
1896 1896 start = self.seriesend()
1897 1897 else:
1898 1898 start = self.series.index(patch) + 1
1899 1899 unapplied = []
1900 1900 for i in xrange(start, len(self.series)):
1901 1901 pushable, reason = self.pushable(i)
1902 1902 if pushable:
1903 1903 unapplied.append((i, self.series[i]))
1904 1904 self.explainpushable(i)
1905 1905 return unapplied
1906 1906
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (part of) the patch series to the ui.

        With *missing*, list files found in the patch directory that are
        not in the series file instead.  *status* ('A'/'U'/'G') filters
        non-verbose output; *summary* appends the first line of each patch
        message.
        """
        def displayname(pfx, patchname, state):
            # one output line: optional prefix, colorized patch name and,
            # when summarizing, the first message line fitted to the terminal
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = ''

                if self.ui.formatted():
                    # the 2 accounts for the ': ' separator written below
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                # width of the largest printed index, for column alignment
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    # non-verbose: show only entries matching the filter
                    continue
                displayname(pfx, patch, state)
        else:
            # walk the patch directory for files unknown to the series,
            # skipping mq's own bookkeeping files and hidden files
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1966 1966
1967 1967 def issaveline(self, l):
1968 1968 if l.name == '.hg.patches.save.line':
1969 1969 return True
1970 1970
    def qrepo(self, create=False):
        """Return the versioned patch repository nested in the patch
        directory, creating it when *create* is set.

        Returns None (implicitly) when no such repository exists and
        create is False.
        """
        ui = self.baseui.copy()
        # copy back attributes set by ui.pager()
        if self.ui.pageractive and not ui.pageractive:
            ui.pageractive = self.ui.pageractive
            # internal config: ui.formatted
            ui.setconfig('ui', 'formatted',
                         self.ui.config('ui', 'formatted'), 'mqpager')
            ui.setconfig('ui', 'interactive',
                         self.ui.config('ui', 'interactive'), 'mqpager')
        if create or os.path.isdir(self.join(".hg")):
            return hg.repository(ui, path=self.path, create=create)
1983 1983
1984 1984 def restore(self, repo, rev, delete=None, qupdate=None):
1985 1985 desc = repo[rev].description().strip()
1986 1986 lines = desc.splitlines()
1987 1987 i = 0
1988 1988 datastart = None
1989 1989 series = []
1990 1990 applied = []
1991 1991 qpp = None
1992 1992 for i, line in enumerate(lines):
1993 1993 if line == 'Patch Data:':
1994 1994 datastart = i + 1
1995 1995 elif line.startswith('Dirstate:'):
1996 1996 l = line.rstrip()
1997 1997 l = l[10:].split(' ')
1998 1998 qpp = [bin(x) for x in l]
1999 1999 elif datastart is not None:
2000 2000 l = line.rstrip()
2001 2001 n, name = l.split(':', 1)
2002 2002 if n:
2003 2003 applied.append(statusentry(bin(n), name))
2004 2004 else:
2005 2005 series.append(l)
2006 2006 if datastart is None:
2007 2007 self.ui.warn(_("no saved patch data found\n"))
2008 2008 return 1
2009 2009 self.ui.warn(_("restoring status: %s\n") % lines[0])
2010 2010 self.fullseries = series
2011 2011 self.applied = applied
2012 2012 self.parseseries()
2013 2013 self.seriesdirty = True
2014 2014 self.applieddirty = True
2015 2015 heads = repo.changelog.heads()
2016 2016 if delete:
2017 2017 if rev not in heads:
2018 2018 self.ui.warn(_("save entry has children, leaving it alone\n"))
2019 2019 else:
2020 2020 self.ui.warn(_("removing save entry %s\n") % short(rev))
2021 2021 pp = repo.dirstate.parents()
2022 2022 if rev in pp:
2023 2023 update = True
2024 2024 else:
2025 2025 update = False
2026 2026 strip(self.ui, repo, [rev], update=update, backup=False)
2027 2027 if qpp:
2028 2028 self.ui.warn(_("saved queue repository parents: %s %s\n") %
2029 2029 (short(qpp[0]), short(qpp[1])))
2030 2030 if qupdate:
2031 2031 self.ui.status(_("updating queue directory\n"))
2032 2032 r = self.qrepo()
2033 2033 if not r:
2034 2034 self.ui.warn(_("unable to load queue repository\n"))
2035 2035 return 1
2036 2036 hg.clean(r, qpp[0])
2037 2037
2038 2038 def save(self, repo, msg=None):
2039 2039 if not self.applied:
2040 2040 self.ui.warn(_("save: no patches applied, exiting\n"))
2041 2041 return 1
2042 2042 if self.issaveline(self.applied[-1]):
2043 2043 self.ui.warn(_("status is already saved\n"))
2044 2044 return 1
2045 2045
2046 2046 if not msg:
2047 2047 msg = _("hg patches saved state")
2048 2048 else:
2049 2049 msg = "hg patches: " + msg.rstrip('\r\n')
2050 2050 r = self.qrepo()
2051 2051 if r:
2052 2052 pp = r.dirstate.parents()
2053 2053 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2054 2054 msg += "\n\nPatch Data:\n"
2055 2055 msg += ''.join('%s\n' % x for x in self.applied)
2056 2056 msg += ''.join(':%s\n' % x for x in self.fullseries)
2057 2057 n = repo.commit(msg, force=True)
2058 2058 if not n:
2059 2059 self.ui.warn(_("repo commit failed\n"))
2060 2060 return 1
2061 2061 self.applied.append(statusentry(n, '.hg.patches.save.line'))
2062 2062 self.applieddirty = True
2063 2063 self.removeundo(repo)
2064 2064
2065 2065 def fullseriesend(self):
2066 2066 if self.applied:
2067 2067 p = self.applied[-1].name
2068 2068 end = self.findseries(p)
2069 2069 if end is None:
2070 2070 return len(self.fullseries)
2071 2071 return end + 1
2072 2072 return 0
2073 2073
2074 2074 def seriesend(self, all_patches=False):
2075 2075 """If all_patches is False, return the index of the next pushable patch
2076 2076 in the series, or the series length. If all_patches is True, return the
2077 2077 index of the first patch past the last applied one.
2078 2078 """
2079 2079 end = 0
2080 2080 def nextpatch(start):
2081 2081 if all_patches or start >= len(self.series):
2082 2082 return start
2083 2083 for i in xrange(start, len(self.series)):
2084 2084 p, reason = self.pushable(i)
2085 2085 if p:
2086 2086 return i
2087 2087 self.explainpushable(i)
2088 2088 return len(self.series)
2089 2089 if self.applied:
2090 2090 p = self.applied[-1].name
2091 2091 try:
2092 2092 end = self.series.index(p)
2093 2093 except ValueError:
2094 2094 return 0
2095 2095 return nextpatch(end + 1)
2096 2096 return nextpatch(end)
2097 2097
2098 2098 def appliedname(self, index):
2099 2099 pname = self.applied[index].name
2100 2100 if not self.ui.verbose:
2101 2101 p = pname
2102 2102 else:
2103 2103 p = str(self.series.index(pname)) + " " + pname
2104 2104 return p
2105 2105
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        *files* are patch file paths (or '-' for stdin); *rev*, when given,
        is a list of revision specs to convert into patches instead.
        *existing* registers files already inside the patch directory;
        *patchname* forces the imported patch's name (single import only).
        Returns the list of imported patch names.

        NOTE(review): command callers always pass rev as a list ([] when
        unused); calling this directly with rev=None and exactly one file
        would hit len(rev) below and raise TypeError -- confirm before
        relying on direct calls with rev=None.
        """
        def checkseries(patchname):
            # refuse names already present in the series file
            if patchname in self.series:
                raise error.Abort(_('patch %s is already in the series file')
                                  % patchname)

        if rev:
            if files:
                raise error.Abort(_('option "-r" not valid when importing '
                                    'files'))
            rev = scmutil.revrange(repo, rev)
            # import from tip-most down so each insert lands at position 0
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_('no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(_('option "-n" not valid when importing multiple '
                                'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(_('revision %d is the root of more than one '
                                    'branch') % rev.last())
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(_('revision %d is already managed')
                                      % rev.first())
                if heads != [self.applied[-1].node]:
                    raise error.Abort(_('revision %d is not the parent of '
                                        'the queue') % rev.first())
                # base is reused: now the revision of the first applied patch
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(_('revision %d has unmanaged children')
                                      % rev.first())
                lastparent = None

            diffopts = self.diffopts({'git': git})
            with repo.transaction('qimport') as tr:
                for r in rev:
                    # imported revisions must be draft (mutable), linear,
                    # and non-merge
                    if not repo[r].mutable():
                        raise error.Abort(_('revision %d is not mutable') % r,
                                          hint=_("see 'hg help phases' "
                                                 'for details'))
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(_('cannot import merge revision %d')
                                          % r)
                    if lastparent and lastparent != r:
                        raise error.Abort(_('revision %d is not the parent of '
                                            '%d')
                                          % (r, lastparent))
                    lastparent = p1

                    if not patchname:
                        # derive a unique name from the first summary line
                        patchname = self.makepatchname(
                            repo[r].description().split('\n', 1)[0],
                            '%d.diff' % r)
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    patchf = self.opener(patchname, "w")
                    cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                    patchf.close()

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool('mq', 'secret'):
                    # if we added anything with --rev, move the secret root
                    # (n is the node of the last revision imported above)
                    phases.retractboundary(repo, tr, phases.secret, [n])
            self.parseseries()
            self.applieddirty = True
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == '-':
                    raise error.Abort(_('-e is incompatible with import from -')
                                      )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    # NOTE(review): originpath is only bound when the url is
                    # local; a non-local url with -e and -n would NameError
                    # here -- confirm intended input domain
                    self.ui.write(_('renaming %s to %s\n')
                                  % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == '-' and not patchname:
                    raise error.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # splice after the last applied patch, preserving file order
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2246 2246
def fixkeepchangesopts(ui, opts):
    """Inject keep_changes=True into a copy of *opts* when the mq.keepchanges
    config is set and neither --force nor --exact was given; otherwise return
    *opts* unchanged."""
    keep = ui.configbool('mq', 'keepchanges')
    if not keep or opts.get('force') or opts.get('exact'):
        return opts
    newopts = dict(opts)
    newopts['keep_changes'] = True
    return newopts
2254 2254
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    # validation and series/status bookkeeping live on the queue object
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.savedirty()
    return 0
2273 2273
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    # 'end' is one past the last patch to display
    if not patch:
        end = q.seriesend(True)
    elif patch in q.series:
        end = q.series.index(patch) + 1
    else:
        raise error.Abort(_("patch %s is not in series file") % patch)

    start = 0
    if opts.get('last'):
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        # show only the patch just below the top one
        start = end - 2
        end = 1

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2307 2307
2308 2308
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    # 'start' is the first unapplied position to display
    if not patch:
        start = q.seriesend(True)
    elif patch in q.series:
        start = q.series.index(patch) + 1
    else:
        raise error.Abort(_("patch %s is not in series file") % patch)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    length = 1 if opts.get('first') else None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2336 2336
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
    with repo.lock(): # cause this may move phase
        mq = repo.mq
        try:
            imported = mq.qimport(
                repo, filename, patchname=opts.get('name'),
                rev=opts.get('rev'), git=opts.get('git'),
                existing=opts.get('existing'), force=opts.get('force'))
        finally:
            # persist whatever series/status changes were made, even on abort
            mq.savedirty()

        if imported and opts.get('push') and not opts.get('rev'):
            return mq.push(repo, imported[-1])
    return 0
2395 2395
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    qrepo = q.init(repo, create)
    q.savedirty()
    if not qrepo:
        # unversioned queue: nothing else to set up
        return 0
    if not os.path.exists(qrepo.wjoin('.hgignore')):
        fp = qrepo.wvfs('.hgignore', 'w')
        # ignore mq bookkeeping files and any nested .hg/.mq directories
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(qrepo.wjoin('series')):
        qrepo.wvfs('series', 'w').close()
    qrepo[None].add(['.hgignore', 'series'])
    commands.add(ui, qrepo)
    return 0
2421 2421
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # opts keys are native strs; the r'' prefix keeps the key native on py3
    return qinit(ui, repo, create=opts.get(r'create_repo'))
2437 2437
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + cmdutil.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'),
         norepo=True)
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only)
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(_('versioned patch repository not found'
                            ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None here, so repo[qbase] is the
        # working context, not the first applied patch; presumably the
        # intent was to test repo.mq.applied[0].node's phase -- verify
        # before changing.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                # clone everything except the applied patches, so they can
                # be stripped cheaply on the destination below
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2525 2525
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'),
         inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # commit runs against the nested patch repository, not the main repo
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise error.Abort('no queue repository')
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
2539 2539
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    # delegate to the queue; opts keys are native strs, hence the r'' keys
    repo.mq.qseries(repo, summary=opts.get(r'summary'),
                    missing=opts.get(r'missing'))
    return 0
2551 2551
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # position just past the last applied patch, or 0 when none applied
    pos = q.seriesend(True) if q.applied else 0
    if not pos:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=pos - 1, length=1, status='A',
              summary=opts.get(r'summary'))
2569 2569
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    pos = q.seriesend()
    # seriesend() returns the series length when nothing is left to push
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get(r'summary'))
2581 2581
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # the patch below the current top
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get(r'summary'))
2598 2598
def setupheaderopts(ui, opts):
    """Fill in the 'user'/'date' opts from -U/-D flags when not given
    explicitly; mutates *opts* in place."""
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2604 2604
@command("^qnew",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + cmdutil.walkopts + cmdutil.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
         inferrepo=True)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    opts = pycompat.byteskwargs(opts)
    # resolve -m/-l into a single message and stash it where q.new expects
    message = cmdutil.logmessage(ui, opts)
    opts['msg'] = message
    setupheaderopts(ui, opts)
    mq = repo.mq
    mq.new(repo, patch, *args, **pycompat.strkwargs(opts))
    mq.savedirty()
    return 0
2651 2651
@command("^qrefresh",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + cmdutil.walkopts + cmdutil.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
         inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    # **opts arrives with native-str keys; queue code expects bytes keys.
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    # setupheaderopts folds the -u/-d/-U/-D options into opts before the
    # refresh (defined elsewhere in this module).
    setupheaderopts(ui, opts)
    # Both the patch refresh and the queue-state write happen under the
    # working-directory lock, and the queue's return value is propagated.
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret
2697 2697
@command("^qdiff",
         cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'),
         inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # Convert the kwargs to bytes keys up front, then hand everything
    # off to the queue's diff implementation with paging enabled.
    byteopts = pycompat.byteskwargs(opts)
    ui.pager('qdiff')
    q = repo.mq
    q.diff(repo, pats, byteopts)
    return 0
2720 2720
@command('qfold',
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + cmdutil.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # A patch named twice on the command line, or qtip itself,
            # is skipped rather than folded a second time.  (Previously
            # this warned but still appended the duplicate.)
            ui.warn(_('skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                              % p)
        patches.append(p)

    # Apply each patch on top of the working directory, collecting the
    # patch headers so they can be merged into the commit message below.
    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    # No explicit -m/-l message: concatenate the folded headers onto the
    # current patch's header, separated by '* * *' lines.
    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        # Refresh qtip with the cumulative changes, then drop the folded
        # patch entries (and files, unless --keep) from the series.
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
2788 2788
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = fixkeepchangesopts(ui, pycompat.byteskwargs(opts))
    q = repo.mq
    target = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    # An already-applied target means we pop down to it; an unapplied
    # one means we push up to it.  Both take the same keyword options.
    if q.isapplied(target):
        mover = q.pop
    else:
        mover = q.push
    status = mover(repo, target, force=opts.get('force'), nobackup=nobackup,
                   keepchanges=keepchanges)
    q.savedirty()
    return status
2813 2813
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # Print one series entry with its guards, colorized by state.
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    # opts is not passed through byteskwargs here, so its keys are native
    # strings; the r'' prefix keeps the lookup keys native strings too
    # (previously 'none' lacked the prefix, inconsistent with r'list').
    if opts.get(r'list'):
        if args or opts.get(r'none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                                'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        # No explicit patch name: default to the topmost applied patch.
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get(r'none'):
        # Setting guards (possibly clearing them all with --none).
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # Query mode: just print the named patch's guards.
        status(q.series.index(q.lookup(patch)))
2888 2888
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # Resolve the requested name, defaulting to the topmost applied patch.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)

    ph = patchheader(q.join(patch), q.plainmode)
    ui.write('\n'.join(ph.message) + '\n')
2906 2906
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of *path*.

    Scans the directory containing *path* for sibling entries named
    '<base>.<N>' (as produced by savename()) and returns the full path
    and integer index N of the highest-numbered one, or (None, None)
    when no such entry exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name so regex metacharacters in a queue name
    # cannot break or loosen the match, match the separator dot
    # literally, and anchor the numeric suffix so e.g. 'base.1.bak'
    # is not mistaken for a save entry.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2923 2923
def savename(path):
    """Return the next unused save name in the '<path>.N' sequence."""
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    suffix = ".%d" % (index + 1)
    return path + suffix
2930 2930
@command("^qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    # Deprecated --merge path: push while merging against a second queue,
    # either an explicitly named one (-n) or the most recent qsave copy.
    if opts.get('merge'):
        if opts.get('name'):
            newpath = repo.vfs.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    # Note: unlike qpop, no explicit savedirty() here — q.push is
    # presumably responsible for persisting queue state; verify in queue.
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                 keepchanges=opts.get('keep_changes'))
    return ret
2976 2976
@command("^qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    # Deprecated -n: pop from an alternate queue directory; in that case
    # the working directory is not updated (update=False below).
    if opts.get('name'):
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
3014 3014
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    # One-argument form: the sole argument is the new name and the
    # current (topmost applied) patch is the one being renamed.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    # Renaming "into" an existing directory keeps the old basename,
    # like mv(1).
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # Update the series file entry, preserving any '#guard' annotations.
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # If the patch is applied, rewrite its status entry as well.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    # Move the patch file itself (creating subdirectories as needed).
    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # If the patch directory is itself a versioned repository, record
    # the rename there too: an added-but-uncommitted file is simply
    # re-added under the new name, otherwise a copy+forget keeps history.
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == 'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()
3069 3069
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    # Resolve the revision, then delegate entirely to the queue object.
    node = repo.lookup(rev)
    q = repo.mq
    dodelete = opts.get(r'delete')
    doupdate = opts.get(r'update')
    q.restore(repo, node, delete=dodelete, qupdate=doupdate)
    q.savedirty()
    return 0
3084 3084
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + cmdutil.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    # -c/--copy: snapshot the whole patch directory, either to an
    # explicitly named directory (-n, with -f to overwrite) or to the
    # next automatically numbered '<path>.N' save name.
    if opts.get('copy'):
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                        'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                        'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    # -e/--empty: clear the applied-patch status after saving.
    if opts.get('empty'):
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3123 3123
3124 3124
@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    # pushable(i): would the i-th applied patch be pushable under the
    # currently active guards?
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get('none'):
        # Change the active guard selection, then report how the counts
        # of pushable/guarded patches shifted (unless --pop/--reapply
        # will change the stack anyway).
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s/--series: tally every guard used in the series file.
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        # Python 2: dict.items() returns a sortable list.  Sort by guard
        # name with its leading '+'/'-' sign stripped.
        guards = guards.items()
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: just print the active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # --reapply implies --pop: remember the current top so we can push
    # back up to it after popping the guarded patches.
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # Pop down to just below the first applied patch that is now
        # guarded (or everything, if the bottom patch is guarded).
        for i in xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3234 3234
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    # opts was not run through byteskwargs, so its keys are native
    # strings; the r'' prefix keeps the lookup keys native too.
    if not opts.get(r'applied') and not revrange:
        raise error.Abort(_('no revisions specified'))
    elif opts.get(r'applied'):
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves the responsibility of
    # locking the repo to the caller to avoid deadlock with wlock. This
    # command code is responsible for that locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3276 3276
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
          ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # Registry files inside .hg: the list of all queues and the name of
    # the active one.  The default queue lives in .hg/patches, any other
    # queue NAME in .hg/patches-NAME.
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # Name of the active queue, derived from the queue's directory.
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True if the queue registry file does not exist yet.
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # All registered queue names (always including the active one),
        # sorted; falls back to just the default queue if no registry.
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # Switch the active queue, refusing while patches are applied.
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                                'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # Record the active queue; an empty file means the default queue.
        fh = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        # Append a queue name to the registry file.
        fh = repo.vfs(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # Directory holding the named queue's patches.
        if name == 'patches':
            return repo.vfs.join('patches')
        else:
            return repo.vfs.join('patches-' + name)

    def _validname(name):
        # Queue names may not contain path separators, ':' or '.'.
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        # Remove a queue from the registry (the patch dir is left alone;
        # --purge removes it separately).  Rewrites the registry via a
        # temporary file and renames it into place.
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        repo.vfs.rename('patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    # Listing modes need no name and take no lock.
    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    # All mutating modes run under the working-directory lock.
    with repo.wlock():
        existing = _getqueues()

        if opts.get('create'):
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)
            if _noqueues():
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get('rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(_('can\'t rename "%s" to its current name')
                                  % name)
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(_('non-queue directory "%s" already exists') %
                                  newdir)

            # Rewrite the registry with the new name substituted, moving
            # the patch directory when the renamed entry is written.
            fh = repo.vfs('patches.queues.new', 'w')
            for queue in existing:
                if queue == current:
                    fh.write('%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write('%s\n' % (queue,))
            fh.close()
            repo.vfs.rename('patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get('delete'):
            _delete(name)
        elif opts.get('purge'):
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            # Bare name: switch to an existing queue.
            if name not in existing:
                raise error.Abort(_('use --create to create a new queue'))
            _setactive(name)
3453 3453
def mqphasedefaults(repo, roots):
    """callback used to set mq changeset as secret when no phase data exists"""
    # Nothing to do when no mq patches are applied.
    if not repo.mq.applied:
        return roots
    if repo.ui.configbool('mq', 'secret'):
        mqphase = phases.secret
    else:
        mqphase = phases.draft
    # Mark the bottom-most applied patch as the phase root.
    qbase = repo[repo.mq.applied[0].node]
    roots[mqphase].add(qbase.node())
    return roots
3464 3464
def reposetup(ui, repo):
    """Per-repository setup: graft mq awareness onto the repo class."""
    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            # lazily instantiated queue object, cached on the unfiltered repo
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            if localrepo.hasunfilteredcache(self, 'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), 'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            # refuse to proceed when a working-directory parent is an
            # applied mq patch (unless forced or checks are disabled)
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise error.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra=None):
            if extra is None:
                extra = {}
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, pushop):
            # refuse to push applied patches unless they are all secret
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            # synthesize the qtip/qbase/qparent convenience tags
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)
3556 3556
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg import' so it refuses to commit over an applied mq patch."""
    ismqrepo = util.safehasattr(repo, 'abortifwdirpatched')
    if ismqrepo and not kwargs.get(r'no_commit', False):
        repo.abortifwdirpatched(_('cannot import over an applied patch'),
                                kwargs.get(r'force'))
    return orig(ui, repo, *args, **kwargs)
3563 3563
def mqinit(orig, ui, *args, **kwargs):
    """Wrap 'hg init' so --mq initializes the patch queue repository."""
    wantmq = kwargs.pop(r'mq', None)
    if not wantmq:
        # no --mq: defer to the wrapped command untouched
        return orig(ui, *args, **kwargs)

    if not args:
        repopath = cmdutil.findrepo(pycompat.getcwd())
        if not repopath:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
    else:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
3582 3582
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""
    # strip the option first: some commands choke on unknown options
    usemq = kwargs.pop(r'mq', None)
    if not usemq:
        return orig(ui, repo, *args, **kwargs)

    queuerepo = repo.mq.qrepo()
    if not queuerepo:
        raise error.Abort(_('no queue repository'))
    return orig(queuerepo.ui, queuerepo, *args, **kwargs)
3597 3597
def summaryhook(ui, repo):
    """Contribute an 'mq:' line to the output of 'hg summary'."""
    q = repo.mq
    napplied = len(q.applied)
    nunapplied = len(q.unapplied(repo))
    parts = []
    if napplied:
        parts.append(ui.label(_("%d applied"), 'qseries.applied') % napplied)
    if nunapplied:
        parts.append(ui.label(_("%d unapplied"), 'qseries.unapplied')
                     % nunapplied)
    if parts:
        # i18n: column positioning for "hg summary"
        ui.write(_("mq: %s\n") % ', '.join(parts))
    else:
        # i18n: column positioning for "hg summary"
        ui.note(_("mq: (empty queue)\n"))
3612 3612
# registry collecting revset predicates defined by this extension
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('mq()')
def revsetmq(repo, subset, x):
    """Changesets managed by MQ.
    """
    # mq() takes no arguments; abort with a message otherwise
    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
    applied = set([repo[r.node].rev() for r in repo.mq.applied])
    return smartset.baseset([r for r in subset if r in applied])

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
3625 3625
def extsetup(ui):
    """Wire the mq command wrappers into the command tables."""
    # Ensure mq wrappers are called first, regardless of extension load order by
    # NOT wrapping in uisetup() and instead deferring to init stage two here.
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)
    cmdutil.summaryhooks.add('mq', summaryhook)

    initentry = extensions.wrapcommand(commands.table, 'init', mqinit)
    initentry[1].extend(mqopt)

    def wraptable(cmdtable):
        # add --mq to every command that operates on a repository
        for name, tableentry in cmdtable.iteritems():
            primary = cmdutil.parsealiases(name)[0]
            if tableentry[0].norepo:
                continue
            wrapped = extensions.wrapcommand(cmdtable, primary, mqcommand)
            wrapped[1].extend(mqopt)

    wraptable(commands.table)

    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            wraptable(getattr(extmodule, 'cmdtable', {}))
3651 3651
# default color/effect labels used when rendering qseries/qguard output
colortable = {'qguard.negative': 'red',
              'qguard.positive': 'yellow',
              'qguard.unguarded': 'green',
              'qseries.applied': 'blue bold underline',
              'qseries.guarded': 'black bold',
              'qseries.missing': 'red bold',
              'qseries.unapplied': 'black bold'}
@@ -1,3164 +1,3164
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import tempfile
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 )
22 22
23 23 from . import (
24 24 bookmarks,
25 25 changelog,
26 26 copies,
27 27 crecord as crecordmod,
28 28 dirstateguard,
29 29 encoding,
30 30 error,
31 31 formatter,
32 32 logcmdutil,
33 33 match as matchmod,
34 34 obsolete,
35 35 patch,
36 36 pathutil,
37 37 pycompat,
38 38 registrar,
39 39 revlog,
40 40 rewriteutil,
41 41 scmutil,
42 42 smartset,
43 subrepoutil,
43 44 templater,
44 45 util,
45 46 vfs as vfsmod,
46 47 )
# convenient alias for the StringIO factory used by record/diff helpers
stringio = util.stringio

# templates of common command options
#
# each entry is (shortopt, longopt, default, help[, value placeholder])
# in the format expected by the command tables

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
161 162
def ishunk(x):
    """Return True if x is a record/crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
165 166
def newandmodified(chunks, originalchunks):
    """Return names of newly-added files whose hunks were edited by the user."""
    return set(c.header.filename() for c in chunks
               if ishunk(c) and c.header.isnewfile()
               and c not in originalchunks)
173 174
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    Leading '^' markers (flagging commonly-used commands) are discarded.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
176 177
def setupwrapcolorwrite(ui):
    """Wrap ui.write so diff output can be labeled/colorized.

    Returns the original write method so the caller can restore it.
    """
    origwrite = ui.write

    def labelledwrite(*args, **kw):
        baselabel = kw.pop(r'label', '')
        for chunk, chunklabel in patch.difflabel(lambda: args):
            origwrite(chunk, label=baselabel + chunklabel)

    setattr(ui, 'write', labelledwrite)
    return origwrite
189 190
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user select hunks, via the curses UI or the text prompter."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        chooser = crecordmod.testdecorator(testfile,
                                           crecordmod.testchunkselector)
    else:
        chooser = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
202 203
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, opts).

    *operation* is used to build ui messages telling the user what kind
    of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    wantcurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    # diff output must be colorized while the selector runs
    origwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, wantcurses, testfile,
                            operation)
    finally:
        ui.write = origwrite
219 220
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and hand them to *commitfunc*.

    *filterfn* is the hunk-selection UI (e.g. recordfilter); *cmdsuggest*
    names the command to suggest when the terminal is non-interactive;
    *backupall* backs up every changed file instead of only the ones
    touched by the selected hunks.
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # always produce git-style diffs so renames/copies/modes survive
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers have files(); plain hunk objects do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # the backup dir may survive a previous crashed run
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # run the driver with the working-copy lock held
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
402 403
class dirnode(object):
    """
    Represent a directory in the user working copy with the information
    needed to decide whether its status output can be tersed.

    path is the path to the directory.

    statuses is the set of status codes of all files in this directory
    (including all files in all subdirectories).

    files is a list of (name, status) pairs for direct children.

    subdirs maps each sub-directory name to its own dirnode object.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record a file as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to the right descendant.

        If filename contains a path separator, recurse into (creating if
        needed) the matching sub-directory node; otherwise the file is a
        direct child. Either way the status is remembered at this level.
        """
        if '/' not in filename:
            self._addfileindir(filename, status)
        else:
            childname, remainder = filename.split('/', 1)
            child = self.subdirs.get(childname)
            if child is None:
                child = dirnode(os.path.join(self.path, childname))
                self.subdirs[childname] = child
            child.addfile(remainder, status)

        # every status seen anywhere below is also recorded at this level
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, os.path.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) pairs for this subtree, tersed when possible.

        terseargs is the string of status abbreviations the user passed
        with the `--terse` flag.

        If every file below this directory shares a single status and the
        user asked to terse that status, a single (status, dirpath + sep)
        entry is emitted. Otherwise this directory's own files are
        emitted, followed by a recursive walk of each sub-directory.
        """
        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # terse only when this status abbreviation was requested
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # emit this directory's direct files
        for entry in self.iterfilepaths():
            yield entry

        # then recurse into each sub-directory
        for child in self.subdirs.values():
            for entry in child.tersewalk(terseargs):
                yield entry
502 503
def tersedir(statuslist, terseargs):
    """
    Terse the status listing when all files in a directory share a status.

    statuslist is a scmutil.status() object holding one file list per
    status; terseargs is the string of status abbreviations the user
    passed to the `--terse` flag.

    Builds a tree of dirnode objects recording, per directory, which
    statuses occur below it, then walks the tree tersing where allowed.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # validate the --terse argument up front
    for ch in pycompat.bytestr(terseargs):
        if ch not in allst:
            raise error.Abort(_("'%s' not recognized") % ch)

    # dirnode for the root of the repository
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # the root itself is never tersed; add its direct files as-is
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each subtree, collecting (possibly tersed) entries
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    return [sorted(tersedict[st]) for st in allst]
551 552
552 553 def _commentlines(raw):
553 554 '''Surround lineswith a comment char and a new line'''
554 555 lines = raw.splitlines()
555 556 commentedlines = ['# %s' % line for line in lines]
556 557 return '\n'.join(commentedlines) + '\n'
557 558
def _conflictsmsg(repo):
    """Return a commented summary of unresolved merge conflicts, if any."""
    # avoid merge cycle
    from . import merge as mergemod
    ms = mergemod.mergestate.read(repo)
    if not ms.active():
        return

    matcher = scmutil.match(repo[None])
    unresolved = [f for f in ms.unresolved() if matcher(f)]
    if not unresolved:
        msg = _('No unresolved merge conflicts.')
    else:
        cwd = pycompat.getcwd()
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, cwd, path)
             for path in unresolved])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr

    return _commentlines(msg)
580 581
def _helpmessage(continuecmd, abortcmd):
    """Return a commented two-line continue/abort hint."""
    text = _('To continue: %s\n'
             'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(text)
585 586
def _rebasemsg():
    """Hint shown while a rebase is in progress."""
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')
588 589
def _histeditmsg():
    """Hint shown while a histedit is in progress."""
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')
591 592
def _unshelvemsg():
    """Hint shown while an unshelve is in progress."""
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
594 595
def _updatecleanmsg(dest=None):
    """Build the 'hg update --clean' suggestion (discards local changes)."""
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)
598 599
def _graftmsg():
    """Hint shown while a graft is in progress."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())
602 603
def _mergemsg():
    """Hint shown while an uncommitted merge is pending."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())
606 607
def _bisectmsg():
    """Hint shown while a bisection is in progress."""
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)
612 613
def fileexistspredicate(filename):
    """Return a predicate testing for *filename* under the repo's .hg/."""
    return lambda repo: repo.vfs.exists(filename)
615 616
def _mergepredicate(repo):
    """True when the working directory is an uncommitted merge."""
    return len(repo[None].parents()) > 1
618 619
# Detection table for unfinished multi-step operations, scanned in order
# by _getrepostate().
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
632 633
def _getrepostate(repo):
    """Return (state, predicate, msgfn) for the first unfinished state found.

    Returns None when no unfinished (non-skipped) state is detected.
    """
    # experimental config: commands.status.skipstates
    skipped = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, predicate, msgfn in STATES:
        if state in skipped or not predicate(repo):
            continue
        return (state, predicate, msgfn)
641 642
def morestatus(repo, fm):
    """Emit extra 'hg status' output describing any unfinished state."""
    statetuple = _getrepostate(repo)
    if not statetuple:
        return

    label = 'status.morestatus'
    state, statedetectionpredicate, helpfulmsg = statetuple
    fm.startitem()
    statemsg = _('The repository is in an unfinished *%s* state.') % state
    fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
    conflicts = _conflictsmsg(repo)
    if conflicts:
        fm.write('conflictsmsg', '%s\n', conflicts, label=label)
    if helpfulmsg:
        fm.write('helpmsg', '%s\n', helpfulmsg(), label=label)
656 657
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    choice = {}
    debugchoice = {}
    allcmds = []
    for tablekey in keys:
        aliases = parsealiases(tablekey)
        allcmds.extend(aliases)
        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            # accept unambiguous prefixes
            for alias in aliases:
                if alias.startswith(cmd):
                    matched = alias
                    break
        if matched is None:
            continue
        if aliases[0].startswith("debug") or matched.startswith("debug"):
            debugchoice[matched] = (aliases, table[tablekey])
        else:
            choice[matched] = (aliases, table[tablekey])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
694 695
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
710 711
def changebranch(ui, repo, revs, label):
    """ Change the branch name of given revs to label

    Rewrites every changeset in 'revs' with the new branch name.  The
    revisions must form a linear, non-merge, non-obsolete stack whose
    heads are topological heads; otherwise error.Abort is raised.  Old
    nodes are obsoleted (markers + bookmark moves via
    scmutil.cleanupnodes) and the working copy is moved onto the
    rewritten parent when applicable.
    """

    with repo.wlock(), repo.lock(), repo.transaction('branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            # wrap in _() for i18n, consistent with every other Abort here
            raise error.Abort(_("empty revision set"))
        roots = repo.revs('roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(_("cannot change branch of non-linear revisions"))
        rewriteutil.precheck(repo, revs, 'change branch of')

        root = repo[roots.first()]
        if not root.p1().branch() == label and label in repo.branchmap():
            raise error.Abort(_("a branch of the same name already exists"))

        if repo.revs('merge() and %ld', revs):
            raise error.Abort(_("cannot change branch of a merge commit"))
        if repo.revs('obsolete() and %ld', revs):
            raise error.Abort(_("cannot change branch of a obsolete changeset"))

        # make sure only topological heads
        if repo.revs('heads(%ld) - head()', revs):
            raise error.Abort(_("cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context
        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug("changing branch of '%s' from '%s' to '%s'\n"
                     % (hex(ctx.node()), oldbranch, label))
            extra = ctx.extra()
            # remember the pre-rewrite node in the changeset extras
            extra['branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(repo, (p1, p2),
                                ctx.description(),
                                ctx.files(),
                                filectxfn,
                                user=ctx.user(),
                                date=ctx.date(),
                                extra=extra,
                                branch=label)

            # preserve the phase of the original changeset
            commitphase = ctx.phase()
            overrides = {('phases', 'new-commit'): commitphase}
            with repo.ui.configoverride(overrides, 'branch-change'):
                newnode = repo.commitctx(mc)

            replacements[ctx.node()] = (newnode,)
            ui.debug('new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(repo, replacements, 'branch-change')

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg
                hg.update(repo, newid[0], quietempty=True)

        ui.status(_("changed branch on %d changesets\n") % len(replacements))
801 802
def findrepo(p):
    """Walk up from directory 'p' looking for a Mercurial repository.

    Returns the first ancestor (including 'p' itself) containing a
    '.hg' directory, or None when the filesystem root is reached
    without finding one.
    """
    cur = p
    while True:
        if os.path.isdir(os.path.join(cur, ".hg")):
            return cur
        parent = os.path.dirname(cur)
        if parent == cur:
            # dirname() is a fixed point at the root: give up
            return None
        cur = parent
809 810
def bailifchanged(repo, merge=True, hint=None):
    """Abort unless the working directory is clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # modified, added, removed or deleted files all count as dirty
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
827 828
def logmessage(ui, opts):
    """Extract the commit message from the -m/-l options.

    Returns the message text (None when neither option was given);
    aborts when both options are given or the log file can't be read.
    """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if message or not logfile:
        return message
    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings while reading the log file
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, encoding.strtolocal(inst.strerror)))
846 847
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a ctx with two parents is a merge
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (".merge" if ismerge else ".normal")
863 864
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forceeditor(r, c, s):
            return commitforceeditor(r, c, s, finishdesc=finishdesc,
                                     extramsg=extramsg, editform=editform)
        return forceeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
894 895
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the filename pattern 'pat' into a concrete file name.

    Supported '%' specifiers:
      %%  literal '%'             %b  basename of the repo root
      %H  full hex node           %h  short hex node
      %R  changelog revision      %r  zero-padded revision ('revwidth')
      %m  first line of 'desc' with non-word characters replaced by '_'
      %N  'total'                 %n  'seqno' (padded to width of 'total')
      %s / %d / %p  basename / dirname / full 'pathname'

    Node-, sequence- and path-based specifiers are only available when
    the corresponding argument is provided; an unavailable or unknown
    specifier raises error.Abort.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: '%d' % repo.changelog.rev(node),
        'h': lambda: short(node),
        # raw string: '[^\w]' was an invalid escape sequence (an error in
        # Python 3.12); r'[^\w]' is the intended regex
        'm': lambda: re.sub(r'[^\w]', '_', desc or '')
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        # the two node-conditional updates were previously duplicated
        # 'if node:' blocks; they are merged here
        if node:
            expander.update(node_expander)
            expander['r'] = (lambda:
                    ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: '%d' % total
        if seqno is not None:
            expander['n'] = lambda: '%d' % seqno
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
940 941
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # empty/None and '-' both mean the standard streams
    if pat:
        return pat == '-'
    return True
944 945
class _unclosablefile(object):
    """Proxy around a file object whose close() is a no-op.

    Used to hand out stdio streams to callers that may close() the
    returned object without actually shutting the real stream down.
    """

    def __init__(self, fp):
        self._stream = fp

    def close(self):
        # deliberately do nothing: the wrapped stream must stay open
        pass

    def __iter__(self):
        return iter(self._stream)

    def __getattr__(self, attr):
        # delegate everything else (write, read, name, ...) to the
        # wrapped file object
        return getattr(self._stream, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # like close(), leaving the context must not close the stream
        pass
963 964
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the file described by pattern 'pat' (see makefilename).

    A pattern naming stdio returns a wrapper around ui.fout/ui.fin
    whose close() is a no-op.  'modemap' tracks per-file modes so a
    file written once during this run is appended to afterwards.
    """

    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        # hand out the real stdio stream, but protect it from close()
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write truncates; later writes to the same name append
            modemap[fn] = 'ab'
    return open(fn, mode)
982 983
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dirarg = opts['dir']

    # validate mutually exclusive / incompatible option combinations
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dirarg:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dirarg:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dirarg:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dirarg)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if r:
        return r
    # fall back to opening the revlog file directly from disk
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise error.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                         file_[:-2] + ".i")
1027 1028
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or rename, when 'rename' is True) working-directory files.

    The last element of 'pats' is the destination; the remaining
    patterns select the sources.  Copies/renames are recorded in the
    dirstate.  Returns True when at least one file failed to copy.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    # expand one source pattern into (abs, rel, exact) tuples, warning
    # about files in states that cannot be copied
    def walkpat(pat):
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(wctx, [pat], opts, globbed=True)
        for abs in wctx.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        # refuse to clobber an existing or tracked target without --force
        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # case-only rename: go through a temporary name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, encoding.strtolocal(inst.strerror)))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                repo.wvfs.unlinkpath(abssrc)
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(pycompat.ossep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist at the target
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
1273 1274
## facility to let extension process additional data into an import patch
# list of identifier to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
# (keys are the identifiers registered in the two lists above)
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1294 1295
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node or None, rejects flag)
    tuple; returns (None, None, False) when the patch contained no
    diff to apply.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    # --bypass applies to the store via patchrepo/memctx (see the else
    # branch below) instead of touching the working directory
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # no diff found in the input: nothing to do
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except error.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform,
                                             **pycompat.strkwargs(opts))
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                overrides = {}
                if partial:
                    overrides[('ui', 'allowemptycommit')] = True
                with repo.ui.configoverride(overrides, 'import'):
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                for idfunc in extrapostimport:
                    extrapostimportmap[idfunc](repo[n])
        else:
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except error.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.memctx(repo, (p1.node(), p2.node()),
                                        message,
                                        files=files,
                                        filectxfn=store,
                                        user=user,
                                        date=date,
                                        branch=branch,
                                        editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        # always remove the temporary patch file created by patch.extract
        os.unlink(tmpname)
1465 1466
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
1473 1474
def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
    """Emit one changeset as an 'HG changeset patch' through 'write'."""
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    if switch_parent:
        # diff against the second parent instead of the first
        parents.reverse()
    prev = parents[0] if parents else nullid
    branch = ctx.branch()

    # patch header
    write("# HG changeset patch\n")
    write("# User %s\n" % ctx.user())
    write("# Date %d %d\n" % ctx.date())
    write("# %s\n" % util.datestr(ctx.date()))
    if branch and branch != 'default':
        write("# Branch %s\n" % branch)
    write("# Node ID %s\n" % hex(node))
    write("# Parent %s\n" % hex(prev))
    if len(parents) > 1:
        write("# Parent %s\n" % hex(parents[1]))

    # extension-provided extra header lines
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            write('# %s\n' % header)
    write(ctx.description().rstrip())
    write("\n\n")

    # the diff itself
    for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
        write(chunk, label=label)
1506 1507
def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      fntemplate: An optional string to use for generating patch file names.
      fp: An optional file-like object to which patches should be written.
      switch_parent: If True, show diffs against second parent when not nullid.
                     Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fp is specified: All revs are written to the specified
                         file-like object.
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Neither fp nor template specified: All revs written to repo.ui.write()
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    # pick the output callback once when a single destination is used;
    # the per-file case rebinds 'write' inside the loop below
    write = None
    dest = '<unnamed>'
    if fp:
        dest = getattr(fp, 'name', dest)
        def write(s, **kw):
            fp.write(s)
    elif not fntemplate:
        write = repo.ui.write

    for seqno, rev in enumerate(revs, 1):
        ctx = repo[rev]
        fo = None
        if not fp and fntemplate:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # Commit always has a first line.
            fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
                             total=total, seqno=seqno, revwidth=revwidth,
                             mode='wb', modemap=filemode)
            dest = fo.name
            def write(s, **kw):
                fo.write(s)
            if not dest.startswith('<'):
                repo.ui.note("%s\n" % dest)
        _exportsingle(
            repo, ctx, match, switch_parent, rev, seqno, write, opts)
        if fo is not None:
            fo.close()
1565 1566
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist(map(hex, successors), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parentnodes), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date was already shown above; drop it from the metadata dict
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    smeta = util.rapply(pycompat.maybebytestr, metadata)
    fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1587 1588
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    m = scmutil.matchall(repo)
    matched = {}

    def prep(ctx, fns):
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    # walkchangerevs visits newest-first, so the first hit is the tip
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
1608 1609
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes starting at ``windowsize``, doubling each step,
    then repeating the final size forever once ``sizelimit`` is reached."""
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1614 1615
def _walkrevs(repo, opts):
    """Resolve the revisions a log-style walk should visit (newest first).

    The default --rev value depends on --follow, while --follow behavior
    in turn depends on the revisions resolved from --rev...
    """
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif not (opts.get('follow') or opts.get('follow_first')):
        revs = smartset.spanset(repo)
    elif repo.dirstate.p1() == nullid:
        # following from an unborn working directory parent: nothing to walk
        revs = smartset.baseset()
    else:
        revs = repo.revs('reverse(:.)')
    revs.reverse()
    return revs
1629 1630
class FileWalkError(Exception):
    """Raised by walkfilerevs() when the requested history cannot be
    walked using filelogs alone, signalling callers to use the slow path."""
    pass
1632 1633
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode) pairs: first the explicitly matched
        # files, then any rename sources appended to 'copies' while walking
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        # 'last' is the newest filelog revision we care about for this file
        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
            # XXX insert 1327 fix here
            if flparentlinkrevs:
                ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                # rename source: queue it so iterfiles() walks it too
                copies.append(copied)

    return wanted
1729 1730
class _followfilter(object):
    """Incrementally classify revisions as descendants or ancestors of the
    first revision ever fed to match()."""

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            parentrevs = self.repo.changelog.parentrevs(rev)
            if self.onlyfirst:
                # restrict the walk to the first-parent chain
                return parentrevs[0:1]
            return [p for p in parentrevs if p != nullrev]

        # the first revision seen anchors the walk and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: rev matches when one of its parents is already known
            if not self.roots:
                self.roots.add(self.startrev)
            if any(p in self.roots for p in realparents(rev)):
                self.roots.add(rev)
                return True
            return False

        # backwards: rev matches when it is a known root; its parents then
        # replace it in the root set
        if not self.roots:
            self.roots.update(realparents(self.startrev))
        if rev not in self.roots:
            return False
        self.roots.remove(rev)
        self.roots.update(realparents(rev))
        return True
1767 1768
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order.  Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _walkrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (not match.always() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    # NOTE(review): on Python 3, filter() is lazy and always
                    # truthy; this code targets Python 2 semantics (xrange
                    # used throughout) — verify before a py3 port.
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): when the fast path produced a plain set,
                # 'set - list' is not supported by Python itself; this
                # presumably relies on smartset/lazywantedset semantics —
                # verify --prune combined with file patterns.
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather up to windowsize wanted revisions
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # call prepare() in forward (ascending) order ...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ... then yield contexts in the walk order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1904 1905
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Add files matched by 'match' to the repository and its subrepos.

    Returns the list of names that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # record names the matcher flags as bad, then delegate to the original
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # check for case collisions with already-tracked names
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepositories; explicitonly is forced unless --subrepos
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
1947 1948
def addwebdirpath(repo, serverpath, webconf):
    """Register 'repo' under 'serverpath' in the hgweb config dict, then
    recurse into every subrepository that ever appeared in .hgsub."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug('adding %s = %s\n' % (serverpath, root))

    # visit every revision touching .hgsub so historical subrepos are
    # registered as well
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
1956 1957
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files (without deleting them) in repo and
    its subrepositories.

    Returns a (bad, forgot) pair: names that could not be forgotten, and
    names that actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2004 2005
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of ctx matched by m through formatter fm.

    Returns 0 when at least one file was listed, 1 otherwise."""
    rev = ctx.rev()
    dirstate = ctx.repo().dirstate
    ret = 1

    for f in ctx.matches(m):
        # in the working context, skip files marked for removal
        if rev is None and dirstate[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        recurse = m.exact(subpath) or subrepos
        if not (recurse or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                ret = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % m.abs(subpath))

    return ret
2034 2035
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal from repo and its subrepos.

    after: only drop files already deleted from disk; force: remove even
    modified/added files.  Returns 0 on success, 1 if anything was warned
    about.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # when called recursively for a subrepo, warnings accumulate in the
    # caller's list and are flushed once at the top level (warn=False here)
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    # NOTE(review): 'list' below shadows the builtin; kept byte-identical
    # here, but a rename would be cleaner.
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only drop what is already gone from disk; warn about
        # the rest
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % m.rel(f))
                ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2153 2154
2154 2155 def _updatecatformatter(fm, ctx, matcher, path, decode):
2155 2156 """Hook for adding data to the formatter used by ``hg cat``.
2156 2157
2157 2158 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2158 2159 this method first."""
2159 2160 data = ctx[path].data()
2160 2161 if decode:
2161 2162 data = ctx.repo().wwritedata(path, data)
2162 2163 fm.startitem()
2163 2164 fm.write('data', '%s', data)
2164 2165 fm.data(abspath=path, path=matcher.rel(path))
2165 2166
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of matched files from ctx, optionally into
    files named by fntemplate.

    Returns 0 when at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename, opts) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                _prefetchfiles(repo, ctx, [file])
                write(file)
                return 0
        except KeyError:
            # fall through to the generic walk below
            pass

    files = [f for f in ctx.walk(matcher)]
    _prefetchfiles(repo, ctx, files)

    for abs in files:
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            # sub.cat returns 0 on success, mirroring this function
            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path),
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2218 2219
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    dsguard = None
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard and scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))
        return commitfunc(ui, repo, message, matcher, opts)
2239 2240
def samefile(f, ctx1, ctx2):
    """Report whether file f is identical (content and flags) in ctx1 and
    ctx2; absence from both manifests counts as identical."""
    if f not in ctx1.manifest():
        # identical only when missing from both manifests
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2251 2252
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset 'old' folding in the matched working-copy changes.

    Returns the node of the new changeset, or old.node() when nothing
    would actually change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction('amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        user = opts.get('user') or old.user()
        date = opts.get('date') or old.date()

        # Parse the date to allow comparison between date and old.date()
        date = util.parsedate(date)

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            files = set([fn for st in repo.status(base, old)[:3]
                         for fn in st])
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        if (opts.get('addremove')
            and scmutil.addremove(repo, matcher, "", opts)):
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if '.hgsub' in wctx or '.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher)
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        filestoamend = set(f for f in wctx.files() if matcher(f))

        changes = (len(filestoamend) > 0)
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # 'old.p2' is a bound method and therefore always truthy; test
            # the actual second parent so copies are only folded in for a
            # real merge changeset.
            if old.p2().rev() != nullrev:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [f for f in files if (not samefile(f, wctx, base)
                                          or f in wctx.deleted())]

        def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(repo, ctx_,
                                              fctx.path(), fctx.data(),
                                              islink='l' in flags,
                                              isexec='x' in flags,
                                              copied=copied.get(path))
                    return mctx
                except KeyError:
                    return None
        if not changes:
            ui.note(_('copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, 'commit.amend')
        editor = getcommiteditor(editform=editform,
                                 **pycompat.strkwargs(opts))

        if not message:
            editor = getcommiteditor(edit=True, editform=editform)
            message = old.description()

        pureextra = extra.copy()
        extra['amend_source'] = old.hex()

        new = context.memctx(repo,
                             parents=[base.node(), old.p2().node()],
                             text=message,
                             files=files,
                             filectxfn=filectxfn,
                             user=user,
                             date=date,
                             extra=extra,
                             editor=editor)

        newdesc = changelog.stripdesc(new.description())
        if ((not changes)
            and newdesc == old.description()
            and user == old.user()
            and date == old.date()
            and pureextra == old.extra()):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        # keep the amended changeset in the phase of the original (or make
        # it secret when --secret was passed)
        if opts.get('secret'):
            commitphase = 'secret'
        else:
            commitphase = old.phase()
        overrides = {('phases', 'new-commit'): commitphase}
        with ui.configoverride(overrides, 'amend'):
            newid = repo.commitctx(new)

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get('note'):
            obsmetadata = {'note': opts['note']}
        scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and
        # modified in the amend to "normal" in the dirstate.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normal(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
2444 2444
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, spawning the commit editor when empty."""
    description = ctx.description()
    if description:
        return description
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2450 2450
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Invoke the user's editor to collect a commit message for ctx.

    finishdesc, if given, is applied to the edited text before validation.
    extramsg is an extra 'HG:' hint line shown in the editor (defaults to
    the "Leave message empty to abort" hint). editform selects the most
    specific [committemplate] key. With unchangedmessagedetection, saving
    the template text unmodified aborts.

    Raises error.Abort on an empty or (optionally) unchanged message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Try the most specific [committemplate] key first, e.g.
    # 'changeset.commit.amend' before 'changeset.commit' before 'changeset'.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        ref = '.'.join(forms)
        if repo.ui.config('committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = pycompat.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending,
                              repopath=repo.path, action='commit')
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[:stripbelow.start()]

    # drop the 'HG:' helper lines before validating the result
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2500 2500
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the [committemplate] entry 'ref' for ctx and return the text."""
    ui = repo.ui
    spec = formatter.templatespec(ref, None, None)
    t = logcmdutil.changesettemplater(ui, repo, spec)
    # make every [committemplate] entry usable as a template reference
    t.t.cache.update((k, templater.unquotestring(v))
                     for k, v in repo.ui.configitems('committemplate'))

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    # capture the rendered template instead of writing it to the terminal
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2514 2514
def hgprefix(msg):
    """Prefix each non-empty line of msg with 'HG: ' and rejoin with \\n."""
    prefixed = ("HG: %s" % line for line in msg.split("\n") if line)
    return "\n".join(prefixed)
2517 2517
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx.

    Returns the message body followed by 'HG:'-prefixed helper lines
    describing user, branch, bookmark, subrepos and changed files.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                      " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
    edittext.extend([hgprefix(_("added %s") % f) for f in added])
    edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
    edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
    if not added and not modified and not removed:
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
2545 2545
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages after committing 'node' on 'branch'.

    bheads is the list of branch heads as they were before the commit;
    opts are the commit command options ('amend', 'close_branch', ...).
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2593 2593
def postcommitstatus(repo, pats, opts):
    """Return working-directory status for pats, as seen right after commit."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2596 2596
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore files matched by pats to their state in changeset ctx.

    parents is the (parent, p2) pair of the working directory. The function
    classifies every matched file relative to both the target revision and
    the dirstate, picks a per-file action and backup strategy from a
    dispatch table, and delegates the actual work to _performrevert().
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress "not found" noise for paths already accounted for
                # (known file, subrepo, or directory prefix of a known file)
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinguish between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constants" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set, to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3 # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets whose contents result in file changes on disk
            # Modified compared to target, no local change
            (modified, actions['revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions['revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions['revert'], dsmodifiedbackup),
            # Added since target
            (added, actions['remove'], discard),
            # Added in working directory
            (dsadded, actions['forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions['drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions['add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions['add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions['undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean, actions['noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions['unknown'], discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
                            bakname = scmutil.origpath(ui, repo, rel)
                            ui.note(_('saving current version of %s as %s\n') %
                                    (rel, bakname))
                            if not opts.get('dry_run'):
                                if interactive:
                                    util.copyfile(target, bakname)
                                else:
                                    util.rename(target, bakname)
                    if ui.verbose or not exact:
                        if not isinstance(msg, bytes):
                            msg = msg(abs)
                        ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            # honor extensions still wrapping the deprecated prefetch hook
            if _revertprefetch is not _revertprefetchstub:
                ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, use "
                              "'cmdutil._prefetchfiles'", '4.6', stacklevel=1)
                _revertprefetch(repo, ctx,
                                *[actions[name][0] for name in needdata])
            oplist = [actions[name][0] for name in needdata]
            _prefetchfiles(repo, ctx,
                           [f for sublist in oplist for f in sublist])
            _performrevert(repo, parents, ctx, actions, interactive, tobackup)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats,
                                         **pycompat.strkwargs(opts))
                except KeyError:
                    raise error.Abort("subrepository '%s' does not exist in %s!"
                                      % (sub, short(ctx.node())))
2888 2888
def _revertprefetchstub(repo, ctx, *files):
    """Stub method for detecting extension wrapping of _revertprefetch(), to
    issue a deprecation warning."""

# revert() compares this against _revertprefetchstub by identity; any
# extension wrapping the old hook changes it and triggers the deprecation path
_revertprefetch = _revertprefetchstub
2894 2894
def _prefetchfiles(repo, ctx, files):
    """Let extensions changing the storage layer prefetch content for any non
    merge based command.

    The default implementation is a no-op; extensions override/wrap this to
    warm their file-content caches before 'files' are read from ctx.
    """
2898 2898
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Actually perform all the actions computed for revert.

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this
    function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declined to revert in interactive mode; excluded from
    # the matcher used to build the interactive diff below
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from the target revision into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink: the file may already be gone from disk
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'apply'
            reversehunks = False
        # when discarding, show wdir->target hunks and apply them reversed;
        # when applying, show target->wdir hunks as-is
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
            repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy/rename records for everything we just (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3035 3035
class command(registrar.command):
    """deprecated: use registrar.command instead"""
    def _doregister(self, func, name, *args, **kwargs):
        func._deprecatedregistrar = True  # flag for deprecwarn in extensions.py
        return super(command, self)._doregister(func, name, *args, **kwargs)
3041 3041
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3070 3070
def checkunfinished(repo, commit=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    for statefile, _clearable, allowcommit, msg, hint in unfinishedstates:
        if allowcommit and commit:
            continue
        if not repo.vfs.exists(statefile):
            continue
        raise error.Abort(msg, hint=hint)
3081 3081
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # First pass: refuse to proceed if any non-clearable state file exists.
    for statefile, clearable, _allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # Second pass: every remaining state file is clearable; remove it.
    for statefile, clearable, _allowcommit, _msg, _hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3092 3092
# (.hg/<state file>, command that continues the interrupted operation) pairs,
# consulted by howtocontinue() to hint the user after 'hg resolve'
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3097 3097
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    '''
    contmsg = _("continue: %s")
    for statefile, cmdmsg in afterresolvedstates:
        if repo.vfs.exists(statefile):
            return contmsg % cmdmsg, True
    wdirdirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if wdirdirty:
        return contmsg % _("hg commit"), False
    return None, None
3115 3115
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's a matching afterresolvedstates, howtocontinue will yield
    repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3130 3130
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
3145 3145
class changeset_printer(logcmdutil.changesetprinter):
    """deprecated: use logcmdutil.changesetprinter instead"""

    def __init__(self, ui, *args, **kwargs):
        # Point users at the actual replacement class (this class's own
        # base), matching the pattern of the displaygraph/show_changeset
        # shims below; 'logcmdutil.logcmdutil' does not exist.
        msg = ("'cmdutil.changeset_printer' is deprecated, "
               "use 'logcmdutil.changesetprinter'")
        ui.deprecwarn(msg, "4.6")
        super(changeset_printer, self).__init__(ui, *args, **kwargs)
3153 3153
def displaygraph(ui, *args, **kwargs):
    """deprecated forwarding shim: use logcmdutil.displaygraph instead"""
    ui.deprecwarn("'cmdutil.displaygraph' is deprecated, "
                  "use 'logcmdutil.displaygraph'", "4.6")
    return logcmdutil.displaygraph(ui, *args, **kwargs)
3159 3159
def show_changeset(ui, *args, **kwargs):
    """deprecated forwarding shim: use logcmdutil.changesetdisplayer instead"""
    ui.deprecwarn("'cmdutil.show_changeset' is deprecated, "
                  "use 'logcmdutil.changesetdisplayer'", "4.6")
    return logcmdutil.changesetdisplayer(ui, *args, **kwargs)
@@ -1,2749 +1,2750
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 obsutil,
40 40 patch,
41 41 pathutil,
42 42 phases,
43 43 pycompat,
44 44 repoview,
45 45 revlog,
46 46 scmutil,
47 47 sparse,
48 48 subrepo,
49 subrepoutil,
49 50 util,
50 51 )
51 52
52 53 propertycache = util.propertycache
53 54
54 55 nonascii = re.compile(r'[^\x21-\x7f]').search
55 56
56 57 class basectx(object):
57 58 """A basectx object represents the common logic for its children:
58 59 changectx: read-only context that is already present in the repo,
59 60 workingctx: a context that represents the working directory and can
60 61 be committed,
61 62 memctx: a context that represents changes in-memory and can also
62 63 be committed."""
63 64 def __new__(cls, repo, changeid='', *args, **kwargs):
64 65 if isinstance(changeid, basectx):
65 66 return changeid
66 67
67 68 o = super(basectx, cls).__new__(cls)
68 69
69 70 o._repo = repo
70 71 o._rev = nullrev
71 72 o._node = nullid
72 73
73 74 return o
74 75
75 76 def __bytes__(self):
76 77 return short(self.node())
77 78
78 79 __str__ = encoding.strmethod(__bytes__)
79 80
80 81 def __int__(self):
81 82 return self.rev()
82 83
83 84 def __repr__(self):
84 85 return r"<%s %s>" % (type(self).__name__, str(self))
85 86
86 87 def __eq__(self, other):
87 88 try:
88 89 return type(self) == type(other) and self._rev == other._rev
89 90 except AttributeError:
90 91 return False
91 92
92 93 def __ne__(self, other):
93 94 return not (self == other)
94 95
95 96 def __contains__(self, key):
96 97 return key in self._manifest
97 98
98 99 def __getitem__(self, key):
99 100 return self.filectx(key)
100 101
101 102 def __iter__(self):
102 103 return iter(self._manifest)
103 104
104 105 def _buildstatusmanifest(self, status):
105 106 """Builds a manifest that includes the given status results, if this is
106 107 a working copy context. For non-working copy contexts, it just returns
107 108 the normal manifest."""
108 109 return self.manifest()
109 110
110 111 def _matchstatus(self, other, match):
111 112 """This internal method provides a way for child objects to override the
112 113 match operator.
113 114 """
114 115 return match
115 116
116 117 def _buildstatus(self, other, s, match, listignored, listclean,
117 118 listunknown):
118 119 """build a status with respect to another context"""
119 120 # Load earliest manifest first for caching reasons. More specifically,
120 121 # if you have revisions 1000 and 1001, 1001 is probably stored as a
121 122 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
122 123 # 1000 and cache it so that when you read 1001, we just need to apply a
123 124 # delta to what's in the cache. So that's one full reconstruction + one
124 125 # delta application.
125 126 mf2 = None
126 127 if self.rev() is not None and self.rev() < other.rev():
127 128 mf2 = self._buildstatusmanifest(s)
128 129 mf1 = other._buildstatusmanifest(s)
129 130 if mf2 is None:
130 131 mf2 = self._buildstatusmanifest(s)
131 132
132 133 modified, added = [], []
133 134 removed = []
134 135 clean = []
135 136 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
136 137 deletedset = set(deleted)
137 138 d = mf1.diff(mf2, match=match, clean=listclean)
138 139 for fn, value in d.iteritems():
139 140 if fn in deletedset:
140 141 continue
141 142 if value is None:
142 143 clean.append(fn)
143 144 continue
144 145 (node1, flag1), (node2, flag2) = value
145 146 if node1 is None:
146 147 added.append(fn)
147 148 elif node2 is None:
148 149 removed.append(fn)
149 150 elif flag1 != flag2:
150 151 modified.append(fn)
151 152 elif node2 not in wdirnodes:
152 153 # When comparing files between two commits, we save time by
153 154 # not comparing the file contents when the nodeids differ.
154 155 # Note that this means we incorrectly report a reverted change
155 156 # to a file as a modification.
156 157 modified.append(fn)
157 158 elif self[fn].cmp(other[fn]):
158 159 modified.append(fn)
159 160 else:
160 161 clean.append(fn)
161 162
162 163 if removed:
163 164 # need to filter files if they are already reported as removed
164 165 unknown = [fn for fn in unknown if fn not in mf1 and
165 166 (not match or match(fn))]
166 167 ignored = [fn for fn in ignored if fn not in mf1 and
167 168 (not match or match(fn))]
168 169 # if they're deleted, don't report them as removed
169 170 removed = [fn for fn in removed if fn not in deletedset]
170 171
171 172 return scmutil.status(modified, added, removed, deleted, unknown,
172 173 ignored, clean)
173 174
174 175 @propertycache
175 176 def substate(self):
176 return subrepo.state(self, self._repo.ui)
177 return subrepoutil.state(self, self._repo.ui)
177 178
178 179 def subrev(self, subpath):
179 180 return self.substate[subpath][1]
180 181
181 182 def rev(self):
182 183 return self._rev
183 184 def node(self):
184 185 return self._node
185 186 def hex(self):
186 187 return hex(self.node())
187 188 def manifest(self):
188 189 return self._manifest
189 190 def manifestctx(self):
190 191 return self._manifestctx
191 192 def repo(self):
192 193 return self._repo
193 194 def phasestr(self):
194 195 return phases.phasenames[self.phase()]
195 196 def mutable(self):
196 197 return self.phase() > phases.public
197 198
198 199 def getfileset(self, expr):
199 200 return fileset.getfileset(self, expr)
200 201
201 202 def obsolete(self):
202 203 """True if the changeset is obsolete"""
203 204 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
204 205
205 206 def extinct(self):
206 207 """True if the changeset is extinct"""
207 208 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
208 209
209 210 def orphan(self):
210 211 """True if the changeset is not obsolete but its ancestors are"""
211 212 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
212 213
213 214 def phasedivergent(self):
214 215 """True if the changeset try to be a successor of a public changeset
215 216
216 217 Only non-public and non-obsolete changesets may be bumped.
217 218 """
218 219 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
219 220
220 221 def contentdivergent(self):
221 222 """Is a successor of a changeset with multiple possible successors set
222 223
223 224 Only non-public and non-obsolete changesets may be divergent.
224 225 """
225 226 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
226 227
227 228 def isunstable(self):
228 229 """True if the changeset is either unstable, bumped or divergent"""
229 230 return self.orphan() or self.phasedivergent() or self.contentdivergent()
230 231
231 232 def instabilities(self):
232 233 """return the list of instabilities affecting this changeset.
233 234
234 235 Instabilities are returned as strings. possible values are:
235 236 - orphan,
236 237 - phase-divergent,
237 238 - content-divergent.
238 239 """
239 240 instabilities = []
240 241 if self.orphan():
241 242 instabilities.append('orphan')
242 243 if self.phasedivergent():
243 244 instabilities.append('phase-divergent')
244 245 if self.contentdivergent():
245 246 instabilities.append('content-divergent')
246 247 return instabilities
247 248
248 249 def parents(self):
249 250 """return contexts for each parent changeset"""
250 251 return self._parents
251 252
252 253 def p1(self):
253 254 return self._parents[0]
254 255
255 256 def p2(self):
256 257 parents = self._parents
257 258 if len(parents) == 2:
258 259 return parents[1]
259 260 return changectx(self._repo, nullrev)
260 261
261 262 def _fileinfo(self, path):
262 263 if r'_manifest' in self.__dict__:
263 264 try:
264 265 return self._manifest[path], self._manifest.flags(path)
265 266 except KeyError:
266 267 raise error.ManifestLookupError(self._node, path,
267 268 _('not found in manifest'))
268 269 if r'_manifestdelta' in self.__dict__ or path in self.files():
269 270 if path in self._manifestdelta:
270 271 return (self._manifestdelta[path],
271 272 self._manifestdelta.flags(path))
272 273 mfl = self._repo.manifestlog
273 274 try:
274 275 node, flag = mfl[self._changeset.manifest].find(path)
275 276 except KeyError:
276 277 raise error.ManifestLookupError(self._node, path,
277 278 _('not found in manifest'))
278 279
279 280 return node, flag
280 281
281 282 def filenode(self, path):
282 283 return self._fileinfo(path)[0]
283 284
284 285 def flags(self, path):
285 286 try:
286 287 return self._fileinfo(path)[1]
287 288 except error.LookupError:
288 289 return ''
289 290
290 291 def sub(self, path, allowcreate=True):
291 292 '''return a subrepo for the stored revision of path, never wdir()'''
292 293 return subrepo.subrepo(self, path, allowcreate=allowcreate)
293 294
294 295 def nullsub(self, path, pctx):
295 296 return subrepo.nullsubrepo(self, path, pctx)
296 297
297 298 def workingsub(self, path):
298 299 '''return a subrepo for the stored revision, or wdir if this is a wdir
299 300 context.
300 301 '''
301 302 return subrepo.subrepo(self, path, allowwdir=True)
302 303
303 304 def match(self, pats=None, include=None, exclude=None, default='glob',
304 305 listsubrepos=False, badfn=None):
305 306 r = self._repo
306 307 return matchmod.match(r.root, r.getcwd(), pats,
307 308 include, exclude, default,
308 309 auditor=r.nofsauditor, ctx=self,
309 310 listsubrepos=listsubrepos, badfn=badfn)
310 311
311 312 def diff(self, ctx2=None, match=None, **opts):
312 313 """Returns a diff generator for the given contexts and matcher"""
313 314 if ctx2 is None:
314 315 ctx2 = self.p1()
315 316 if ctx2 is not None:
316 317 ctx2 = self._repo[ctx2]
317 318 diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
318 319 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
319 320
320 321 def dirs(self):
321 322 return self._manifest.dirs()
322 323
323 324 def hasdir(self, dir):
324 325 return self._manifest.hasdir(dir)
325 326
326 327 def status(self, other=None, match=None, listignored=False,
327 328 listclean=False, listunknown=False, listsubrepos=False):
328 329 """return status of files between two nodes or node and working
329 330 directory.
330 331
331 332 If other is None, compare this node with working directory.
332 333
333 334 returns (modified, added, removed, deleted, unknown, ignored, clean)
334 335 """
335 336
336 337 ctx1 = self
337 338 ctx2 = self._repo[other]
338 339
339 340 # This next code block is, admittedly, fragile logic that tests for
340 341 # reversing the contexts and wouldn't need to exist if it weren't for
341 342 # the fast (and common) code path of comparing the working directory
342 343 # with its first parent.
343 344 #
344 345 # What we're aiming for here is the ability to call:
345 346 #
346 347 # workingctx.status(parentctx)
347 348 #
348 349 # If we always built the manifest for each context and compared those,
349 350 # then we'd be done. But the special case of the above call means we
350 351 # just copy the manifest of the parent.
351 352 reversed = False
352 353 if (not isinstance(ctx1, changectx)
353 354 and isinstance(ctx2, changectx)):
354 355 reversed = True
355 356 ctx1, ctx2 = ctx2, ctx1
356 357
357 358 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
358 359 match = ctx2._matchstatus(ctx1, match)
359 360 r = scmutil.status([], [], [], [], [], [], [])
360 361 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
361 362 listunknown)
362 363
363 364 if reversed:
364 365 # Reverse added and removed. Clear deleted, unknown and ignored as
365 366 # these make no sense to reverse.
366 367 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
367 368 r.clean)
368 369
369 370 if listsubrepos:
370 371 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
371 372 try:
372 373 rev2 = ctx2.subrev(subpath)
373 374 except KeyError:
374 375 # A subrepo that existed in node1 was deleted between
375 376 # node1 and node2 (inclusive). Thus, ctx2's substate
376 377 # won't contain that subpath. The best we can do is ignore it.
377 378 rev2 = None
378 379 submatch = matchmod.subdirmatcher(subpath, match)
379 380 s = sub.status(rev2, match=submatch, ignored=listignored,
380 381 clean=listclean, unknown=listunknown,
381 382 listsubrepos=True)
382 383 for rfiles, sfiles in zip(r, s):
383 384 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
384 385
385 386 for l in r:
386 387 l.sort()
387 388
388 389 return r
389 390
390 391 def _filterederror(repo, changeid):
391 392 """build an exception to be raised about a filtered changeid
392 393
393 394 This is extracted in a function to help extensions (eg: evolve) to
394 395 experiment with various message variants."""
395 396 if repo.filtername.startswith('visible'):
396 397
397 398 # Check if the changeset is obsolete
398 399 unfilteredrepo = repo.unfiltered()
399 400 ctx = unfilteredrepo[changeid]
400 401
401 402 # If the changeset is obsolete, enrich the message with the reason
402 403 # that made this changeset not visible
403 404 if ctx.obsolete():
404 405 msg = obsutil._getfilteredreason(repo, changeid, ctx)
405 406 else:
406 407 msg = _("hidden revision '%s'") % changeid
407 408
408 409 hint = _('use --hidden to access hidden revisions')
409 410
410 411 return error.FilteredRepoLookupError(msg, hint=hint)
411 412 msg = _("filtered revision '%s' (not in '%s' subset)")
412 413 msg %= (changeid, repo.filtername)
413 414 return error.FilteredRepoLookupError(msg)
414 415
415 416 class changectx(basectx):
416 417 """A changecontext object makes access to data related to a particular
417 418 changeset convenient. It represents a read-only context already present in
418 419 the repo."""
419 420 def __init__(self, repo, changeid=''):
420 421 """changeid is a revision number, node, or tag"""
421 422
422 423 # since basectx.__new__ already took care of copying the object, we
423 424 # don't need to do anything in __init__, so we just exit here
424 425 if isinstance(changeid, basectx):
425 426 return
426 427
427 428 if changeid == '':
428 429 changeid = '.'
429 430 self._repo = repo
430 431
431 432 try:
432 433 if isinstance(changeid, int):
433 434 self._node = repo.changelog.node(changeid)
434 435 self._rev = changeid
435 436 return
436 437 if not pycompat.ispy3 and isinstance(changeid, long):
437 438 changeid = str(changeid)
438 439 if changeid == 'null':
439 440 self._node = nullid
440 441 self._rev = nullrev
441 442 return
442 443 if changeid == 'tip':
443 444 self._node = repo.changelog.tip()
444 445 self._rev = repo.changelog.rev(self._node)
445 446 return
446 447 if (changeid == '.'
447 448 or repo.local() and changeid == repo.dirstate.p1()):
448 449 # this is a hack to delay/avoid loading obsmarkers
449 450 # when we know that '.' won't be hidden
450 451 self._node = repo.dirstate.p1()
451 452 self._rev = repo.unfiltered().changelog.rev(self._node)
452 453 return
453 454 if len(changeid) == 20:
454 455 try:
455 456 self._node = changeid
456 457 self._rev = repo.changelog.rev(changeid)
457 458 return
458 459 except error.FilteredRepoLookupError:
459 460 raise
460 461 except LookupError:
461 462 pass
462 463
463 464 try:
464 465 r = int(changeid)
465 466 if '%d' % r != changeid:
466 467 raise ValueError
467 468 l = len(repo.changelog)
468 469 if r < 0:
469 470 r += l
470 471 if r < 0 or r >= l and r != wdirrev:
471 472 raise ValueError
472 473 self._rev = r
473 474 self._node = repo.changelog.node(r)
474 475 return
475 476 except error.FilteredIndexError:
476 477 raise
477 478 except (ValueError, OverflowError, IndexError):
478 479 pass
479 480
480 481 if len(changeid) == 40:
481 482 try:
482 483 self._node = bin(changeid)
483 484 self._rev = repo.changelog.rev(self._node)
484 485 return
485 486 except error.FilteredLookupError:
486 487 raise
487 488 except (TypeError, LookupError):
488 489 pass
489 490
490 491 # lookup bookmarks through the name interface
491 492 try:
492 493 self._node = repo.names.singlenode(repo, changeid)
493 494 self._rev = repo.changelog.rev(self._node)
494 495 return
495 496 except KeyError:
496 497 pass
497 498 except error.FilteredRepoLookupError:
498 499 raise
499 500 except error.RepoLookupError:
500 501 pass
501 502
502 503 self._node = repo.unfiltered().changelog._partialmatch(changeid)
503 504 if self._node is not None:
504 505 self._rev = repo.changelog.rev(self._node)
505 506 return
506 507
507 508 # lookup failed
508 509 # check if it might have come from damaged dirstate
509 510 #
510 511 # XXX we could avoid the unfiltered if we had a recognizable
511 512 # exception for filtered changeset access
512 513 if (repo.local()
513 514 and changeid in repo.unfiltered().dirstate.parents()):
514 515 msg = _("working directory has unknown parent '%s'!")
515 516 raise error.Abort(msg % short(changeid))
516 517 try:
517 518 if len(changeid) == 20 and nonascii(changeid):
518 519 changeid = hex(changeid)
519 520 except TypeError:
520 521 pass
521 522 except (error.FilteredIndexError, error.FilteredLookupError,
522 523 error.FilteredRepoLookupError):
523 524 raise _filterederror(repo, changeid)
524 525 except IndexError:
525 526 pass
526 527 raise error.RepoLookupError(
527 528 _("unknown revision '%s'") % changeid)
528 529
529 530 def __hash__(self):
530 531 try:
531 532 return hash(self._rev)
532 533 except AttributeError:
533 534 return id(self)
534 535
535 536 def __nonzero__(self):
536 537 return self._rev != nullrev
537 538
538 539 __bool__ = __nonzero__
539 540
540 541 @propertycache
541 542 def _changeset(self):
542 543 return self._repo.changelog.changelogrevision(self.rev())
543 544
544 545 @propertycache
545 546 def _manifest(self):
546 547 return self._manifestctx.read()
547 548
548 549 @property
549 550 def _manifestctx(self):
550 551 return self._repo.manifestlog[self._changeset.manifest]
551 552
552 553 @propertycache
553 554 def _manifestdelta(self):
554 555 return self._manifestctx.readdelta()
555 556
556 557 @propertycache
557 558 def _parents(self):
558 559 repo = self._repo
559 560 p1, p2 = repo.changelog.parentrevs(self._rev)
560 561 if p2 == nullrev:
561 562 return [changectx(repo, p1)]
562 563 return [changectx(repo, p1), changectx(repo, p2)]
563 564
564 565 def changeset(self):
565 566 c = self._changeset
566 567 return (
567 568 c.manifest,
568 569 c.user,
569 570 c.date,
570 571 c.files,
571 572 c.description,
572 573 c.extra,
573 574 )
574 575 def manifestnode(self):
575 576 return self._changeset.manifest
576 577
577 578 def user(self):
578 579 return self._changeset.user
579 580 def date(self):
580 581 return self._changeset.date
581 582 def files(self):
582 583 return self._changeset.files
583 584 def description(self):
584 585 return self._changeset.description
585 586 def branch(self):
586 587 return encoding.tolocal(self._changeset.extra.get("branch"))
587 588 def closesbranch(self):
588 589 return 'close' in self._changeset.extra
589 590 def extra(self):
590 591 """Return a dict of extra information."""
591 592 return self._changeset.extra
592 593 def tags(self):
593 594 """Return a list of byte tag names"""
594 595 return self._repo.nodetags(self._node)
595 596 def bookmarks(self):
596 597 """Return a list of byte bookmark names."""
597 598 return self._repo.nodebookmarks(self._node)
598 599 def phase(self):
599 600 return self._repo._phasecache.phase(self._repo, self._rev)
600 601 def hidden(self):
601 602 return self._rev in repoview.filterrevs(self._repo, 'visible')
602 603
603 604 def isinmemory(self):
604 605 return False
605 606
606 607 def children(self):
607 608 """return list of changectx contexts for each child changeset.
608 609
609 610 This returns only the immediate child changesets. Use descendants() to
610 611 recursively walk children.
611 612 """
612 613 c = self._repo.changelog.children(self._node)
613 614 return [changectx(self._repo, x) for x in c]
614 615
615 616 def ancestors(self):
616 617 for a in self._repo.changelog.ancestors([self._rev]):
617 618 yield changectx(self._repo, a)
618 619
619 620 def descendants(self):
620 621 """Recursively yield all children of the changeset.
621 622
622 623 For just the immediate children, use children()
623 624 """
624 625 for d in self._repo.changelog.descendants([self._rev]):
625 626 yield changectx(self._repo, d)
626 627
627 628 def filectx(self, path, fileid=None, filelog=None):
628 629 """get a file context from this changeset"""
629 630 if fileid is None:
630 631 fileid = self.filenode(path)
631 632 return filectx(self._repo, path, fileid=fileid,
632 633 changectx=self, filelog=filelog)
633 634
634 635 def ancestor(self, c2, warn=False):
635 636 """return the "best" ancestor context of self and c2
636 637
637 638 If there are multiple candidates, it will show a message and check
638 639 merge.preferancestor configuration before falling back to the
639 640 revlog ancestor."""
640 641 # deal with workingctxs
641 642 n2 = c2._node
642 643 if n2 is None:
643 644 n2 = c2._parents[0]._node
644 645 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
645 646 if not cahs:
646 647 anc = nullid
647 648 elif len(cahs) == 1:
648 649 anc = cahs[0]
649 650 else:
650 651 # experimental config: merge.preferancestor
651 652 for r in self._repo.ui.configlist('merge', 'preferancestor'):
652 653 try:
653 654 ctx = changectx(self._repo, r)
654 655 except error.RepoLookupError:
655 656 continue
656 657 anc = ctx.node()
657 658 if anc in cahs:
658 659 break
659 660 else:
660 661 anc = self._repo.changelog.ancestor(self._node, n2)
661 662 if warn:
662 663 self._repo.ui.status(
663 664 (_("note: using %s as ancestor of %s and %s\n") %
664 665 (short(anc), short(self._node), short(n2))) +
665 666 ''.join(_(" alternatively, use --config "
666 667 "merge.preferancestor=%s\n") %
667 668 short(n) for n in sorted(cahs) if n != anc))
668 669 return changectx(self._repo, anc)
669 670
670 671 def descendant(self, other):
671 672 """True if other is descendant of this changeset"""
672 673 return self._repo.changelog.descendant(self._rev, other._rev)
673 674
674 675 def walk(self, match):
675 676 '''Generates matching file names.'''
676 677
677 678 # Wrap match.bad method to have message with nodeid
678 679 def bad(fn, msg):
679 680 # The manifest doesn't know about subrepos, so don't complain about
680 681 # paths into valid subrepos.
681 682 if any(fn == s or fn.startswith(s + '/')
682 683 for s in self.substate):
683 684 return
684 685 match.bad(fn, _('no such file in rev %s') % self)
685 686
686 687 m = matchmod.badmatch(match, bad)
687 688 return self._manifest.walk(m)
688 689
689 690 def matches(self, match):
690 691 return self.walk(match)
691 692
692 693 class basefilectx(object):
693 694 """A filecontext object represents the common logic for its children:
694 695 filectx: read-only access to a filerevision that is already present
695 696 in the repo,
696 697 workingfilectx: a filecontext that represents files from the working
697 698 directory,
698 699 memfilectx: a filecontext that represents files in-memory,
699 700 overlayfilectx: duplicate another filecontext with some fields overridden.
700 701 """
701 702 @propertycache
702 703 def _filelog(self):
703 704 return self._repo.file(self._path)
704 705
705 706 @propertycache
706 707 def _changeid(self):
707 708 if r'_changeid' in self.__dict__:
708 709 return self._changeid
709 710 elif r'_changectx' in self.__dict__:
710 711 return self._changectx.rev()
711 712 elif r'_descendantrev' in self.__dict__:
712 713 # this file context was created from a revision with a known
713 714 # descendant, we can (lazily) correct for linkrev aliases
714 715 return self._adjustlinkrev(self._descendantrev)
715 716 else:
716 717 return self._filelog.linkrev(self._filerev)
717 718
718 719 @propertycache
719 720 def _filenode(self):
720 721 if r'_fileid' in self.__dict__:
721 722 return self._filelog.lookup(self._fileid)
722 723 else:
723 724 return self._changectx.filenode(self._path)
724 725
725 726 @propertycache
726 727 def _filerev(self):
727 728 return self._filelog.rev(self._filenode)
728 729
729 730 @propertycache
730 731 def _repopath(self):
731 732 return self._path
732 733
733 734 def __nonzero__(self):
734 735 try:
735 736 self._filenode
736 737 return True
737 738 except error.LookupError:
738 739 # file is missing
739 740 return False
740 741
741 742 __bool__ = __nonzero__
742 743
743 744 def __bytes__(self):
744 745 try:
745 746 return "%s@%s" % (self.path(), self._changectx)
746 747 except error.LookupError:
747 748 return "%s@???" % self.path()
748 749
749 750 __str__ = encoding.strmethod(__bytes__)
750 751
751 752 def __repr__(self):
752 753 return "<%s %s>" % (type(self).__name__, str(self))
753 754
754 755 def __hash__(self):
755 756 try:
756 757 return hash((self._path, self._filenode))
757 758 except AttributeError:
758 759 return id(self)
759 760
760 761 def __eq__(self, other):
761 762 try:
762 763 return (type(self) == type(other) and self._path == other._path
763 764 and self._filenode == other._filenode)
764 765 except AttributeError:
765 766 return False
766 767
767 768 def __ne__(self, other):
768 769 return not (self == other)
769 770
770 771 def filerev(self):
771 772 return self._filerev
772 773 def filenode(self):
773 774 return self._filenode
774 775 @propertycache
775 776 def _flags(self):
776 777 return self._changectx.flags(self._path)
777 778 def flags(self):
778 779 return self._flags
779 780 def filelog(self):
780 781 return self._filelog
781 782 def rev(self):
782 783 return self._changeid
783 784 def linkrev(self):
784 785 return self._filelog.linkrev(self._filerev)
785 786 def node(self):
786 787 return self._changectx.node()
787 788 def hex(self):
788 789 return self._changectx.hex()
789 790 def user(self):
790 791 return self._changectx.user()
791 792 def date(self):
792 793 return self._changectx.date()
793 794 def files(self):
794 795 return self._changectx.files()
795 796 def description(self):
796 797 return self._changectx.description()
797 798 def branch(self):
798 799 return self._changectx.branch()
799 800 def extra(self):
800 801 return self._changectx.extra()
801 802 def phase(self):
802 803 return self._changectx.phase()
803 804 def phasestr(self):
804 805 return self._changectx.phasestr()
805 806 def obsolete(self):
806 807 return self._changectx.obsolete()
807 808 def instabilities(self):
808 809 return self._changectx.instabilities()
809 810 def manifest(self):
810 811 return self._changectx.manifest()
811 812 def changectx(self):
812 813 return self._changectx
813 814 def renamed(self):
814 815 return self._copied
815 816 def repo(self):
816 817 return self._repo
817 818 def size(self):
818 819 return len(self.data())
819 820
820 821 def path(self):
821 822 return self._path
822 823
823 824 def isbinary(self):
824 825 try:
825 826 return util.binary(self.data())
826 827 except IOError:
827 828 return False
828 829 def isexec(self):
829 830 return 'x' in self.flags()
830 831 def islink(self):
831 832 return 'l' in self.flags()
832 833
833 834 def isabsent(self):
834 835 """whether this filectx represents a file not in self._changectx
835 836
836 837 This is mainly for merge code to detect change/delete conflicts. This is
837 838 expected to be True for all subclasses of basectx."""
838 839 return False
839 840
840 841 _customcmp = False
841 842 def cmp(self, fctx):
842 843 """compare with other file context
843 844
844 845 returns True if different than fctx.
845 846 """
846 847 if fctx._customcmp:
847 848 return fctx.cmp(self)
848 849
849 850 if (fctx._filenode is None
850 851 and (self._repo._encodefilterpats
851 852 # if file data starts with '\1\n', empty metadata block is
852 853 # prepended, which adds 4 bytes to filelog.size().
853 854 or self.size() - 4 == fctx.size())
854 855 or self.size() == fctx.size()):
855 856 return self._filelog.cmp(self._filenode, fctx.data())
856 857
857 858 return True
858 859
859 860 def _adjustlinkrev(self, srcrev, inclusive=False):
860 861 """return the first ancestor of <srcrev> introducing <fnode>
861 862
862 863 If the linkrev of the file revision does not point to an ancestor of
863 864 srcrev, we'll walk down the ancestors until we find one introducing
864 865 this file revision.
865 866
866 867 :srcrev: the changeset revision we search ancestors from
867 868 :inclusive: if true, the src revision will also be checked
868 869 """
869 870 repo = self._repo
870 871 cl = repo.unfiltered().changelog
871 872 mfl = repo.manifestlog
872 873 # fetch the linkrev
873 874 lkr = self.linkrev()
874 875 # hack to reuse ancestor computation when searching for renames
875 876 memberanc = getattr(self, '_ancestrycontext', None)
876 877 iteranc = None
877 878 if srcrev is None:
878 879 # wctx case, used by workingfilectx during mergecopy
879 880 revs = [p.rev() for p in self._repo[None].parents()]
880 881 inclusive = True # we skipped the real (revless) source
881 882 else:
882 883 revs = [srcrev]
883 884 if memberanc is None:
884 885 memberanc = iteranc = cl.ancestors(revs, lkr,
885 886 inclusive=inclusive)
886 887 # check if this linkrev is an ancestor of srcrev
887 888 if lkr not in memberanc:
888 889 if iteranc is None:
889 890 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
890 891 fnode = self._filenode
891 892 path = self._path
892 893 for a in iteranc:
893 894 ac = cl.read(a) # get changeset data (we avoid object creation)
894 895 if path in ac[3]: # checking the 'files' field.
895 896 # The file has been touched, check if the content is
896 897 # similar to the one we search for.
897 898 if fnode == mfl[ac[0]].readfast().get(path):
898 899 return a
899 900 # In theory, we should never get out of that loop without a result.
900 901 # But if manifest uses a buggy file revision (not children of the
901 902 # one it replaces) we could. Such a buggy situation will likely
902 903 # result in a crash somewhere else at some point.
903 904 return lkr
904 905
905 906 def introrev(self):
906 907 """return the rev of the changeset which introduced this file revision
907 908
908 909 This method is different from linkrev because it take into account the
909 910 changeset the filectx was created from. It ensures the returned
910 911 revision is one of its ancestors. This prevents bugs from
911 912 'linkrev-shadowing' when a file revision is used by multiple
912 913 changesets.
913 914 """
914 915 lkr = self.linkrev()
915 916 attrs = vars(self)
916 917 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
917 918 if noctx or self.rev() == lkr:
918 919 return self.linkrev()
919 920 return self._adjustlinkrev(self.rev(), inclusive=True)
920 921
921 922 def introfilectx(self):
922 923 """Return filectx having identical contents, but pointing to the
923 924 changeset revision where this filectx was introduced"""
924 925 introrev = self.introrev()
925 926 if self.rev() == introrev:
926 927 return self
927 928 return self.filectx(self.filenode(), changeid=introrev)
928 929
929 930 def _parentfilectx(self, path, fileid, filelog):
930 931 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
931 932 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
932 933 if '_changeid' in vars(self) or '_changectx' in vars(self):
933 934 # If self is associated with a changeset (probably explicitly
934 935 # fed), ensure the created filectx is associated with a
935 936 # changeset that is an ancestor of self.changectx.
936 937 # This lets us later use _adjustlinkrev to get a correct link.
937 938 fctx._descendantrev = self.rev()
938 939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
939 940 elif '_descendantrev' in vars(self):
940 941 # Otherwise propagate _descendantrev if we have one associated.
941 942 fctx._descendantrev = self._descendantrev
942 943 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
943 944 return fctx
944 945
945 946 def parents(self):
946 947 _path = self._path
947 948 fl = self._filelog
948 949 parents = self._filelog.parents(self._filenode)
949 950 pl = [(_path, node, fl) for node in parents if node != nullid]
950 951
951 952 r = fl.renamed(self._filenode)
952 953 if r:
953 954 # - In the simple rename case, both parent are nullid, pl is empty.
954 955 # - In case of merge, only one of the parent is null id and should
955 956 # be replaced with the rename information. This parent is -always-
956 957 # the first one.
957 958 #
958 959 # As null id have always been filtered out in the previous list
959 960 # comprehension, inserting to 0 will always result in "replacing
960 961 # first nullid parent with rename information.
961 962 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
962 963
963 964 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
964 965
    def p1(self):
        """return the first parent filectx"""
        return self.parents()[0]
967 968
968 969 def p2(self):
969 970 p = self.parents()
970 971 if len(p) == 2:
971 972 return p[1]
972 973 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
973 974
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines in text; a trailing fragment without a final
            # newline still counts as one line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                # pair each line with an annotateline carrying its 1-based
                # line number
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([annotateline(fctx=rev)] * lines(text), text)

        # cache filelog lookups; renamed parents share the same log objects
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: filectx -> number of children still waiting on its result,
        #         used below to free annotate data as soon as possible
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    # release parent annotate data once its last child is done
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
1084 1085
1085 1086 def ancestors(self, followfirst=False):
1086 1087 visit = {}
1087 1088 c = self
1088 1089 if followfirst:
1089 1090 cut = 1
1090 1091 else:
1091 1092 cut = None
1092 1093
1093 1094 while True:
1094 1095 for parent in c.parents()[:cut]:
1095 1096 visit[(parent.linkrev(), parent.filenode())] = parent
1096 1097 if not visit:
1097 1098 break
1098 1099 c = visit.pop(max(visit))
1099 1100 yield c
1100 1101
1101 1102 def decodeddata(self):
1102 1103 """Returns `data()` after running repository decoding filters.
1103 1104
1104 1105 This is often equivalent to how the data would be expressed on disk.
1105 1106 """
1106 1107 return self._repo.wwritedata(self.path(), self.data())
1107 1108
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Annotation data for one line: the filectx that introduced it, an
    optional original line number, and a skip marker."""
    # filectx of the revision that introduced this line
    fctx = attr.ib()
    # line number at first appearance in the file, or False when line
    # numbers are not being tracked
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1114 1115
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # each entry is (parent annotate data, iterator/list of diff blocks
    # between that parent's text and the child's text)
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                # unchanged block: copy the parent's annotations over
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still attributed to the child
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # parent hunk shorter than child hunk: defer to 2nd pass
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1175 1176
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        """the changectx this file revision is linked to"""
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """return the raw (undecoded) revision data from the filelog"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file contents at this revision

        Raises Abort for censored nodes unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """return the size of the file revision as stored in the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already has this exact file revision: not a copy
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        """return filectxs for the children of this file revision"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1281 1282
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            # default to the dirstate's current branch
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # a committable context renders as its first parent plus '+'
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # lazily computed when 'changes' was not supplied to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits the bookmarks of its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # yield parents first, then all of their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1482 1483
1483 1484 class workingctx(committablectx):
1484 1485 """A workingctx object makes access to data related to
1485 1486 the current working directory convenient.
1486 1487 date - any valid date string or (unixtime, offset), or None.
1487 1488 user - username string, or None.
1488 1489 extra - a dictionary of extra values, or None.
1489 1490 changes - a list of file lists as returned by localrepo.status()
1490 1491 or None to use the repository status.
1491 1492 """
1492 1493 def __init__(self, repo, text="", user=None, date=None, extra=None,
1493 1494 changes=None):
1494 1495 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1495 1496
1496 1497 def __iter__(self):
1497 1498 d = self._repo.dirstate
1498 1499 for f in d:
1499 1500 if d[f] != 'r':
1500 1501 yield f
1501 1502
1502 1503 def __contains__(self, key):
1503 1504 return self._repo.dirstate[key] not in "?r"
1504 1505
1505 1506 def hex(self):
1506 1507 return hex(wdirid)
1507 1508
1508 1509 @propertycache
1509 1510 def _parents(self):
1510 1511 p = self._repo.dirstate.parents()
1511 1512 if p[1] == nullid:
1512 1513 p = p[:-1]
1513 1514 return [changectx(self._repo, x) for x in p]
1514 1515
1515 1516 def filectx(self, path, filelog=None):
1516 1517 """get a file context from the working directory"""
1517 1518 return workingfilectx(self._repo, path, workingctx=self,
1518 1519 filelog=filelog)
1519 1520
1520 1521 def dirty(self, missing=False, merge=True, branch=True):
1521 1522 "check whether a working directory is modified"
1522 1523 # check subrepos first
1523 1524 for s in sorted(self.substate):
1524 1525 if self.sub(s).dirty(missing=missing):
1525 1526 return True
1526 1527 # check current working dir
1527 1528 return ((merge and self.p2()) or
1528 1529 (branch and self.branch() != self.p1().branch()) or
1529 1530 self.modified() or self.added() or self.removed() or
1530 1531 (missing and self.deleted()))
1531 1532
1532 1533 def add(self, list, prefix=""):
1533 1534 with self._repo.wlock():
1534 1535 ui, ds = self._repo.ui, self._repo.dirstate
1535 1536 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1536 1537 rejected = []
1537 1538 lstat = self._repo.wvfs.lstat
1538 1539 for f in list:
1539 1540 # ds.pathto() returns an absolute file when this is invoked from
1540 1541 # the keyword extension. That gets flagged as non-portable on
1541 1542 # Windows, since it contains the drive letter and colon.
1542 1543 scmutil.checkportable(ui, os.path.join(prefix, f))
1543 1544 try:
1544 1545 st = lstat(f)
1545 1546 except OSError:
1546 1547 ui.warn(_("%s does not exist!\n") % uipath(f))
1547 1548 rejected.append(f)
1548 1549 continue
1549 1550 if st.st_size > 10000000:
1550 1551 ui.warn(_("%s: up to %d MB of RAM may be required "
1551 1552 "to manage this file\n"
1552 1553 "(use 'hg revert %s' to cancel the "
1553 1554 "pending addition)\n")
1554 1555 % (f, 3 * st.st_size // 1000000, uipath(f)))
1555 1556 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1556 1557 ui.warn(_("%s not added: only files and symlinks "
1557 1558 "supported currently\n") % uipath(f))
1558 1559 rejected.append(f)
1559 1560 elif ds[f] in 'amn':
1560 1561 ui.warn(_("%s already tracked!\n") % uipath(f))
1561 1562 elif ds[f] == 'r':
1562 1563 ds.normallookup(f)
1563 1564 else:
1564 1565 ds.add(f)
1565 1566 return rejected
1566 1567
1567 1568 def forget(self, files, prefix=""):
1568 1569 with self._repo.wlock():
1569 1570 ds = self._repo.dirstate
1570 1571 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1571 1572 rejected = []
1572 1573 for f in files:
1573 1574 if f not in self._repo.dirstate:
1574 1575 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1575 1576 rejected.append(f)
1576 1577 elif self._repo.dirstate[f] != 'a':
1577 1578 self._repo.dirstate.remove(f)
1578 1579 else:
1579 1580 self._repo.dirstate.drop(f)
1580 1581 return rejected
1581 1582
1582 1583 def undelete(self, list):
1583 1584 pctxs = self.parents()
1584 1585 with self._repo.wlock():
1585 1586 ds = self._repo.dirstate
1586 1587 for f in list:
1587 1588 if self._repo.dirstate[f] != 'r':
1588 1589 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1589 1590 else:
1590 1591 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1591 1592 t = fctx.data()
1592 1593 self._repo.wwrite(f, t, fctx.flags())
1593 1594 self._repo.dirstate.normal(f)
1594 1595
1595 1596 def copy(self, source, dest):
1596 1597 try:
1597 1598 st = self._repo.wvfs.lstat(dest)
1598 1599 except OSError as err:
1599 1600 if err.errno != errno.ENOENT:
1600 1601 raise
1601 1602 self._repo.ui.warn(_("%s does not exist!\n")
1602 1603 % self._repo.dirstate.pathto(dest))
1603 1604 return
1604 1605 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1605 1606 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1606 1607 "symbolic link\n")
1607 1608 % self._repo.dirstate.pathto(dest))
1608 1609 else:
1609 1610 with self._repo.wlock():
1610 1611 if self._repo.dirstate[dest] in '?':
1611 1612 self._repo.dirstate.add(dest)
1612 1613 elif self._repo.dirstate[dest] in 'r':
1613 1614 self._repo.dirstate.normallookup(dest)
1614 1615 self._repo.dirstate.copy(source, dest)
1615 1616
1616 1617 def match(self, pats=None, include=None, exclude=None, default='glob',
1617 1618 listsubrepos=False, badfn=None):
1618 1619 r = self._repo
1619 1620
1620 1621 # Only a case insensitive filesystem needs magic to translate user input
1621 1622 # to actual case in the filesystem.
1622 1623 icasefs = not util.fscasesensitive(r.root)
1623 1624 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1624 1625 default, auditor=r.auditor, ctx=self,
1625 1626 listsubrepos=listsubrepos, badfn=badfn,
1626 1627 icasefs=icasefs)
1627 1628
1628 1629 def _filtersuspectsymlink(self, files):
1629 1630 if not files or self._repo.dirstate._checklink:
1630 1631 return files
1631 1632
1632 1633 # Symlink placeholders may get non-symlink-like contents
1633 1634 # via user error or dereferencing by NFS or Samba servers,
1634 1635 # so we filter out any placeholders that don't look like a
1635 1636 # symlink
1636 1637 sane = []
1637 1638 for f in files:
1638 1639 if self.flags(f) == 'l':
1639 1640 d = self[f].data()
1640 1641 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1641 1642 self._repo.ui.debug('ignoring suspect symlink placeholder'
1642 1643 ' "%s"\n' % f)
1643 1644 continue
1644 1645 sane.append(f)
1645 1646 return sane
1646 1647
1647 1648 def _checklookup(self, files):
1648 1649 # check for any possibly clean files
1649 1650 if not files:
1650 1651 return [], [], []
1651 1652
1652 1653 modified = []
1653 1654 deleted = []
1654 1655 fixup = []
1655 1656 pctx = self._parents[0]
1656 1657 # do a full compare of any files that might have changed
1657 1658 for f in sorted(files):
1658 1659 try:
1659 1660 # This will return True for a file that got replaced by a
1660 1661 # directory in the interim, but fixing that is pretty hard.
1661 1662 if (f not in pctx or self.flags(f) != pctx.flags(f)
1662 1663 or pctx[f].cmp(self[f])):
1663 1664 modified.append(f)
1664 1665 else:
1665 1666 fixup.append(f)
1666 1667 except (IOError, OSError):
1667 1668 # A file become inaccessible in between? Mark it as deleted,
1668 1669 # matching dirstate behavior (issue5584).
1669 1670 # The dirstate has more complex behavior around whether a
1670 1671 # missing file matches a directory, etc, but we don't need to
1671 1672 # bother with that: if f has made it to this point, we're sure
1672 1673 # it's in the dirstate.
1673 1674 deleted.append(f)
1674 1675
1675 1676 return modified, deleted, fixup
1676 1677
1677 1678 def _poststatusfixup(self, status, fixup):
1678 1679 """update dirstate for files that are actually clean"""
1679 1680 poststatus = self._repo.postdsstatus()
1680 1681 if fixup or poststatus:
1681 1682 try:
1682 1683 oldid = self._repo.dirstate.identity()
1683 1684
1684 1685 # updating the dirstate is optional
1685 1686 # so we don't wait on the lock
1686 1687 # wlock can invalidate the dirstate, so cache normal _after_
1687 1688 # taking the lock
1688 1689 with self._repo.wlock(False):
1689 1690 if self._repo.dirstate.identity() == oldid:
1690 1691 if fixup:
1691 1692 normal = self._repo.dirstate.normal
1692 1693 for f in fixup:
1693 1694 normal(f)
1694 1695 # write changes out explicitly, because nesting
1695 1696 # wlock at runtime may prevent 'wlock.release()'
1696 1697 # after this block from doing so for subsequent
1697 1698 # changing files
1698 1699 tr = self._repo.currenttransaction()
1699 1700 self._repo.dirstate.write(tr)
1700 1701
1701 1702 if poststatus:
1702 1703 for ps in poststatus:
1703 1704 ps(self, status)
1704 1705 else:
1705 1706 # in this case, writing changes out breaks
1706 1707 # consistency, because .hg/dirstate was
1707 1708 # already changed simultaneously after last
1708 1709 # caching (see also issue5584 for detail)
1709 1710 self._repo.ui.debug('skip updating dirstate: '
1710 1711 'identity mismatch\n')
1711 1712 except error.LockError:
1712 1713 pass
1713 1714 finally:
1714 1715 # Even if the wlock couldn't be grabbed, clear out the list.
1715 1716 self._repo.clearpostdsstatus()
1716 1717
1717 1718 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1718 1719 '''Gets the status from the dirstate -- internal use only.'''
1719 1720 subrepos = []
1720 1721 if '.hgsub' in self:
1721 1722 subrepos = sorted(self.substate)
1722 1723 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1723 1724 clean=clean, unknown=unknown)
1724 1725
1725 1726 # check for any possibly clean files
1726 1727 fixup = []
1727 1728 if cmp:
1728 1729 modified2, deleted2, fixup = self._checklookup(cmp)
1729 1730 s.modified.extend(modified2)
1730 1731 s.deleted.extend(deleted2)
1731 1732
1732 1733 if fixup and clean:
1733 1734 s.clean.extend(fixup)
1734 1735
1735 1736 self._poststatusfixup(s, fixup)
1736 1737
1737 1738 if match.always():
1738 1739 # cache for performance
1739 1740 if s.unknown or s.ignored or s.clean:
1740 1741 # "_status" is cached with list*=False in the normal route
1741 1742 self._status = scmutil.status(s.modified, s.added, s.removed,
1742 1743 s.deleted, [], [], [])
1743 1744 else:
1744 1745 self._status = s
1745 1746
1746 1747 return s
1747 1748
1748 1749 @propertycache
1749 1750 def _manifest(self):
1750 1751 """generate a manifest corresponding to the values in self._status
1751 1752
1752 1753 This reuse the file nodeid from parent, but we use special node
1753 1754 identifiers for added and modified files. This is used by manifests
1754 1755 merge to see that files are different and by update logic to avoid
1755 1756 deleting newly added files.
1756 1757 """
1757 1758 return self._buildstatusmanifest(self._status)
1758 1759
1759 1760 def _buildstatusmanifest(self, status):
1760 1761 """Builds a manifest that includes the given status results."""
1761 1762 parents = self.parents()
1762 1763
1763 1764 man = parents[0].manifest().copy()
1764 1765
1765 1766 ff = self._flagfunc
1766 1767 for i, l in ((addednodeid, status.added),
1767 1768 (modifiednodeid, status.modified)):
1768 1769 for f in l:
1769 1770 man[f] = i
1770 1771 try:
1771 1772 man.setflag(f, ff(f))
1772 1773 except OSError:
1773 1774 pass
1774 1775
1775 1776 for f in status.deleted + status.removed:
1776 1777 if f in man:
1777 1778 del man[f]
1778 1779
1779 1780 return man
1780 1781
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the incoming ``s`` is discarded: status is always recomputed from
        # the dirstate first, then refined below
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        # fast path: against our own parent the dirstate status is already
        # the answer; only fall back to the manifest-based comparison when
        # diffing against some other revision
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1800 1801
1801 1802 def _matchstatus(self, other, match):
1802 1803 """override the match method with a filter for directory patterns
1803 1804
1804 1805 We use inheritance to customize the match.bad method only in cases of
1805 1806 workingctx since it belongs only to the working directory when
1806 1807 comparing against the parent changeset.
1807 1808
1808 1809 If we aren't comparing against the working directory's parent, then we
1809 1810 just use the default match object sent to us.
1810 1811 """
1811 1812 if other != self._repo['.']:
1812 1813 def bad(f, msg):
1813 1814 # 'f' may be a directory pattern from 'match.files()',
1814 1815 # so 'f not in ctx1' is not enough
1815 1816 if f not in other and not other.hasdir(f):
1816 1817 self._repo.ui.warn('%s: %s\n' %
1817 1818 (self._repo.dirstate.pathto(f), msg))
1818 1819 match.bad = bad
1819 1820 return match
1820 1821
    def markcommitted(self, node):
        """Perform post-commit bookkeeping, then notify the sparse extension.

        ``node`` is the node id of the freshly created commit.
        """
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1825 1826
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        # filerev/filenode are unknown for uncommitted content
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # both filelog and ctx are optional; when omitted, subclasses are
        # expected to provide them (e.g. via propertycache)
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always represents an existing file
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this parent"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copied/renamed: the first parent is the copy source;
            # (None,) leaves the filelog to be looked up by path
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents where the file does not exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted content has no descendants yet
        return []
1872 1873
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """Return the on-disk content of the file."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        # lstat so a symlink reports its own size, not its target's
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset); fall back to the changectx date if the
        file has disappeared from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only record the copy if dirstate tracks the file
        # (normal/modified/added)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        # a directory (but not a symlink to one) at the target path blocks
        # writing a file there; remove it wholesale
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        # a file sitting where one of our parent directories must go also
        # blocks the write; only the deepest such conflict needs removing
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1951 1952
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        # note: no per-file state here -- this is a *changectx*-like object;
        # file-level access goes through self._cache[path]
        self.clean()

    def setbase(self, wrappedctx):
        """Set (or reset) the underlying context this overlay wraps."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """Return file content from the cache, falling back to the wrapped
        context for clean or flags-only-dirty files."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        """Manifest of p1 overlaid with the cached adds/modifies/removes."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # dirty, exists here, and exists in the parent -> modified
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        # dirty, exists here, but not in the parent -> added
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        # dirty, gone here, but present in the parent -> removed
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Record that ``path`` is a copy of ``origin``; path must be dirty."""
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        """Return the recorded copy source for ``path`` (may be None)."""
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # bugfix: this used ``self._path``, which does not exist on
                # this (changectx-like) class, so the intended
                # ProgrammingError surfaced as an AttributeError instead
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # bugfix: same ``self._path`` copy-paste error as flags();
                # use the ``path`` argument so the ProgrammingError is raised
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2239 2240
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; all per-file state
        # lives in its cache, so every method below delegates to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content-only comparison; flags are not considered here
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """Return (source path, source filenode) if copied, else None."""
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # no-op: nothing is written to disk, so there is no path to audit
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # backgroundclose is accepted for interface parity but unused:
        # writes only touch the in-memory cache
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no-op: no on-disk conflicts can exist for in-memory files
        pass
2298 2299
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: super(workingctx, ...) deliberately skips workingctx in the
        # MRO, passing the precomputed ``changes`` status straight to the
        # next base class -- presumably to avoid workingctx's own
        # initialization; confirm against workingctx.__init__ if changing
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # everything in the manifest that is not part of this commit
            # is reported clean
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2334 2335
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
2350 2351
def memfilefromctx(ctx):
    """Return a ``filectxfn`` that reads file data out of ``ctx``.

    This is a convenience helper for building a memctx on top of another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (path, filenode) pair, but memfilectx only
        # records the copy source path
        renamed = fctx.renamed()
        copysource = renamed[0] if renamed else None
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource)

    return getfilectx
2369 2370
def memfilefrompatch(patchstore):
    """Return a ``filectxfn`` backed by a patch store.

    This is a convenience helper for building a memctx from a
    patchstore-like object; a ``None`` data payload signals a deletion.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # deleted file: memctx expects None here
            return None
        link, exe = mode
        return memfilectx(repo, memctx, path, data,
                          islink=link, isexec=exe, copied=copysource)

    return getfilectx
2384 2385
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents (None) become the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # normalize the three accepted filectxfn forms (patch store, plain
        # context, callable) into a single callable
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # new files have no file-parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: file content is present
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2507 2508
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2540 2541
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # not overridden: trivially matches the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Return file content (possibly lazily overridden via datafunc)."""
        return self._datafunc()
2611 2612
2612 2613 class metadataonlyctx(committablectx):
2613 2614 """Like memctx but it's reusing the manifest of different commit.
2614 2615 Intended to be used by lightweight operations that are creating
2615 2616 metadata-only changes.
2616 2617
2617 2618 Revision information is supplied at initialization time. 'repo' is the
2618 2619 current localrepo, 'ctx' is original revision which manifest we're reuisng
2619 2620 'parents' is a sequence of two parent revisions identifiers (pass None for
2620 2621 every missing parent), 'text' is the commit.
2621 2622
2622 2623 user receives the committer name and defaults to current repository
2623 2624 username, date is the commit date in any format supported by
2624 2625 util.parsedate() and defaults to current date, extra is a dictionary of
2625 2626 metadata or is left empty.
2626 2627 """
2627 2628 def __new__(cls, repo, originalctx, *args, **kwargs):
2628 2629 return super(metadataonlyctx, cls).__new__(cls, repo)
2629 2630
2630 2631 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2631 2632 date=None, extra=None, editor=False):
2632 2633 if text is None:
2633 2634 text = originalctx.description()
2634 2635 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2635 2636 self._rev = None
2636 2637 self._node = None
2637 2638 self._originalctx = originalctx
2638 2639 self._manifestnode = originalctx.manifestnode()
2639 2640 if parents is None:
2640 2641 parents = originalctx.parents()
2641 2642 else:
2642 2643 parents = [repo[p] for p in parents if p is not None]
2643 2644 parents = parents[:]
2644 2645 while len(parents) < 2:
2645 2646 parents.append(repo[nullid])
2646 2647 p1, p2 = self._parents = parents
2647 2648
2648 2649 # sanity check to ensure that the reused manifest parents are
2649 2650 # manifests of our commit parents
2650 2651 mp1, mp2 = self.manifestctx().parents
2651 2652 if p1 != nullid and p1.manifestnode() != mp1:
2652 2653 raise RuntimeError('can\'t reuse the manifest: '
2653 2654 'its p1 doesn\'t match the new ctx p1')
2654 2655 if p2 != nullid and p2.manifestnode() != mp2:
2655 2656 raise RuntimeError('can\'t reuse the manifest: '
2656 2657 'its p2 doesn\'t match the new ctx p2')
2657 2658
2658 2659 self._files = originalctx.files()
2659 2660 self.substate = {}
2660 2661
2661 2662 if editor:
2662 2663 self._text = editor(self._repo, self, [])
2663 2664 self._repo.savecommitmessage(self._text)
2664 2665
2665 2666 def manifestnode(self):
2666 2667 return self._manifestnode
2667 2668
2668 2669 @property
2669 2670 def _manifestctx(self):
2670 2671 return self._repo.manifestlog[self._manifestnode]
2671 2672
2672 2673 def filectx(self, path, filelog=None):
2673 2674 return self._originalctx.filectx(path, filelog=filelog)
2674 2675
2675 2676 def commit(self):
2676 2677 """commit context to the repo"""
2677 2678 return self._repo.commitctx(self)
2678 2679
2679 2680 @property
2680 2681 def _manifest(self):
2681 2682 return self._originalctx.manifest()
2682 2683
2683 2684 @propertycache
2684 2685 def _status(self):
2685 2686 """Calculate exact status from ``files`` specified in the ``origctx``
2686 2687 and parents manifests.
2687 2688 """
2688 2689 man1 = self.p1().manifest()
2689 2690 p2 = self._parents[1]
2690 2691 # "1 < len(self._parents)" can't be used for checking
2691 2692 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2692 2693 # explicitly initialized by the list, of which length is 2.
2693 2694 if p2.node() != nullid:
2694 2695 man2 = p2.manifest()
2695 2696 managing = lambda f: f in man1 or f in man2
2696 2697 else:
2697 2698 managing = lambda f: f in man1
2698 2699
2699 2700 modified, added, removed = [], [], []
2700 2701 for f in self._files:
2701 2702 if not managing(f):
2702 2703 added.append(f)
2703 2704 elif f in self:
2704 2705 modified.append(f)
2705 2706 else:
2706 2707 removed.append(f)
2707 2708
2708 2709 return scmutil.status(modified, added, removed, [], [], [], [])
2709 2710
2710 2711 class arbitraryfilectx(object):
2711 2712 """Allows you to use filectx-like functions on a file in an arbitrary
2712 2713 location on disk, possibly not in the working directory.
2713 2714 """
2714 2715 def __init__(self, path, repo=None):
2715 2716 # Repo is optional because contrib/simplemerge uses this class.
2716 2717 self._repo = repo
2717 2718 self._path = path
2718 2719
2719 2720 def cmp(self, fctx):
2720 2721 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2721 2722 # path if either side is a symlink.
2722 2723 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2723 2724 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2724 2725 # Add a fast-path for merge if both sides are disk-backed.
2725 2726 # Note that filecmp uses the opposite return values (True if same)
2726 2727 # from our cmp functions (True if different).
2727 2728 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2728 2729 return self.data() != fctx.data()
2729 2730
2730 2731 def path(self):
2731 2732 return self._path
2732 2733
2733 2734 def flags(self):
2734 2735 return ''
2735 2736
2736 2737 def data(self):
2737 2738 return util.readfile(self._path)
2738 2739
2739 2740 def decodeddata(self):
2740 2741 with open(self._path, "rb") as f:
2741 2742 return f.read()
2742 2743
2743 2744 def remove(self):
2744 2745 util.unlink(self._path)
2745 2746
2746 2747 def write(self, data, flags, **kwargs):
2747 2748 assert not flags
2748 2749 with open(self._path, "w") as f:
2749 2750 f.write(data)
@@ -1,2275 +1,2275
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 )
24 24 from . import (
25 25 bookmarks,
26 26 branchmap,
27 27 bundle2,
28 28 changegroup,
29 29 changelog,
30 30 color,
31 31 context,
32 32 dirstate,
33 33 dirstateguard,
34 34 discovery,
35 35 encoding,
36 36 error,
37 37 exchange,
38 38 extensions,
39 39 filelog,
40 40 hook,
41 41 lock as lockmod,
42 42 manifest,
43 43 match as matchmod,
44 44 merge as mergemod,
45 45 mergeutil,
46 46 namespaces,
47 47 obsolete,
48 48 pathutil,
49 49 peer,
50 50 phases,
51 51 pushkey,
52 52 pycompat,
53 53 repository,
54 54 repoview,
55 55 revset,
56 56 revsetlang,
57 57 scmutil,
58 58 sparse,
59 59 store,
60 subrepo,
60 subrepoutil,
61 61 tags as tagsmod,
62 62 transaction,
63 63 txnutil,
64 64 util,
65 65 vfs as vfsmod,
66 66 )
67 67
68 68 release = lockmod.release
69 69 urlerr = util.urlerr
70 70 urlreq = util.urlreq
71 71
72 72 # set of (path, vfs-location) tuples. vfs-location is:
73 73 # - 'plain' for vfs relative paths
74 74 # - '' for svfs relative paths
75 75 _cachedfiles = set()
76 76
77 77 class _basefilecache(scmutil.filecache):
78 78 """All filecache usage on repo are done for logic that should be unfiltered
79 79 """
80 80 def __get__(self, repo, type=None):
81 81 if repo is None:
82 82 return self
83 83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 84 def __set__(self, repo, value):
85 85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 86 def __delete__(self, repo):
87 87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88 88
89 89 class repofilecache(_basefilecache):
90 90 """filecache for files in .hg but outside of .hg/store"""
91 91 def __init__(self, *paths):
92 92 super(repofilecache, self).__init__(*paths)
93 93 for path in paths:
94 94 _cachedfiles.add((path, 'plain'))
95 95
96 96 def join(self, obj, fname):
97 97 return obj.vfs.join(fname)
98 98
99 99 class storecache(_basefilecache):
100 100 """filecache for files in the store"""
101 101 def __init__(self, *paths):
102 102 super(storecache, self).__init__(*paths)
103 103 for path in paths:
104 104 _cachedfiles.add((path, ''))
105 105
106 106 def join(self, obj, fname):
107 107 return obj.sjoin(fname)
108 108
109 109 def isfilecached(repo, name):
110 110 """check if a repo has already cached "name" filecache-ed property
111 111
112 112 This returns (cachedobj-or-None, iscached) tuple.
113 113 """
114 114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 115 if not cacheentry:
116 116 return None, False
117 117 return cacheentry.obj, True
118 118
119 119 class unfilteredpropertycache(util.propertycache):
120 120 """propertycache that apply to unfiltered repo only"""
121 121
122 122 def __get__(self, repo, type=None):
123 123 unfi = repo.unfiltered()
124 124 if unfi is repo:
125 125 return super(unfilteredpropertycache, self).__get__(unfi)
126 126 return getattr(unfi, self.name)
127 127
128 128 class filteredpropertycache(util.propertycache):
129 129 """propertycache that must take filtering in account"""
130 130
131 131 def cachevalue(self, obj, value):
132 132 object.__setattr__(obj, self.name, value)
133 133
134 134
135 135 def hasunfilteredcache(repo, name):
136 136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 137 return name in vars(repo.unfiltered())
138 138
139 139 def unfilteredmethod(orig):
140 140 """decorate method that always need to be run on unfiltered version"""
141 141 def wrapper(repo, *args, **kwargs):
142 142 return orig(repo.unfiltered(), *args, **kwargs)
143 143 return wrapper
144 144
145 145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 146 'unbundle'}
147 147 legacycaps = moderncaps.union({'changegroupsubset'})
148 148
149 149 class localpeer(repository.peer):
150 150 '''peer for a local repo; reflects only the most recent API'''
151 151
152 152 def __init__(self, repo, caps=None):
153 153 super(localpeer, self).__init__()
154 154
155 155 if caps is None:
156 156 caps = moderncaps.copy()
157 157 self._repo = repo.filtered('served')
158 158 self._ui = repo.ui
159 159 self._caps = repo._restrictcapabilities(caps)
160 160
161 161 # Begin of _basepeer interface.
162 162
163 163 @util.propertycache
164 164 def ui(self):
165 165 return self._ui
166 166
167 167 def url(self):
168 168 return self._repo.url()
169 169
170 170 def local(self):
171 171 return self._repo
172 172
173 173 def peer(self):
174 174 return self
175 175
176 176 def canpush(self):
177 177 return True
178 178
179 179 def close(self):
180 180 self._repo.close()
181 181
182 182 # End of _basepeer interface.
183 183
184 184 # Begin of _basewirecommands interface.
185 185
186 186 def branchmap(self):
187 187 return self._repo.branchmap()
188 188
189 189 def capabilities(self):
190 190 return self._caps
191 191
192 192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 193 """Used to test argument passing over the wire"""
194 194 return "%s %s %s %s %s" % (one, two, three, four, five)
195 195
196 196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 197 **kwargs):
198 198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 199 common=common, bundlecaps=bundlecaps,
200 200 **kwargs)[1]
201 201 cb = util.chunkbuffer(chunks)
202 202
203 203 if exchange.bundle2requested(bundlecaps):
204 204 # When requesting a bundle2, getbundle returns a stream to make the
205 205 # wire level function happier. We need to build a proper object
206 206 # from it in local peer.
207 207 return bundle2.getunbundler(self.ui, cb)
208 208 else:
209 209 return changegroup.getunbundler('01', cb, None)
210 210
211 211 def heads(self):
212 212 return self._repo.heads()
213 213
214 214 def known(self, nodes):
215 215 return self._repo.known(nodes)
216 216
217 217 def listkeys(self, namespace):
218 218 return self._repo.listkeys(namespace)
219 219
220 220 def lookup(self, key):
221 221 return self._repo.lookup(key)
222 222
223 223 def pushkey(self, namespace, key, old, new):
224 224 return self._repo.pushkey(namespace, key, old, new)
225 225
226 226 def stream_out(self):
227 227 raise error.Abort(_('cannot perform stream clone against local '
228 228 'peer'))
229 229
230 230 def unbundle(self, cg, heads, url):
231 231 """apply a bundle on a repo
232 232
233 233 This function handles the repo locking itself."""
234 234 try:
235 235 try:
236 236 cg = exchange.readbundle(self.ui, cg, None)
237 237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 238 if util.safehasattr(ret, 'getchunks'):
239 239 # This is a bundle20 object, turn it into an unbundler.
240 240 # This little dance should be dropped eventually when the
241 241 # API is finally improved.
242 242 stream = util.chunkbuffer(ret.getchunks())
243 243 ret = bundle2.getunbundler(self.ui, stream)
244 244 return ret
245 245 except Exception as exc:
246 246 # If the exception contains output salvaged from a bundle2
247 247 # reply, we need to make sure it is printed before continuing
248 248 # to fail. So we build a bundle2 with such output and consume
249 249 # it directly.
250 250 #
251 251 # This is not very elegant but allows a "simple" solution for
252 252 # issue4594
253 253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 254 if output:
255 255 bundler = bundle2.bundle20(self._repo.ui)
256 256 for out in output:
257 257 bundler.addpart(out)
258 258 stream = util.chunkbuffer(bundler.getchunks())
259 259 b = bundle2.getunbundler(self.ui, stream)
260 260 bundle2.processbundle(self._repo, b)
261 261 raise
262 262 except error.PushRaced as exc:
263 263 raise error.ResponseError(_('push failed:'), str(exc))
264 264
265 265 # End of _basewirecommands interface.
266 266
267 267 # Begin of peer interface.
268 268
269 269 def iterbatch(self):
270 270 return peer.localiterbatcher(self)
271 271
272 272 # End of peer interface.
273 273
274 274 class locallegacypeer(repository.legacypeer, localpeer):
275 275 '''peer extension which implements legacy methods too; used for tests with
276 276 restricted capabilities'''
277 277
278 278 def __init__(self, repo):
279 279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280 280
281 281 # Begin of baselegacywirecommands interface.
282 282
283 283 def between(self, pairs):
284 284 return self._repo.between(pairs)
285 285
286 286 def branches(self, nodes):
287 287 return self._repo.branches(nodes)
288 288
289 289 def changegroup(self, basenodes, source):
290 290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 291 missingheads=self._repo.heads())
292 292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293 293
294 294 def changegroupsubset(self, bases, heads, source):
295 295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 296 missingheads=heads)
297 297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298 298
299 299 # End of baselegacywirecommands interface.
300 300
301 301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 302 # clients.
303 303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304 304
305 305 class localrepository(object):
306 306
307 307 supportedformats = {
308 308 'revlogv1',
309 309 'generaldelta',
310 310 'treemanifest',
311 311 'manifestv2',
312 312 REVLOGV2_REQUIREMENT,
313 313 }
314 314 _basesupported = supportedformats | {
315 315 'store',
316 316 'fncache',
317 317 'shared',
318 318 'relshared',
319 319 'dotencode',
320 320 'exp-sparse',
321 321 }
322 322 openerreqs = {
323 323 'revlogv1',
324 324 'generaldelta',
325 325 'treemanifest',
326 326 'manifestv2',
327 327 }
328 328
329 329 # a list of (ui, featureset) functions.
330 330 # only functions defined in module of enabled extensions are invoked
331 331 featuresetupfuncs = set()
332 332
333 333 # list of prefix for file which can be written without 'wlock'
334 334 # Extensions should extend this list when needed
335 335 _wlockfreeprefix = {
336 336 # We might consider requiring 'wlock' for the next
337 337 # two, but pretty much all the existing code assume
338 338 # wlock is not needed so we keep them excluded for
339 339 # now.
340 340 'hgrc',
341 341 'requires',
342 342 # XXX cache is a complicated business someone
343 343 # should investigate this in depth at some point
344 344 'cache/',
345 345 # XXX shouldn't be dirstate covered by the wlock?
346 346 'dirstate',
347 347 # XXX bisect was still a bit too messy at the time
348 348 # this changeset was introduced. Someone should fix
349 349 # the remaining bit and drop this line
350 350 'bisect.state',
351 351 }
352 352
353 353 def __init__(self, baseui, path, create=False):
354 354 self.requirements = set()
355 355 self.filtername = None
356 356 # wvfs: rooted at the repository root, used to access the working copy
357 357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 359 self.vfs = None
360 360 # svfs: usually rooted at .hg/store, used to access repository history
361 361 # If this is a shared repository, this vfs may point to another
362 362 # repository's .hg/store directory.
363 363 self.svfs = None
364 364 self.root = self.wvfs.base
365 365 self.path = self.wvfs.join(".hg")
366 366 self.origroot = path
367 367 # This is only used by context.workingctx.match in order to
368 368 # detect files in subrepos.
369 369 self.auditor = pathutil.pathauditor(
370 370 self.root, callback=self._checknested)
371 371 # This is only used by context.basectx.match in order to detect
372 372 # files in subrepos.
373 373 self.nofsauditor = pathutil.pathauditor(
374 374 self.root, callback=self._checknested, realfs=False, cached=True)
375 375 self.baseui = baseui
376 376 self.ui = baseui.copy()
377 377 self.ui.copy = baseui.copy # prevent copying repo configuration
378 378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
379 379 if (self.ui.configbool('devel', 'all-warnings') or
380 380 self.ui.configbool('devel', 'check-locks')):
381 381 self.vfs.audit = self._getvfsward(self.vfs.audit)
382 382 # A list of callback to shape the phase if no data were found.
383 383 # Callback are in the form: func(repo, roots) --> processed root.
384 384 # This list it to be filled by extension during repo setup
385 385 self._phasedefaults = []
386 386 try:
387 387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
388 388 self._loadextensions()
389 389 except IOError:
390 390 pass
391 391
392 392 if self.featuresetupfuncs:
393 393 self.supported = set(self._basesupported) # use private copy
394 394 extmods = set(m.__name__ for n, m
395 395 in extensions.extensions(self.ui))
396 396 for setupfunc in self.featuresetupfuncs:
397 397 if setupfunc.__module__ in extmods:
398 398 setupfunc(self.ui, self.supported)
399 399 else:
400 400 self.supported = self._basesupported
401 401 color.setup(self.ui)
402 402
403 403 # Add compression engines.
404 404 for name in util.compengines:
405 405 engine = util.compengines[name]
406 406 if engine.revlogheader():
407 407 self.supported.add('exp-compression-%s' % name)
408 408
409 409 if not self.vfs.isdir():
410 410 if create:
411 411 self.requirements = newreporequirements(self)
412 412
413 413 if not self.wvfs.exists():
414 414 self.wvfs.makedirs()
415 415 self.vfs.makedir(notindexed=True)
416 416
417 417 if 'store' in self.requirements:
418 418 self.vfs.mkdir("store")
419 419
420 420 # create an invalid changelog
421 421 self.vfs.append(
422 422 "00changelog.i",
423 423 '\0\0\0\2' # represents revlogv2
424 424 ' dummy changelog to prevent using the old repo layout'
425 425 )
426 426 else:
427 427 raise error.RepoError(_("repository %s not found") % path)
428 428 elif create:
429 429 raise error.RepoError(_("repository %s already exists") % path)
430 430 else:
431 431 try:
432 432 self.requirements = scmutil.readrequires(
433 433 self.vfs, self.supported)
434 434 except IOError as inst:
435 435 if inst.errno != errno.ENOENT:
436 436 raise
437 437
438 438 cachepath = self.vfs.join('cache')
439 439 self.sharedpath = self.path
440 440 try:
441 441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
442 442 if 'relshared' in self.requirements:
443 443 sharedpath = self.vfs.join(sharedpath)
444 444 vfs = vfsmod.vfs(sharedpath, realpath=True)
445 445 cachepath = vfs.join('cache')
446 446 s = vfs.base
447 447 if not vfs.exists():
448 448 raise error.RepoError(
449 449 _('.hg/sharedpath points to nonexistent directory %s') % s)
450 450 self.sharedpath = s
451 451 except IOError as inst:
452 452 if inst.errno != errno.ENOENT:
453 453 raise
454 454
455 455 if 'exp-sparse' in self.requirements and not sparse.enabled:
456 456 raise error.RepoError(_('repository is using sparse feature but '
457 457 'sparse is not enabled; enable the '
458 458 '"sparse" extensions to access'))
459 459
460 460 self.store = store.store(
461 461 self.requirements, self.sharedpath,
462 462 lambda base: vfsmod.vfs(base, cacheaudited=True))
463 463 self.spath = self.store.path
464 464 self.svfs = self.store.vfs
465 465 self.sjoin = self.store.join
466 466 self.vfs.createmode = self.store.createmode
467 467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
468 468 self.cachevfs.createmode = self.store.createmode
469 469 if (self.ui.configbool('devel', 'all-warnings') or
470 470 self.ui.configbool('devel', 'check-locks')):
471 471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
472 472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
473 473 else: # standard vfs
474 474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
475 475 self._applyopenerreqs()
476 476 if create:
477 477 self._writerequirements()
478 478
479 479 self._dirstatevalidatewarned = False
480 480
481 481 self._branchcaches = {}
482 482 self._revbranchcache = None
483 483 self.filterpats = {}
484 484 self._datafilters = {}
485 485 self._transref = self._lockref = self._wlockref = None
486 486
487 487 # A cache for various files under .hg/ that tracks file changes,
488 488 # (used by the filecache decorator)
489 489 #
490 490 # Maps a property name to its util.filecacheentry
491 491 self._filecache = {}
492 492
493 493 # hold sets of revision to be filtered
494 494 # should be cleared when something might have changed the filter value:
495 495 # - new changesets,
496 496 # - phase change,
497 497 # - new obsolescence marker,
498 498 # - working directory parent change,
499 499 # - bookmark changes
500 500 self.filteredrevcache = {}
501 501
502 502 # post-dirstate-status hooks
503 503 self._postdsstatus = []
504 504
505 505 # generic mapping between names and nodes
506 506 self.names = namespaces.namespaces()
507 507
508 508 # Key to signature value.
509 509 self._sparsesignaturecache = {}
510 510 # Signature to cached matcher instance.
511 511 self._sparsematchercache = {}
512 512
513 513 def _getvfsward(self, origfunc):
514 514 """build a ward for self.vfs"""
515 515 rref = weakref.ref(self)
516 516 def checkvfs(path, mode=None):
517 517 ret = origfunc(path, mode=mode)
518 518 repo = rref()
519 519 if (repo is None
520 520 or not util.safehasattr(repo, '_wlockref')
521 521 or not util.safehasattr(repo, '_lockref')):
522 522 return
523 523 if mode in (None, 'r', 'rb'):
524 524 return
525 525 if path.startswith(repo.path):
526 526 # truncate name relative to the repository (.hg)
527 527 path = path[len(repo.path) + 1:]
528 528 if path.startswith('cache/'):
529 529 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
530 530 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
531 531 if path.startswith('journal.'):
532 532 # journal is covered by 'lock'
533 533 if repo._currentlock(repo._lockref) is None:
534 534 repo.ui.develwarn('write with no lock: "%s"' % path,
535 535 stacklevel=2, config='check-locks')
536 536 elif repo._currentlock(repo._wlockref) is None:
537 537 # rest of vfs files are covered by 'wlock'
538 538 #
539 539 # exclude special files
540 540 for prefix in self._wlockfreeprefix:
541 541 if path.startswith(prefix):
542 542 return
543 543 repo.ui.develwarn('write with no wlock: "%s"' % path,
544 544 stacklevel=2, config='check-locks')
545 545 return ret
546 546 return checkvfs
547 547
548 548 def _getsvfsward(self, origfunc):
549 549 """build a ward for self.svfs"""
550 550 rref = weakref.ref(self)
551 551 def checksvfs(path, mode=None):
552 552 ret = origfunc(path, mode=mode)
553 553 repo = rref()
554 554 if repo is None or not util.safehasattr(repo, '_lockref'):
555 555 return
556 556 if mode in (None, 'r', 'rb'):
557 557 return
558 558 if path.startswith(repo.sharedpath):
559 559 # truncate name relative to the repository (.hg)
560 560 path = path[len(repo.sharedpath) + 1:]
561 561 if repo._currentlock(repo._lockref) is None:
562 562 repo.ui.develwarn('write with no lock: "%s"' % path,
563 563 stacklevel=3)
564 564 return ret
565 565 return checksvfs
566 566
567 567 def close(self):
568 568 self._writecaches()
569 569
570 570 def _loadextensions(self):
571 571 extensions.loadall(self.ui)
572 572
573 573 def _writecaches(self):
574 574 if self._revbranchcache:
575 575 self._revbranchcache.write()
576 576
577 577 def _restrictcapabilities(self, caps):
578 578 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 579 caps = set(caps)
580 580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
581 581 role='client'))
582 582 caps.add('bundle2=' + urlreq.quote(capsblob))
583 583 return caps
584 584
585 585 def _applyopenerreqs(self):
586 586 self.svfs.options = dict((r, 1) for r in self.requirements
587 587 if r in self.openerreqs)
588 588 # experimental config: format.chunkcachesize
589 589 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
590 590 if chunkcachesize is not None:
591 591 self.svfs.options['chunkcachesize'] = chunkcachesize
592 592 # experimental config: format.maxchainlen
593 593 maxchainlen = self.ui.configint('format', 'maxchainlen')
594 594 if maxchainlen is not None:
595 595 self.svfs.options['maxchainlen'] = maxchainlen
596 596 # experimental config: format.manifestcachesize
597 597 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
598 598 if manifestcachesize is not None:
599 599 self.svfs.options['manifestcachesize'] = manifestcachesize
600 600 # experimental config: format.aggressivemergedeltas
601 601 aggressivemergedeltas = self.ui.configbool('format',
602 602 'aggressivemergedeltas')
603 603 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
604 604 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
605 605 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
606 606 if 0 <= chainspan:
607 607 self.svfs.options['maxdeltachainspan'] = chainspan
608 608 mmapindexthreshold = self.ui.configbytes('experimental',
609 609 'mmapindexthreshold')
610 610 if mmapindexthreshold is not None:
611 611 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
612 612 withsparseread = self.ui.configbool('experimental', 'sparse-read')
613 613 srdensitythres = float(self.ui.config('experimental',
614 614 'sparse-read.density-threshold'))
615 615 srmingapsize = self.ui.configbytes('experimental',
616 616 'sparse-read.min-gap-size')
617 617 self.svfs.options['with-sparse-read'] = withsparseread
618 618 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
619 619 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
620 620
621 621 for r in self.requirements:
622 622 if r.startswith('exp-compression-'):
623 623 self.svfs.options['compengine'] = r[len('exp-compression-'):]
624 624
625 625 # TODO move "revlogv2" to openerreqs once finalized.
626 626 if REVLOGV2_REQUIREMENT in self.requirements:
627 627 self.svfs.options['revlogv2'] = True
628 628
629 629 def _writerequirements(self):
630 630 scmutil.writerequires(self.vfs, self.requirements)
631 631
632 632 def _checknested(self, path):
633 633 """Determine if path is a legal nested repository."""
634 634 if not path.startswith(self.root):
635 635 return False
636 636 subpath = path[len(self.root) + 1:]
637 637 normsubpath = util.pconvert(subpath)
638 638
639 639 # XXX: Checking against the current working copy is wrong in
640 640 # the sense that it can reject things like
641 641 #
642 642 # $ hg cat -r 10 sub/x.txt
643 643 #
644 644 # if sub/ is no longer a subrepository in the working copy
645 645 # parent revision.
646 646 #
647 647 # However, it can of course also allow things that would have
648 648 # been rejected before, such as the above cat command if sub/
649 649 # is a subrepository now, but was a normal directory before.
650 650 # The old path auditor would have rejected by mistake since it
651 651 # panics when it sees sub/.hg/.
652 652 #
653 653 # All in all, checking against the working copy seems sensible
654 654 # since we want to prevent access to nested repositories on
655 655 # the filesystem *now*.
656 656 ctx = self[None]
657 657 parts = util.splitpath(subpath)
658 658 while parts:
659 659 prefix = '/'.join(parts)
660 660 if prefix in ctx.substate:
661 661 if prefix == normsubpath:
662 662 return True
663 663 else:
664 664 sub = ctx.sub(prefix)
665 665 return sub.checknested(subpath[len(prefix) + 1:])
666 666 else:
667 667 parts.pop()
668 668 return False
669 669
670 670 def peer(self):
671 671 return localpeer(self) # not cached to avoid reference cycle
672 672
673 673 def unfiltered(self):
674 674 """Return unfiltered version of the repository
675 675
676 676 Intended to be overwritten by filtered repo."""
677 677 return self
678 678
679 679 def filtered(self, name, visibilityexceptions=None):
680 680 """Return a filtered version of a repository"""
681 681 cls = repoview.newtype(self.unfiltered().__class__)
682 682 return cls(self, name, visibilityexceptions)
683 683
684 684 @repofilecache('bookmarks', 'bookmarks.current')
685 685 def _bookmarks(self):
686 686 return bookmarks.bmstore(self)
687 687
688 688 @property
689 689 def _activebookmark(self):
690 690 return self._bookmarks.active
691 691
692 692 # _phasesets depend on changelog. what we need is to call
693 693 # _phasecache.invalidate() if '00changelog.i' was changed, but it
694 694 # can't be easily expressed in filecache mechanism.
695 695 @storecache('phaseroots', '00changelog.i')
696 696 def _phasecache(self):
697 697 return phases.phasecache(self, self._phasedefaults)
698 698
699 699 @storecache('obsstore')
700 700 def obsstore(self):
701 701 return obsolete.makestore(self.ui, self)
702 702
703 703 @storecache('00changelog.i')
704 704 def changelog(self):
705 705 return changelog.changelog(self.svfs,
706 706 trypending=txnutil.mayhavepending(self.root))
707 707
708 708 def _constructmanifest(self):
709 709 # This is a temporary function while we migrate from manifest to
710 710 # manifestlog. It allows bundlerepo and unionrepo to intercept the
711 711 # manifest creation.
712 712 return manifest.manifestrevlog(self.svfs)
713 713
714 714 @storecache('00manifest.i')
715 715 def manifestlog(self):
716 716 return manifest.manifestlog(self.svfs, self)
717 717
718 718 @repofilecache('dirstate')
719 719 def dirstate(self):
720 720 sparsematchfn = lambda: sparse.matcher(self)
721 721
722 722 return dirstate.dirstate(self.vfs, self.ui, self.root,
723 723 self._dirstatevalidate, sparsematchfn)
724 724
725 725 def _dirstatevalidate(self, node):
726 726 try:
727 727 self.changelog.rev(node)
728 728 return node
729 729 except error.LookupError:
730 730 if not self._dirstatevalidatewarned:
731 731 self._dirstatevalidatewarned = True
732 732 self.ui.warn(_("warning: ignoring unknown"
733 733 " working parent %s!\n") % short(node))
734 734 return nullid
735 735
736 736 def __getitem__(self, changeid):
737 737 if changeid is None:
738 738 return context.workingctx(self)
739 739 if isinstance(changeid, slice):
740 740 # wdirrev isn't contiguous so the slice shouldn't include it
741 741 return [context.changectx(self, i)
742 742 for i in xrange(*changeid.indices(len(self)))
743 743 if i not in self.changelog.filteredrevs]
744 744 try:
745 745 return context.changectx(self, changeid)
746 746 except error.WdirUnsupported:
747 747 return context.workingctx(self)
748 748
def __contains__(self, changeid):
    """True if the given changeid exists

    error.LookupError is raised if an ambiguous node specified.
    """
    try:
        self[changeid]
    except error.RepoLookupError:
        return False
    return True
759 759
def __nonzero__(self):
    # a repository object is always truthy
    return True

__bool__ = __nonzero__
764 764
def __len__(self):
    # no need to pay the cost of repoview.changelog: the unfiltered
    # changelog length is what callers expect here
    return len(self.unfiltered().changelog)
769 769
def __iter__(self):
    """Iterate over the (possibly filtered) changelog revisions."""
    return iter(self.changelog)
772 772
def revs(self, expr, *args):
    '''Find revisions matching a revset.

    ``expr`` is a revset string that may contain %-formatting escapes
    (see ``revsetlang.formatspec``).  Configuration revset aliases are
    NOT expanded; use ``scmutil.revrange()`` or
    ``repo.anyrevs([expr], user=True)`` for that.

    Returns a revset.abstractsmartset (a list-like of integer revs).
    '''
    matcher = revset.match(None, revsetlang.formatspec(expr, *args))
    return matcher(self)
789 789
def set(self, expr, *args):
    '''Generate changectx instances for revisions matching a revset.

    Convenience wrapper around ``revs()``; configuration revset aliases
    are not expanded (use ``scmutil.revrange()`` for that).
    '''
    for rev in self.revs(expr, *args):
        yield self[rev]
801 801
def anyrevs(self, specs, user=False, localalias=None):
    '''Find revisions matching one of the given revsets.

    With ``user=True`` configuration revset aliases are expanded;
    ``localalias`` ({name: definitionstring}) overrides user aliases.
    '''
    if user:
        matcher = revset.matchany(self.ui, specs, repo=self,
                                  localalias=localalias)
    else:
        matcher = revset.matchany(None, specs, localalias=localalias)
    return matcher(self)
816 816
def url(self):
    """Return the ``file:`` URL of this repository's root."""
    return 'file:' + self.root
819 819
def hook(self, name, throw=False, **args):
    """Call a hook, passing this repo instance.

    This a convenience method to aid invoking hooks. Extensions likely
    won't call this unless they have registered a custom hook or are
    replacing code that is expected to call a hook.
    """
    return hook.hook(self.ui, self, name, throw, **args)
828 828
@filteredpropertycache
def _tagscache(self):
    '''Returns a tagscache object that contains various tags related
    caches.'''

    # This simplifies its cache management by having one decorated
    # function (this one) and the rest simply fetch things from it.
    class tagscache(object):
        def __init__(self):
            # These two define the set of tags for this repository. tags
            # maps tag name to node; tagtypes maps tag name to 'global' or
            # 'local'. (Global tags are defined by .hgtags across all
            # heads, and local tags are defined in .hg/localtags.)
            # They constitute the in-memory cache of tags.
            self.tags = self.tagtypes = None

            # nodetagscache and tagslist are filled lazily by nodetags()
            # and tagslist() respectively
            self.nodetagscache = self.tagslist = None

    cache = tagscache()
    cache.tags, cache.tagtypes = self._findtags()

    return cache
851 851
def tags(self):
    '''return a mapping of tag to node'''
    if self.changelog.filteredrevs:
        # filtered views cannot reuse the unfiltered cache
        alltags, _tagtypes = self._findtags()
    else:
        alltags = self._tagscache.tags
    result = {}
    for name, node in alltags.iteritems():
        try:
            self.changelog.rev(node)  # drop tags on unknown nodes
        except (error.LookupError, ValueError):
            continue
        result[name] = node
    return result
867 867
def _findtags(self):
    '''Do the hard work of finding tags. Return a pair of dicts
    (tags, tagtypes) where tags maps tag name to node, and tagtypes
    maps tag name to a string like \'global\' or \'local\'.
    Subclasses or extensions are free to add their own tags, but
    should be aware that the returned dicts will be retained for the
    duration of the localrepo object.'''

    # XXX what tagtype should subclasses/extensions use? Currently
    # mq and bookmarks add tags, but do not set the tagtype at all.
    # Should each extension invent its own tag type? Should there
    # be one tagtype for all such "virtual" tags? Or is the status
    # quo fine?


    # map tag name to (node, hist)
    alltags = tagsmod.findglobaltags(self.ui, self)
    # map tag name to tag type
    tagtypes = dict((tag, 'global') for tag in alltags)

    # .hg/localtags entries may override global ones
    tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

    # Build the return dicts. Have to re-encode tag names because
    # the tags module always uses UTF-8 (in order not to lose info
    # writing to the cache), but the rest of Mercurial wants them in
    # local encoding.
    tags = {}
    for (name, (node, hist)) in alltags.iteritems():
        if node != nullid:
            # a null node means the tag was deleted
            tags[encoding.tolocal(name)] = node
    tags['tip'] = self.changelog.tip()
    tagtypes = dict([(encoding.tolocal(name), value)
                     for (name, value) in tagtypes.iteritems()])
    return (tags, tagtypes)
902 902
def tagtype(self, tagname):
    '''
    return the type of the given tag. result can be:

    'local'  : a local tag
    'global' : a global tag
    None     : tag does not exist
    '''
    return self._tagscache.tagtypes.get(tagname)
913 913
def tagslist(self):
    '''return a list of tags ordered by revision'''
    cache = self._tagscache
    if not cache.tagslist:
        # sort (rev, name, node) triples, then strip the rev key
        entries = [(self.changelog.rev(node), name, node)
                   for name, node in self.tags().iteritems()]
        cache.tagslist = [(name, node)
                          for rev, name, node in sorted(entries)]
    return cache.tagslist
923 923
def nodetags(self, node):
    '''return the tags associated with a node'''
    cache = self._tagscache
    if not cache.nodetagscache:
        # invert the tag->node mapping, sorting each node's tag list
        byname = {}
        for name, n in cache.tags.iteritems():
            byname.setdefault(n, []).append(name)
        for taglist in byname.itervalues():
            taglist.sort()
        cache.nodetagscache = byname
    return cache.nodetagscache.get(node, [])
934 934
def nodebookmarks(self, node):
    """return the list of bookmarks pointing to the specified node"""
    return sorted(mark for mark, n in self._bookmarks.iteritems()
                  if n == node)
942 942
def branchmap(self):
    '''returns a dictionary {branch: [branchheads]} with branchheads
    ordered by increasing revision number'''
    # refresh before reading the cache for this repo's filter level
    branchmap.updatecache(self)
    return self._branchcaches[self.filtername]
948 948
@unfilteredmethod
def revbranchcache(self):
    """Return the rev->branch cache, creating it on first access."""
    if not self._revbranchcache:
        self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
    return self._revbranchcache
954 954
def branchtip(self, branch, ignoremissing=False):
    '''return the tip node for a given branch

    If ignoremissing is True, then this method will not raise an error.
    This is helpful for callers that only expect None for a missing branch
    (e.g. namespace).

    '''
    try:
        return self.branchmap().branchtip(branch)
    except KeyError:
        if ignoremissing:
            return None
        raise error.RepoLookupError(_("unknown branch '%s'") % branch)
970 970
def lookup(self, key):
    """Resolve ``key`` through context lookup and return its node."""
    return self[key].node()
973 973
def lookupbranch(self, key, remote=None):
    """Return ``key`` if it names a branch (on ``remote`` when given),
    otherwise the branch of the changeset ``key`` resolves to."""
    repo = remote or self
    if key in repo.branchmap():
        return key

    # fall back to resolving key as a changeset and taking its branch;
    # prefer the remote only when it is a local repository
    if remote and remote.local():
        repo = remote
    else:
        repo = self
    return repo[key].branch()
981 981
def known(self, nodes):
    """For each node, report whether it is known and unfiltered here."""
    nodemap = self.changelog.nodemap
    filtered = self.changelog.filteredrevs
    flags = []
    for node in nodes:
        rev = nodemap.get(node)
        flags.append(rev is not None and rev not in filtered)
    return flags
992 992
def local(self):
    """Return self: this is a local repository (peers return None/False)."""
    return self
995 995
def publishing(self):
    """True if this repository publishes changesets on push/pull."""
    # it's safe (and desirable) to trust the publish flag unconditionally
    # so that we don't finalize changes shared between users via ssh or nfs
    return self.ui.configbool('phases', 'publish', untrusted=True)
1000 1000
def cancopy(self):
    """True if this repo may be cloned by copying its files."""
    # so statichttprepo's override of local() works
    if not self.local():
        return False
    if not self.publishing():
        return True
    # if publishing we can't copy if there is filtered content
    return not self.filtered('visible').changelog.filteredrevs
1009 1009
def shared(self):
    '''the type of shared repository (None if not shared)'''
    return 'store' if self.sharedpath != self.path else None
1015 1015
def wjoin(self, f, *insidef):
    """Join path components below the working-directory root."""
    return self.vfs.reljoin(self.root, f, *insidef)
1018 1018
def file(self, f):
    """Return the filelog for tracked file ``f`` (leading '/' stripped)."""
    if f[0] == '/':
        f = f[1:]
    return filelog.filelog(self.svfs, f)
1023 1023
def changectx(self, changeid):
    """Return the changectx for ``changeid`` (same as ``repo[changeid]``)."""
    return self[changeid]
1026 1026
def setparents(self, p1, p2=nullid):
    """Set the working directory parents, fixing up copy records that
    the dirstate cannot adjust itself (it lacks manifest access)."""
    with self.dirstate.parentchange():
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # with a single parent, drop copy records whose source and
            # destination are both unknown to that parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
1042 1042
def filectx(self, path, changeid=None, fileid=None):
    """changeid can be a changeset revision, node, or tag.
    fileid can be a file revision or node."""
    return context.filectx(self, path, changeid, fileid)
1047 1047
def getcwd(self):
    """Delegate to ``dirstate.getcwd()``."""
    return self.dirstate.getcwd()
1050 1050
def pathto(self, f, cwd=None):
    """Delegate path formatting of ``f`` to the dirstate."""
    return self.dirstate.pathto(f, cwd)
1053 1053
def _loadfilter(self, filter):
    """Parse the [encode]/[decode] config section named ``filter`` into a
    cached list of (matcher, filterfn, params) triples."""
    if filter not in self.filterpats:
        l = []
        for pat, cmd in self.ui.configitems(filter):
            if cmd == '!':
                # '!' unsets an earlier pattern
                continue
            mf = matchmod.match(self.root, '', [pat])
            fn = None
            params = cmd
            # a command starting with a registered data-filter name uses
            # that in-process filter; the remainder of cmd is its params
            for name, filterfn in self._datafilters.iteritems():
                if cmd.startswith(name):
                    fn = filterfn
                    params = cmd[len(name):].lstrip()
                    break
            if not fn:
                # otherwise run cmd as an external shell pipe
                fn = lambda s, c, **kwargs: util.filter(s, c)
            # Wrap old filters not supporting keyword arguments
            if not inspect.getargspec(fn)[2]:
                oldfn = fn
                fn = lambda s, c, **kwargs: oldfn(s, c)
            l.append((mf, fn, params))
        self.filterpats[filter] = l
    return self.filterpats[filter]
1077 1077
def _filter(self, filterpats, filename, data):
    """Run ``data`` through the first filter whose pattern matches
    ``filename``; return it unchanged when nothing matches."""
    for match, fn, cmd in filterpats:
        if not match(filename):
            continue
        self.ui.debug("filtering %s through %s\n" % (filename, cmd))
        data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
        break

    return data
1086 1086
@unfilteredpropertycache
def _encodefilterpats(self):
    # compiled [encode] filter patterns, cached on the unfiltered repo
    return self._loadfilter('encode')
1090 1090
@unfilteredpropertycache
def _decodefilterpats(self):
    # compiled [decode] filter patterns, cached on the unfiltered repo
    return self._loadfilter('decode')
1094 1094
def adddatafilter(self, name, filter):
    """Register an in-process data filter usable from [encode]/[decode]."""
    self._datafilters[name] = filter
1097 1097
def wread(self, filename):
    """Read ``filename`` from the working directory (symlinks read as
    their target) and apply the encode filters."""
    if self.wvfs.islink(filename):
        raw = self.wvfs.readlink(filename)
    else:
        raw = self.wvfs.read(filename)
    return self._filter(self._encodefilterpats, filename, raw)
1104 1104
def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
    """write ``data`` into ``filename`` in the working directory

    This returns length of written (maybe decoded) data.
    """
    data = self._filter(self._decodefilterpats, filename, data)
    if 'l' in flags:
        # symlink: the (decoded) data is the link target
        self.wvfs.symlink(data, filename)
    else:
        self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                        **kwargs)
        # set or clear the executable bit to match the flags
        self.wvfs.setflags(filename, False, 'x' in flags)
    return len(data)
1121 1121
def wwritedata(self, filename, data):
    """Run ``data`` through the decode filters for ``filename``."""
    return self._filter(self._decodefilterpats, filename, data)
1124 1124
def currenttransaction(self):
    """return the current transaction or None if non exists"""
    # _transref is a weakref; it may be unset or already collected
    tr = self._transref() if self._transref else None

    if tr and tr.running():
        return tr
    return None
1135 1135
def transaction(self, desc, report=None):
    """Open (or nest into) a transaction named ``desc``.

    The repository lock must already be held.  ``report`` optionally
    replaces ``ui.warn`` for journal messages.  Returns the transaction
    object; closing it runs the pretxnclose/txnclose hook machinery
    wired up below.
    """
    if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
        if self._currentlock(self._lockref) is None:
            raise error.ProgrammingError('transaction requires locking')
    tr = self.currenttransaction()
    if tr is not None:
        # nest into the already-running transaction
        return tr.nest()

    # abort here if the journal already exists
    if self.svfs.exists("journal"):
        raise error.RepoError(
            _("abandoned transaction found"),
            hint=_("run 'hg recover' to clean up transaction"))

    idbase = "%.40f#%f" % (random.random(), time.time())
    ha = hex(hashlib.sha1(idbase).digest())
    txnid = 'TXN:' + ha
    self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

    self._writejournal(desc)
    renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
    if report:
        rp = report
    else:
        rp = self.ui.warn
    vfsmap = {'plain': self.vfs} # root of .hg/
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)
    # Code to track tag movement
    #
    # Since tags are all handled as file content, it is actually quite hard
    # to track these movement from a code perspective. So we fallback to a
    # tracking at the repository level. One could envision to track changes
    # to the '.hgtags' file through changegroup apply but that fails to
    # cope with case where transaction expose new heads without changegroup
    # being involved (eg: phase movement).
    #
    # For now, We gate the feature behind a flag since this likely comes
    # with performance impacts. The current code run more often than needed
    # and do not use caches as much as it could. The current focus is on
    # the behavior of the feature so we disable it by default. The flag
    # will be removed when we are happy with the performance impact.
    #
    # Once this feature is no longer experimental move the following
    # documentation to the appropriate help section:
    #
    # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
    # tags (new or changed or deleted tags). In addition the details of
    # these changes are made available in a file at:
    #     ``REPOROOT/.hg/changes/tags.changes``.
    # Make sure you check for HG_TAG_MOVED before reading that file as it
    # might exist from a previous transaction even if no tag were touched
    # in this one. Changes are recorded in a line base format::
    #
    #   <action> <hex-node> <tag-name>\n
    #
    # Actions are defined as follow:
    #   "-R": tag is removed,
    #   "+A": tag is added,
    #   "-M": tag is moved (old value),
    #   "+M": tag is moved (new value),
    tracktags = lambda x: None
    # experimental config: experimental.hook-track-tags
    shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
    if desc != 'strip' and shouldtracktags:
        oldheads = self.changelog.headrevs()
        def tracktags(tr2):
            repo = reporef()
            oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
            newheads = repo.changelog.headrevs()
            newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
            # notes: we compare lists here.
            # As we do it only once buiding set would not be cheaper
            changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
            if changes:
                tr2.hookargs['tag_moved'] = '1'
                with repo.vfs('changes/tags.changes', 'w',
                              atomictemp=True) as changesfile:
                    # note: we do not register the file to the transaction
                    # because we needs it to still exist on the transaction
                    # is close (for txnclose hooks)
                    tagsmod.writediff(changesfile, changes)
    def validate(tr2):
        """will run pre-closing hooks"""
        # XXX the transaction API is a bit lacking here so we take a hacky
        # path for now
        #
        # We cannot add this as a "pending" hooks since the 'tr.hookargs'
        # dict is copied before these run. In addition we needs the data
        # available to in memory hooks too.
        #
        # Moreover, we also need to make sure this runs before txnclose
        # hooks and there is no "pending" mechanism that would execute
        # logic only if hooks are about to run.
        #
        # Fixing this limitation of the transaction is also needed to track
        # other families of changes (bookmarks, phases, obsolescence).
        #
        # This will have to be fixed before we remove the experimental
        # gating.
        tracktags(tr2)
        repo = reporef()
        if repo.ui.configbool('experimental', 'single-head-per-branch'):
            scmutil.enforcesinglehead(repo, tr2, desc)
        if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
            for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                args = tr.hookargs.copy()
                args.update(bookmarks.preparehookargs(name, old, new))
                repo.hook('pretxnclose-bookmark', throw=True,
                          txnname=desc,
                          **pycompat.strkwargs(args))
        if hook.hashook(repo.ui, 'pretxnclose-phase'):
            cl = repo.unfiltered().changelog
            for rev, (old, new) in tr.changes['phases'].items():
                args = tr.hookargs.copy()
                node = hex(cl.node(rev))
                args.update(phases.preparehookargs(node, old, new))
                repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                          **pycompat.strkwargs(args))

        repo.hook('pretxnclose', throw=True,
                  txnname=desc, **pycompat.strkwargs(tr.hookargs))
    def releasefn(tr, success):
        repo = reporef()
        if success:
            # this should be explicitly invoked here, because
            # in-memory changes aren't written out at closing
            # transaction, if tr.addfilegenerator (via
            # dirstate.write or so) isn't invoked while
            # transaction running
            repo.dirstate.write(None)
        else:
            # discard all changes (including ones already written
            # out) in this transaction
            repo.dirstate.restorebackup(None, 'journal.dirstate')

            repo.invalidate(clearfilecache=True)

    tr = transaction.transaction(rp, self.svfs, vfsmap,
                                 "journal",
                                 "undo",
                                 aftertrans(renames),
                                 self.store.createmode,
                                 validator=validate,
                                 releasefn=releasefn,
                                 checkambigfiles=_cachedfiles)
    tr.changes['revs'] = xrange(0, 0)
    tr.changes['obsmarkers'] = set()
    tr.changes['phases'] = {}
    tr.changes['bookmarks'] = {}

    tr.hookargs['txnid'] = txnid
    # note: writing the fncache only during finalize mean that the file is
    # outdated when running hooks. As fncache is used for streaming clone,
    # this is not expected to break anything that happen during the hooks.
    tr.addfinalize('flush-fncache', self.store.write)
    def txnclosehook(tr2):
        """To be run if transaction is successful, will schedule a hook run
        """
        # Don't reference tr2 in hook() so we don't hold a reference.
        # This reduces memory consumption when there are multiple
        # transactions per lock. This can likely go away if issue5045
        # fixes the function accumulation.
        hookargs = tr2.hookargs

        def hookfunc():
            repo = reporef()
            if hook.hashook(repo.ui, 'txnclose-bookmark'):
                bmchanges = sorted(tr.changes['bookmarks'].items())
                for name, (old, new) in bmchanges:
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('txnclose-bookmark', throw=False,
                              txnname=desc, **pycompat.strkwargs(args))

            if hook.hashook(repo.ui, 'txnclose-phase'):
                cl = repo.unfiltered().changelog
                phasemv = sorted(tr.changes['phases'].items())
                for rev, (old, new) in phasemv:
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('txnclose-phase', throw=False, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('txnclose', throw=False, txnname=desc,
                      **pycompat.strkwargs(hookargs))
        reporef()._afterlock(hookfunc)
    tr.addfinalize('txnclose-hook', txnclosehook)
    # Include a leading "-" to make it happen before the transaction summary
    # reports registered via scmutil.registersummarycallback() whose names
    # are 00-txnreport etc. That way, the caches will be warm when the
    # callbacks run.
    tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
    def txnaborthook(tr2):
        """To be run if transaction is aborted
        """
        reporef().hook('txnabort', throw=False, txnname=desc,
                       **pycompat.strkwargs(tr2.hookargs))
    tr.addabort('txnabort-hook', txnaborthook)
    # avoid eager cache invalidation. in-memory data should be identical
    # to stored data if transaction has no error.
    tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
    self._transref = weakref.ref(tr)
    scmutil.registersummarycallback(self, tr, desc)
    return tr
1343 1343
def _journalfiles(self):
    """Return the (vfs, name) pairs of every file a transaction journals."""
    return ((self.svfs, 'journal'),
            (self.vfs, 'journal.dirstate'),
            (self.vfs, 'journal.branch'),
            (self.vfs, 'journal.desc'),
            (self.vfs, 'journal.bookmarks'),
            (self.svfs, 'journal.phaseroots'))
1351 1351
def undofiles(self):
    """Return the (vfs, name) pairs of the undo files for the journal."""
    return [(vfs, undoname(name)) for vfs, name in self._journalfiles()]
1354 1354
@unfilteredmethod
def _writejournal(self, desc):
    """Snapshot dirstate, branch, desc, bookmarks and phaseroots into the
    journal.* files so an interrupted transaction can be rolled back."""
    self.dirstate.savebackup(None, 'journal.dirstate')
    self.vfs.write("journal.branch",
                   encoding.fromlocal(self.dirstate.branch()))
    self.vfs.write("journal.desc",
                   "%d\n%s\n" % (len(self), desc))
    self.vfs.write("journal.bookmarks",
                   self.vfs.tryread("bookmarks"))
    self.svfs.write("journal.phaseroots",
                    self.svfs.tryread("phaseroots"))
1366 1366
def recover(self):
    """Roll back an interrupted transaction; True if one was found."""
    with self.lock():
        if not self.svfs.exists("journal"):
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
        self.ui.status(_("rolling back interrupted transaction\n"))
        vfsmap = {'': self.svfs,
                  'plain': self.vfs,}
        transaction.rollback(self.svfs, vfsmap, "journal",
                             self.ui.warn,
                             checkambigfiles=_cachedfiles)
        self.invalidate()
        return True
1381 1381
def rollback(self, dryrun=False, force=False):
    """Undo the last transaction; returns 0 on success, 1 if there is
    nothing to roll back."""
    wlock = lock = dsguard = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if not self.svfs.exists("undo"):
            self.ui.warn(_("no rollback information available\n"))
            return 1
        dsguard = dirstateguard.dirstateguard(self, 'rollback')
        return self._rollback(dryrun, force, dsguard)
    finally:
        release(dsguard, lock, wlock)
1396 1396
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force, dsguard):
    """Implement rollback(): replay the undo journal, restore bookmarks,
    phases and (if the wdir parents vanished) the dirstate/branch."""
    ui = self.ui
    try:
        # undo.desc: "<old repo length>\n<transaction desc>[\n<detail>]"
        args = self.vfs.read('undo.desc').splitlines()
        (oldlen, desc, detail) = (int(args[0]), args[1], None)
        if len(args) >= 3:
            detail = args[2]
        oldtip = oldlen - 1

        if detail and ui.verbose:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s: %s)\n')
                   % (oldtip, desc, detail))
        else:
            msg = (_('repository tip rolled back to revision %d'
                     ' (undo %s)\n')
                   % (oldtip, desc))
    except IOError:
        msg = _('rolling back unknown transaction\n')
        desc = None

    # refuse to roll back a commit from under a different checkout
    if not force and self['.'] != self['tip'] and desc == 'commit':
        raise error.Abort(
            _('rollback of last commit while not checked out '
              'may lose data'), hint=_('use -f to force'))

    ui.status(msg)
    if dryrun:
        return 0

    parents = self.dirstate.parents()
    self.destroying()
    vfsmap = {'plain': self.vfs, '': self.svfs}
    transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                         checkambigfiles=_cachedfiles)
    if self.vfs.exists('undo.bookmarks'):
        self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
    if self.svfs.exists('undo.phaseroots'):
        self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
    self.invalidate()

    # only restore dirstate/branch when the rollback removed a wdir parent
    parentgone = (parents[0] not in self.changelog.nodemap or
                  parents[1] not in self.changelog.nodemap)
    if parentgone:
        # prevent dirstateguard from overwriting already restored one
        dsguard.close()

        self.dirstate.restorebackup(None, 'undo.dirstate')
        try:
            branch = self.vfs.read('undo.branch')
            self.dirstate.setbranch(encoding.tolocal(branch))
        except IOError:
            ui.warn(_('named branch could not be reset: '
                      'current branch is still \'%s\'\n')
                    % self.dirstate.branch())

        parents = tuple([p.rev() for p in self[None].parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        mergemod.mergestate.clean(self, self['.'].node())

    # TODO: if we know which new heads may result from this rollback, pass
    # them to destroy(), which will prevent the branchhead cache from being
    # invalidated.
    self.destroyed()
    return 0
1468 1468
def _buildcacheupdater(self, newtransaction):
    """called during transaction to build the callback updating cache

    Lives on the repository to help extension who might want to augment
    this logic. For this purpose, the created transaction is passed to the
    method.
    """
    # we must avoid cyclic reference between repo and transaction.
    reporef = weakref.ref(self)

    def updater(tr):
        reporef().updatecaches(tr)
    return updater
1482 1482
@unfilteredmethod
def updatecaches(self, tr=None):
    """warm appropriate caches

    If this function is called after a transaction closed. The transaction
    will be available in the 'tr' argument. This can be used to selectively
    update caches relevant to the changes in that transaction.
    """
    if tr is not None and tr.hookargs.get('source') == 'strip':
        # During strip, many caches are invalid but
        # later call to `destroyed` will refresh them.
        return

    # tr is None means an explicit/full warm-up request
    if tr is None or tr.changes['revs']:
        # updating the unfiltered branchmap should refresh all the others,
        self.ui.debug('updating the branch cache\n')
        branchmap.updatecache(self.filtered('served'))
1500 1500
def invalidatecaches(self):
    """Drop tag, branch, volatile-set and sparse-signature caches."""
    if '_tagscache' in vars(self):
        # can't use delattr on proxy
        del self.__dict__['_tagscache']

    self.unfiltered()._branchcaches.clear()
    self.invalidatevolatilesets()
    self._sparsesignaturecache.clear()
1510 1510
def invalidatevolatilesets(self):
    """Drop caches derived from repo filtering and obsolescence."""
    self.filteredrevcache.clear()
    obsolete.clearobscaches(self)
1514 1514
def invalidatedirstate(self):
    '''Invalidates the dirstate, causing the next call to dirstate
    to check if it was modified since the last time it was read,
    rereading it if it has.

    This is different to dirstate.invalidate() that it doesn't always
    rereads the dirstate. Use dirstate.invalidate() if you want to
    explicitly read the dirstate again (i.e. restoring it to a previous
    known good state).'''
    if hasunfilteredcache(self, 'dirstate'):
        # drop the dirstate's own per-file caches before dropping the
        # cached dirstate object itself
        for k in self.dirstate._filecache:
            try:
                delattr(self.dirstate, k)
            except AttributeError:
                pass
        delattr(self.unfiltered(), 'dirstate')
1531 1531
def invalidate(self, clearfilecache=False):
    '''Invalidates both store and non-store parts other than dirstate

    If a transaction is running, invalidation of store is omitted,
    because discarding in-memory changes might cause inconsistency
    (e.g. incomplete fncache causes unintentional failure, but
    redundant one doesn't).
    '''
    unfiltered = self.unfiltered() # all file caches are stored unfiltered
    for k in list(self._filecache.keys()):
        # dirstate is invalidated separately in invalidatedirstate()
        if k == 'dirstate':
            continue
        if (k == 'changelog' and
            self.currenttransaction() and
            self.changelog._delayed):
            # The changelog object may store unwritten revisions. We don't
            # want to lose them.
            # TODO: Solve the problem instead of working around it.
            continue

        if clearfilecache:
            del self._filecache[k]
        try:
            delattr(unfiltered, k)
        except AttributeError:
            pass
    self.invalidatecaches()
    if not self.currenttransaction():
        # TODO: Changing contents of store outside transaction
        # causes inconsistency. We should make in-memory store
        # changes detectable, and abort if changed.
        self.store.invalidatecaches()
1565 1565
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1572 1572
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid

        Called at the end of a transaction (*tr*); entries whose value was
        never computed (not in __dict__) are skipped.
        """
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            # dirstate stats are managed by its own write/invalidate cycle
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()
1581 1581
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire and return the lock file *lockname* via *vfs*.

        When *wait* is true, blocks up to the configured ui.timeout
        (warning after ui.timeout.warn); otherwise fails immediately if
        the lock is held elsewhere. *releasefn*/*acquirefn* are callbacks
        run around lock state changes; *parentenvvar* names an environment
        variable used to detect a lock inherited from a parent process.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l
1602 1602
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        # Attach to the highest-level lock currently held; for/else runs
        # the callback immediately when no lock is held at all.
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock have been found.
            callback()
1615 1615
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # re-entrant: bump the hold count on the existing lock
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1632 1632
    def _wlockchecktransaction(self):
        """Inherit-check hook for wlock: refuse inheritance mid-transaction."""
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1637 1637
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant: bump the hold count on the existing lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                # taking wlock after lock is the wrong order (deadlock hazard)
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # On release, either discard a pending parent change or flush
            # the dirstate, then revalidate its filecache stat.
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1673 1673
1674 1674 def _currentlock(self, lockref):
1675 1675 """Returns the lock if it's held, or None if it's not."""
1676 1676 if lockref is None:
1677 1677 return None
1678 1678 l = lockref()
1679 1679 if l is None or not l.held:
1680 1680 return None
1681 1681 return l
1682 1682
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)
1686 1686
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        *manifest1*/*manifest2* are the parents' manifests, *linkrev* the
        revision being created, *tr* the running transaction. Files whose
        content (or flags, on merge) changed are appended to *changelist*.
        Returns the filelog node to record in the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file context is backed by an existing filelog entry; reuse
            # it when it matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1774 1774
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        Only applies when the match names files/prefixes explicitly. For
        each explicitly named path that is not part of *status*, calls
        *fail* with an explanatory message (missing, unmatched directory,
        or untracked). *vdirs* is the list of directories the match visited.
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    # a named directory is fine if anything under it matched
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1795 1795
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit and empty commits are not allowed.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track visited directories and bad paths so explicit patterns
            # can be validated by checkcommitpatterns() below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # the 'commit' hook only fires once all locks are dropped
        self._afterlock(commithook)
        return ret
1907 1907
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors while committing individual files
        are treated as fatal even for missing files. Returns the new
        changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        # a missing file is only fatal in error mode;
                        # any other IO failure is always fatal
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
2000 2000
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
2018 2018
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2050 2050
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
2057 2057
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
2079 2079
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus
2083 2083
2084 2084 def clearpostdsstatus(self):
2085 2085 """Used by workingctx to clear post-dirstate-status hooks."""
2086 2086 del self._postdsstatus[:]
2087 2087
2088 2088 def heads(self, start=None):
2089 2089 if start is None:
2090 2090 cl = self.changelog
2091 2091 headrevs = reversed(cl.headrevs())
2092 2092 return [cl.node(rev) for rev in headrevs]
2093 2093
2094 2094 heads = self.changelog.heads(start)
2095 2095 # sort the output in rev descending order
2096 2096 return sorted(heads, key=self.changelog.rev, reverse=True)
2097 2097
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
2118 2118
    def branches(self, nodes):
        """Return (tipmost, latest head, p1, p2) tuples for each branch of
        history ending at the given *nodes* (default: the changelog tip).

        For each starting node, first parents are followed until a merge
        (non-null second parent) or a root (null first parent) is reached.
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                # stop at a merge or at a root changeset
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
2132 2132
    def between(self, pairs):
        """For each (top, bottom) pair, return a sampled list of nodes on the
        first-parent chain from top toward bottom.

        Nodes are kept at exponentially growing distances (1, 2, 4, ...)
        from top, matching the wire-protocol 'between' sampling.
        """
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                # sample the node when the walk distance hits the next
                # power of two
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
2151 2151
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in the base implementation
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()
2164 2164
    def pushkey(self, namespace, key, old, new):
        """Update *key* in *namespace* from *old* to *new*, firing the
        prepushkey/pushkey hooks around the operation.

        Returns the pushkey result, or False when a prepushkey hook
        aborted the update.
        """
        try:
            # pass along any transaction hook arguments so hooks see the
            # full context of the running transaction
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        # the post-update hook only fires once all locks are released
        self._afterlock(runhook)
        return ret
2188 2188
    def listkeys(self, namespace):
        """Return the pushkey mapping for *namespace*, firing the
        prelistkeys/listkeys hooks around the lookup."""
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
2195 2195
2196 2196 def debugwireargs(self, one, two, three=None, four=None, five=None):
2197 2197 '''used to test argument passing over the wire'''
2198 2198 return "%s %s %s %s %s" % (one, two, three, four, five)
2199 2199
2200 2200 def savecommitmessage(self, text):
2201 2201 fp = self.vfs('last-message.txt', 'wb')
2202 2202 try:
2203 2203 fp.write(text)
2204 2204 finally:
2205 2205 fp.close()
2206 2206 return self.pathto(fp.name[len(self.root) + 1:])
2207 2207
2208 2208 # used to avoid circular references so destructors work
2209 2209 def aftertrans(files):
2210 2210 renamefiles = [tuple(t) for t in files]
2211 2211 def a():
2212 2212 for vfs, src, dest in renamefiles:
2213 2213 # if src and dest refer to a same file, vfs.rename is a no-op,
2214 2214 # leaving both src and dest on disk. delete dest to make sure
2215 2215 # the rename couldn't be such a no-op.
2216 2216 vfs.tryunlink(dest)
2217 2217 try:
2218 2218 vfs.rename(src, dest)
2219 2219 except OSError: # journal file does not yet exist
2220 2220 pass
2221 2221 return a
2222 2222
def undoname(fn):
    """Map a journal file path to the corresponding 'undo' file path.

    The basename must start with 'journal'; only that first occurrence is
    rewritten, so suffixes (e.g. 'journal.bookmarks') are preserved.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    undo = name.replace('journal', 'undo', 1)
    return os.path.join(directory, undo)
2227 2227
def instance(ui, path, create):
    """Construct a localrepository for *path* (a local path or file:// URL)."""
    return localrepository(ui, util.urllocalpath(path), create)
2230 2230
def islocal(path):
    # local repositories are, by definition, always directly accessible
    return True
2233 2233
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.

    Returns a set of requirement strings derived from the repo's ui
    configuration ([format] and [experimental] sections).
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file copied from mercurial/subrepo.py to mercurial/subrepoutil.py
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now