##// END OF EJS Templates
py3: invalidate repository cache with system-string keys...
Yuya Nishihara -
r40396:dee73a97 default
parent child Browse files
Show More
@@ -1,3699 +1,3699 b''
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help COMMAND` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 make them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65 from __future__ import absolute_import, print_function
66 66
67 67 import errno
68 68 import os
69 69 import re
70 70 import shutil
71 71 from mercurial.i18n import _
72 72 from mercurial.node import (
73 73 bin,
74 74 hex,
75 75 nullid,
76 76 nullrev,
77 77 short,
78 78 )
79 79 from mercurial import (
80 80 cmdutil,
81 81 commands,
82 82 dirstateguard,
83 83 encoding,
84 84 error,
85 85 extensions,
86 86 hg,
87 87 localrepo,
88 88 lock as lockmod,
89 89 logcmdutil,
90 90 patch as patchmod,
91 91 phases,
92 92 pycompat,
93 93 registrar,
94 94 revsetlang,
95 95 scmutil,
96 96 smartset,
97 97 subrepoutil,
98 98 util,
99 99 vfs as vfsmod,
100 100 )
101 101 from mercurial.utils import (
102 102 dateutil,
103 103 stringutil,
104 104 )
105 105
release = lockmod.release
# Shared flag definition for the various q* commands that take -s/--summary.
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Declare the mq.* configuration knobs documented in the module help above.
configitem('mq', 'git',
           default='auto',
)
configitem('mq', 'keepchanges',
           default=False,
)
configitem('mq', 'plain',
           default=False,
)
configitem('mq', 'secret',
           default=False,
)

# force load strip extension formerly included in mq and import some utility
try:
    stripext = extensions.find('strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui(object):
        # Minimal ui stand-in: extensions.load() only needs debug() here.
        def debug(self, msg):
            pass
    stripext = extensions.load(dummyui(), 'strip', '')

strip = stripext.strip
checksubstate = stripext.checksubstate
checklocalchanges = stripext.checklocalchanges


# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
152 152
class statusentry(object):
    """One applied-patch record: changeset node plus patch name.

    Serialized by __bytes__ as '<hex node>:<name>', which is the
    line format of the on-disk 'status' file (see queue.applied).
    """
    def __init__(self, node, name):
        self.node, self.name = node, name

    def __bytes__(self):
        return hex(self.node) + ':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)
162 162
# The order of the headers in 'hg export' HG patches:
# (inserthgheader relies on this canonical ordering to pick insert points)
HGHEADERS = [
    # '# HG changeset patch',
    '# User ',
    '# Date ',
    '# ',
    '# Branch ',
    '# Node ID ',
    '# Parent ', # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
# (insertplainheader uses these priorities the same way)
PLAINHEADERS = {
    'from': 0,
    'date': 1,
    'subject': 2,
}
179 179
def inserthgheader(lines, header, value):
    """Assuming lines contains a HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    # raises ValueError (first doctest) when the HG marker line is absent
    start = lines.index('# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    bestpos = len(lines)
    for i in range(start, len(lines)):
        line = lines[i]
        if not line.startswith('# '):
            # end of the contiguous header block: insert no later than here
            bestpos = min(bestpos, i)
            break
        for lineindex, h in enumerate(HGHEADERS):
            if line.startswith(h):
                if lineindex == newindex:
                    # header already present: replace its value in place
                    lines[i] = header + value
                    return lines
                if lineindex > newindex:
                    # passed our slot in the canonical HGHEADERS order
                    bestpos = min(bestpos, i)
                break # next line
    lines.insert(bestpos, header + value)
    return lines
221 221
def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    # priority of the header we are adding (see PLAINHEADERS)
    newprio = PLAINHEADERS[header.lower()]
    bestpos = len(lines)
    for i, line in enumerate(lines):
        if ':' in line:
            lheader = line.split(':', 1)[0].strip().lower()
            # unknown headers sort after ours, keeping insertion stable
            lprio = PLAINHEADERS.get(lheader, newprio + 1)
            if lprio == newprio:
                # same header already present: replace it in place
                lines[i] = '%s: %s' % (header, value)
                return lines
            if lprio > newprio and i < bestpos:
                bestpos = i
        else:
            # first non-header line ends the header block; keep a blank
            # separator line between headers and body
            if line:
                lines.insert(i, '')
            if i < bestpos:
                bestpos = i
            break
    lines.insert(bestpos, '%s: %s' % (header, value))
    return lines
258 258
class patchheader(object):
    """Parse and manipulate the header (metadata/comment) part of a patch.

    Understands both 'hg export' style headers ('# HG changeset patch')
    and plain mail-style headers (From:/Date:/Subject:).  Only the header
    is retained; the diff body is not stored, just where it starts.
    """
    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file; plainmode: prefer mail-style headers
        def eatdiff(lines):
            # drop trailing diff-looking lines from a header/message block
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        # FIX: use a context manager so the patch file is always closed.
        # The original iterated over a bare open(), leaking the handle on
        # interpreters without reference counting.
        with open(pf, 'rb') as fileobj:
            for line in fileobj:
                line = line.rstrip()
                if (line.startswith('diff --git')
                    or (diffstart and line.startswith('+++ '))):
                    diffstart = 2
                    break
                diffstart = 0 # reset
                if line.startswith("--- "):
                    diffstart = 1
                    continue
                elif format == "hgpatch":
                    # parse values when importing the result of an hg export
                    if line.startswith("# User "):
                        user = line[7:]
                    elif line.startswith("# Date "):
                        date = line[7:]
                    elif line.startswith("# Parent "):
                        parent = line[9:].lstrip() # handle double trailing space
                    elif line.startswith("# Branch "):
                        branch = line[9:]
                    elif line.startswith("# Node ID "):
                        nodeid = line[10:]
                    elif not line.startswith("# ") and line:
                        message.append(line)
                        format = None
                elif line == '# HG changeset patch':
                    message = []
                    format = "hgpatch"
                elif (format != "tagdone" and (line.startswith("Subject: ") or
                                               line.startswith("subject: "))):
                    subject = line[9:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("From: ") or
                                               line.startswith("from: "))):
                    user = line[6:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("Date: ") or
                                               line.startswith("date: "))):
                    date = line[6:]
                    format = "tag"
                elif format == "tag" and line == "":
                    # when looking for tags (subject: from: etc) they
                    # end once you find a blank line in the source
                    format = "tagdone"
                elif message or line:
                    message.append(line)
                comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        # plainmode is sticky: either requested by the caller, or detected
        # from the file (mail-style headers without the HG marker line)
        self.plainmode = (plainmode or
                          '# HG changeset patch' not in self.comments and
                          any(c.startswith('Date: ') or
                              c.startswith('From: ')
                              for c in self.comments))

    def setuser(self, user):
        """Set the user, updating (or creating) the matching header line."""
        try:
            inserthgheader(self.comments, '# User ', user)
        except ValueError:
            # no HG header block yet: fall back per the patch style
            if self.plainmode:
                insertplainheader(self.comments, 'From', user)
            else:
                tmp = ['# HG changeset patch', '# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set the date, updating (or creating) the matching header line."""
        try:
            inserthgheader(self.comments, '# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'Date', date)
            else:
                tmp = ['# HG changeset patch', '# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        """Set the parent; plain patches carry no parent header."""
        try:
            inserthgheader(self.comments, '# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = ['# HG changeset patch', '# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        """Replace the commit message, keeping other comment fields."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                # keep a blank separator before the message in plain style
                self.comments.append('')
            self.comments.append(message)

    def __bytes__(self):
        # header text followed by a blank separator line, or empty
        s = '\n'.join(self.comments).rstrip()
        if not s:
            return ''
        return s + '\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in pycompat.xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # drop 'subject' plus the following blank line
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
427 427
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    if phase is None:
        # honor mq.secret: mq changesets default to the secret phase
        if repo.ui.configbool('mq', 'secret'):
            phase = phases.secret
    overrides = {('ui', 'allowemptycommit'): True}
    if phase is not None:
        overrides[('phases', 'new-commit')] = phase
    with repo.ui.configoverride(overrides, 'mq'):
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
444 444
class AbortNoCleanup(error.Abort):
    """Abort that must not roll back work done so far.

    queue.apply() catches this, closes the transaction and saves dirty
    state before re-raising, instead of aborting the transaction.
    """
    pass
447 447
448 448 class queue(object):
    def __init__(self, ui, baseui, path, patchdir=None):
        """Load patch-queue state rooted at *path*.

        patchdir, when given, overrides the computed patch directory.
        """
        self.basepath = path
        try:
            # 'patches.queue' names the active queue; empty means the
            # default 'patches' directory, otherwise 'patches-<name>'
            with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
                cur = fh.read().rstrip()

            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            # no patches.queue file at all: use the default queue
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = vfsmod.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        # file names (relative to self.path) holding queue state
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        gitmode = ui.config('mq', 'git').lower()
        boolmode = stringutil.parsebool(gitmode)
        if boolmode is not None:
            if boolmode:
                gitmode = 'yes'
            else:
                gitmode = 'no'
        self.gitmode = gitmode
        # deprecated config: mq.plain
        self.plainmode = ui.configbool('mq', 'plain')
        self.checkapplied = True
485 485
486 486 @util.propertycache
487 487 def applied(self):
488 488 def parselines(lines):
489 489 for l in lines:
490 490 entry = l.split(':', 1)
491 491 if len(entry) > 1:
492 492 n, name = entry
493 493 yield statusentry(bin(n), name)
494 494 elif l.strip():
495 495 self.ui.warn(_('malformated mq status line: %s\n') %
496 496 stringutil.pprint(entry))
497 497 # else we ignore empty lines
498 498 try:
499 499 lines = self.opener.read(self.statuspath).splitlines()
500 500 return list(parselines(lines))
501 501 except IOError as e:
502 502 if e.errno == errno.ENOENT:
503 503 return []
504 504 raise
505 505
    @util.propertycache
    def fullseries(self):
        """Raw lines of the 'series' file, comments and guards included."""
        try:
            return self.opener.read(self.seriespath).splitlines()
        except IOError as e:
            if e.errno == errno.ENOENT:
                # no series file yet: empty series
                return []
            raise
514 514
    @util.propertycache
    def series(self):
        """Patch names from the series file (comments/guards stripped).

        parseseries() assigns self.series directly, so the return re-reads
        the freshly built list for the propertycache machinery to store.
        """
        self.parseseries()
        return self.series
519 519
    @util.propertycache
    def seriesguards(self):
        """Per-patch guard lists, parallel to self.series.

        Built by parseseries() as a side effect, same as 'series' above.
        """
        self.parseseries()
        return self.seriesguards
524 524
    def invalidate(self):
        """Drop all cached series/status state and clear the dirty flags."""
        # remove propertycache-backed attributes so they reload on access
        for a in 'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = False
        self.seriesdirty = False
        self.guardsdirty = False
        self.activeguards = None
533 533
    def diffopts(self, opts=None, patchfn=None, plain=False):
        """Return diff options tweaked for this mq use, possibly upgrading to
        git format, and possibly plain and without lossy options."""
        diffopts = patchmod.difffeatureopts(self.ui, opts,
            git=True, whitespace=not plain, formatchanging=not plain)
        # apply the mq.git policy (see module help: auto/keep/yes/no)
        if self.gitmode == 'auto':
            diffopts.upgrade = True
        elif self.gitmode == 'keep':
            pass
        elif self.gitmode in ('yes', 'no'):
            diffopts.git = self.gitmode == 'yes'
        else:
            raise error.Abort(_('mq.git option can be auto/keep/yes/no'
                                ' got %s') % self.gitmode)
        if patchfn:
            # in 'keep' mode, preserve git-ness of the existing patch
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts
551 551
    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == 'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, 'r')
                # if the patch was a git patch, refresh it as a git patch
                diffopts.git = any(line.startswith('diff --git')
                                   for line in patchf)
                patchf.close()
        return diffopts
565 565
566 566 def join(self, *p):
567 567 return os.path.join(self.path, *p)
568 568
569 569 def findseries(self, patch):
570 570 def matchpatch(l):
571 571 l = l.split('#', 1)[0]
572 572 return l.strip() == patch
573 573 for index, l in enumerate(self.fullseries):
574 574 if matchpatch(l):
575 575 return index
576 576 return None
577 577
    # matches ' #+guard' / ' #-guard' annotations on series lines
    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        """Parse fullseries into self.series (names) and self.seriesguards."""
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # the whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise error.Abort(_('%s appears more than once in %s') %
                                      (patch, self.join(self.seriespath)))
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))
600 600
601 601 def checkguard(self, guard):
602 602 if not guard:
603 603 return _('guard cannot be an empty string')
604 604 bad_chars = '# \t\r\n\f'
605 605 first = guard[0]
606 606 if first in '-+':
607 607 return (_('guard %r starts with invalid character: %r') %
608 608 (guard, first))
609 609 for c in bad_chars:
610 610 if c in guard:
611 611 return _('invalid character in guard %r: %r') % (guard, c)
612 612
    def setactive(self, guards):
        """Validate *guards* and make them the active guard set."""
        for guard in guards:
            bad = self.checkguard(guard)
            if bad:
                raise error.Abort(bad)
        # deduplicate and sort for a stable on-disk representation
        guards = sorted(set(guards))
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.activeguards = guards
        self.guardsdirty = True
622 622
    def active(self):
        """Return the active guard list, lazily loading the guards file.

        Invalid guards found in the file are warned about and skipped
        rather than aborting.
        """
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards
640 640
    def setguards(self, idx, guards):
        """Replace the guards of series entry *idx* with *guards*.

        Each guard must be of the form '+name' or '-name'.
        """
        for g in guards:
            if len(g) < 2:
                raise error.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise error.Abort(_('guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise error.Abort(bad)
        # strip existing guard annotations, then append the new ones
        drop = self.guard_re.sub('', self.fullseries[idx])
        self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True
654 654
    def pushable(self, idx):
        """Return (pushable, reason) for a patch given by name or index.

        A patch is blocked by an exact negative guard match, or by
        carrying positive guards none of which is currently active.
        """
        if isinstance(idx, bytes):
            # accept a patch name as well as a series index
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            # unguarded patches are always pushable
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards
                    if g.startswith('-') and g[1:] in guards]
        if exactneg:
            return False, stringutil.pprint(exactneg[0])
        pos = [g for g in patchguards if g.startswith('+')]
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, stringutil.pprint(exactpos[0])
            return False, ' '.join([stringutil.pprint(p) for p in pos])
        return True, ''
673 673
    def explainpushable(self, idx, all_patches=False):
        """Print why a patch is (not) pushable.

        With all_patches, report on ui.write; otherwise warn, and only
        when the ui is verbose.
        """
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, bytes):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
702 702
    def savedirty(self):
        """Persist dirty state: status, series, guards and added files."""
        def writelist(items, path):
            # write items one per line to *path* under the queue directory
            fp = self.opener(path, 'wb')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(bytes, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                # schedule new patch files for addition in the queue repo
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
723 723
    def removeundo(self, repo):
        # Delete the store's 'undo' file, if present.  NOTE(review):
        # presumably this prevents 'hg rollback' from crossing an mq
        # operation — confirm against callers.
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError as inst:
            # best effort: report but do not abort the mq operation
            self.ui.warn(_('error removing undo: %s\n') %
                         stringutil.forcebytestr(inst))
733 733
    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        # copy=True keeps the working copy in place; otherwise it is moved
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                self.ui.note(_('saving current version of %s as %s\n') %
                             (f, scmutil.origpath(self.ui, repo, f)))

                absorig = scmutil.origpath(self.ui, repo, absf)
                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)
747 747
    def printdiff(self, repo, diffopts, node1, node2=None, files=None,
                  fp=None, changes=None, opts=None):
        """Write a diff (or diffstat, when opts['stat']) to fp/stdout."""
        if opts is None:
            opts = {}
        stat = opts.get('stat')
        m = scmutil.match(repo[node1], files, opts)
        logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
                                  changes, stat, fp)
756 756
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        """Apply *patch* on *head*, falling back to a merge with *rev*.

        Returns (err, node); the patch file is regenerated from the merge
        result on the fallback path.
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_("update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_("unable to read %s") % patch)

        # rewrite the patch file to match the merged result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = bytes(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
795 795
    def qparents(self, repo, rev=None):
        """return the mq handled parent or p1

        In some case where mq get himself in being the parent of a merge the
        appropriate parent may be p2.
        (eg: an in progress merge started with mq disabled)

        If no parent are managed by mq, p1 is returned.
        """
        if rev is None:
            # working directory case: inspect dirstate parents
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        # prefer p2 when it is the mq-managed side of a merge
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1
816 816
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Merge patches from *mergeq* into this queue; return (err, head)."""
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
855 855
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file
        Returns (success, touched files, fuzz).'''
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception as inst:
            # report and return failure rather than propagating
            self.ui.note(stringutil.forcebytestr(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)
870 870
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        """Apply patches under wlock/lock inside a 'qpush' transaction.

        On AbortNoCleanup the transaction is closed (work is kept) before
        re-raising; on any other exception the transaction is aborted and
        the cached queue state invalidated.
        """
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                # deliberate: keep what was applied so far
                tr.close()
                self.savedirty()
                raise
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
899 899
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.

        Applies each patch in ``series`` in order, committing one
        changeset per patch; ``hash`` is the node of the last commit
        created (None if nothing was committed). Stops at the first
        error. When ``update_status`` is set, each applied patch is
        recorded in self.applied.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None  # node of the most recently created commit
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                # guarded patches are skipped, not treated as errors
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                # ph.message is a list of lines here
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # save copies of files the patch will touch before
                    # they are modified, so a later pop can restore them
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("conflicting local changes found"),
                            hint=_("did you forget to qrefresh?"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                # header-only patch: nothing to apply, still committed
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                with repo.dirstate.parentchange():
                    for f in removed:
                        repo.dirstate.remove(f)
                    for f in merged:
                        repo.dirstate.merge(f)
                    p1, p2 = repo.dirstate.parents()
                    repo.setparents(p1, merge)

            if all_files and '.hgsubstate' in all_files:
                wctx = repo[None]
                pctx = repo['.']
                overwrite = False
                mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
                                                      overwrite)
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo.changelog.tip()
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            # an unchanged tip means the commit was a no-op duplicate
            if repo.changelog.tip() == oldtip:
                raise error.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise error.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working "
                               "directory\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
1005 1005
    def _cleanup(self, patches, numrevs, keep=False):
        """Remove ``patches`` from the series and drop the first
        ``numrevs`` applied entries.

        Unless ``keep`` is set, the patch files are also forgotten from
        the patch repository and deleted from disk. Returns the list of
        nodes of the finished (formerly applied) patches.
        """
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    # an already-missing patch file is fine
                    if inst.errno != errno.ENOENT:
                        raise

        qfinished = []
        if numrevs:
            # the first numrevs applied entries are being finished
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        # pair each patch with its index in the full series; -1 marks
        # names that are not in the series at all
        sortedseries = []
        for p in patches:
            idx = self.findseries(p)
            if idx is None:
                sortedseries.append((-1, p))
            else:
                sortedseries.append((idx, p))

        # delete from highest index down so earlier deletions do not
        # shift the remaining indexes
        sortedseries.sort(reverse=True)
        for (i, p) in sortedseries:
            if i != -1:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                # revisions already finished: warn instead of aborting
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise error.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
1054 1054
1055 1055 def _revpatches(self, repo, revs):
1056 1056 firstrev = repo[self.applied[0].node].rev()
1057 1057 patches = []
1058 1058 for i, rev in enumerate(revs):
1059 1059
1060 1060 if rev < firstrev:
1061 1061 raise error.Abort(_('revision %d is not managed') % rev)
1062 1062
1063 1063 ctx = repo[rev]
1064 1064 base = self.applied[i].node
1065 1065 if ctx.node() != base:
1066 1066 msg = _('cannot delete revision %d above applied patches')
1067 1067 raise error.Abort(msg % rev)
1068 1068
1069 1069 patch = self.applied[i].name
1070 1070 for fmt in ('[mq]: %s', 'imported patch %s'):
1071 1071 if ctx.description() == fmt % patch:
1072 1072 msg = _('patch %s finalized without changeset message\n')
1073 1073 repo.ui.status(msg % patch)
1074 1074 break
1075 1075
1076 1076 patches.append(patch)
1077 1077 return patches
1078 1078
    def finish(self, repo, revs):
        """Move the applied patches for ``revs`` out of mq control,
        turning them into regular changesets."""
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool('mq', 'secret'):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = phases.newcommitphase(repo.ui)
            # drop the finished changesets from secret to the default
            # commit phase, but only if their base can legally move
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                with repo.transaction('qfinish') as tr:
                    phases.advanceboundary(repo, tr, tphase, qfinished)
1092 1092
1093 1093 def delete(self, repo, patches, opts):
1094 1094 if not patches and not opts.get('rev'):
1095 1095 raise error.Abort(_('qdelete requires at least one revision or '
1096 1096 'patch name'))
1097 1097
1098 1098 realpatches = []
1099 1099 for patch in patches:
1100 1100 patch = self.lookup(patch, strict=True)
1101 1101 info = self.isapplied(patch)
1102 1102 if info:
1103 1103 raise error.Abort(_("cannot delete applied patch %s") % patch)
1104 1104 if patch not in self.series:
1105 1105 raise error.Abort(_("patch %s not in series file") % patch)
1106 1106 if patch not in realpatches:
1107 1107 realpatches.append(patch)
1108 1108
1109 1109 numrevs = 0
1110 1110 if opts.get('rev'):
1111 1111 if not self.applied:
1112 1112 raise error.Abort(_('no patches applied'))
1113 1113 revs = scmutil.revrange(repo, opts.get('rev'))
1114 1114 revs.sort()
1115 1115 revpatches = self._revpatches(repo, revs)
1116 1116 realpatches += revpatches
1117 1117 numrevs = len(revpatches)
1118 1118
1119 1119 self._cleanup(realpatches, numrevs, opts.get('keep'))
1120 1120
1121 1121 def checktoppatch(self, repo):
1122 1122 '''check that working directory is at qtip'''
1123 1123 if self.applied:
1124 1124 top = self.applied[-1].node
1125 1125 patch = self.applied[-1].name
1126 1126 if repo.dirstate.p1() != top:
1127 1127 raise error.Abort(_("working directory revision is not qtip"))
1128 1128 return top, patch
1129 1129 return None, None
1130 1130
1131 1131 def putsubstate2changes(self, substatestate, changes):
1132 1132 for files in changes[:3]:
1133 1133 if '.hgsubstate' in files:
1134 1134 return # already listed up
1135 1135 # not yet listed up
1136 1136 if substatestate in 'a?':
1137 1137 changes[1].append('.hgsubstate')
1138 1138 elif substatestate in 'r':
1139 1139 changes[2].append('.hgsubstate')
1140 1140 else: # modified
1141 1141 changes[0].append('.hgsubstate')
1142 1142
1143 1143 def checklocalchanges(self, repo, force=False, refresh=True):
1144 1144 excsuffix = ''
1145 1145 if refresh:
1146 1146 excsuffix = ', qrefresh first'
1147 1147 # plain versions for i18n tool to detect them
1148 1148 _("local changes found, qrefresh first")
1149 1149 _("local changed subrepos found, qrefresh first")
1150 1150 return checklocalchanges(repo, force, excsuffix)
1151 1151
1152 1152 _reserved = ('series', 'status', 'guards', '.', '..')
1153 1153 def checkreservedname(self, name):
1154 1154 if name in self._reserved:
1155 1155 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1156 1156 % name)
1157 1157 if name != name.strip():
1158 1158 # whitespace is stripped by parseseries()
1159 1159 raise error.Abort(_('patch name cannot begin or end with '
1160 1160 'whitespace'))
1161 1161 for prefix in ('.hg', '.mq'):
1162 1162 if name.startswith(prefix):
1163 1163 raise error.Abort(_('patch name cannot begin with "%s"')
1164 1164 % prefix)
1165 1165 for c in ('#', ':', '\r', '\n'):
1166 1166 if c in name:
1167 1167 raise error.Abort(_('%r cannot be used in the name of a patch')
1168 1168 % pycompat.bytestr(c))
1169 1169
1170 1170 def checkpatchname(self, name, force=False):
1171 1171 self.checkreservedname(name)
1172 1172 if not force and os.path.exists(self.join(name)):
1173 1173 if os.path.isdir(self.join(name)):
1174 1174 raise error.Abort(_('"%s" already exists as a directory')
1175 1175 % name)
1176 1176 else:
1177 1177 raise error.Abort(_('patch "%s" already exists') % name)
1178 1178
1179 1179 def makepatchname(self, title, fallbackname):
1180 1180 """Return a suitable filename for title, adding a suffix to make
1181 1181 it unique in the existing list"""
1182 1182 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
1183 1183 namebase = namebase[:75] # avoid too long name (issue5117)
1184 1184 if namebase:
1185 1185 try:
1186 1186 self.checkreservedname(namebase)
1187 1187 except error.Abort:
1188 1188 namebase = fallbackname
1189 1189 else:
1190 1190 namebase = fallbackname
1191 1191 name = namebase
1192 1192 i = 0
1193 1193 while True:
1194 1194 if name not in self.fullseries:
1195 1195 try:
1196 1196 self.checkpatchname(name)
1197 1197 break
1198 1198 except error.Abort:
1199 1199 pass
1200 1200 i += 1
1201 1201 name = '%s__%d' % (namebase, i)
1202 1202 return name
1203 1203
1204 1204 def checkkeepchanges(self, keepchanges, force):
1205 1205 if force and keepchanges:
1206 1206 raise error.Abort(_('cannot use both --force and --keep-changes'))
1207 1207
    def new(self, repo, patchfn, *pats, **opts):
        """Create a new patch ``patchfn`` from the current working-copy
        changes, commit it, and record it in the series/applied lists.

        options:
        msg: a string or a no-argument function returning a string
        """
        opts = pycompat.byteskwargs(opts)
        msg = opts.get('msg')
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qnew')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = dateutil.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate['.hgsubstate']
        if opts.get('include') or opts.get('exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise error.Abort('%s: %s' % (f, msg))
            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        for files in changes[:3]:
            commitfiles.extend(files)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_('cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError as e:
                raise error.Abort(_('cannot write patch "%s": %s')
                                  % (patchfn, encoding.strtolocal(e.strerror)))
            try:
                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        # fall back to the default message when the user
                        # leaves the editor buffer empty
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(repo, None, commitmsg, user, date, match=match,
                              force=True, editor=editor)
                if n is None:
                    raise error.Abort(_("repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate('%d %d' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    # don't store the auto-generated message in the patch
                    if msg == defaultmsg.strip():
                        msg = ''
                    ph.setmessage(msg)
                    p.write(bytes(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               changes=changes, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except: # re-raises
                    # undo the commit if the patch file could not be
                    # fully written/registered
                    repo.rollback()
                    raise
            except Exception:
                # remove the partially written patch file before
                # propagating the failure
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
        self.removeundo(repo)
1313 1313
1314 1314 def isapplied(self, patch):
1315 1315 """returns (index, rev, patch)"""
1316 1316 for i, a in enumerate(self.applied):
1317 1317 if a.name == patch:
1318 1318 return (i, a.node, a.name)
1319 1319 return None
1320 1320
1321 1321 # if the exact patch name does not exist, we try a few
1322 1322 # variations. If strict is passed, we try only #1
1323 1323 #
1324 1324 # 1) a number (as string) to indicate an offset in the series file
1325 1325 # 2) a unique substring of the patch name was given
1326 1326 # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch reference to a series name.

        Tries, in order: an exact series name; a numeric index into the
        series (negative allowed); and, unless ``strict``, a unique
        substring of a name, 'qtip'/'qbase', or 'name-N'/'name+N' offset
        forms. Aborts if nothing matches.
        """
        def partialname(s):
            # exact name wins; otherwise require a unique substring match
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            # symbolic names only make sense with applied patches
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        # an existing file of that exact name short-circuits all the
        # fuzzy resolution below
        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partialname(patch)
                if res:
                    return res
                # 'name-N': N patches before 'name' in the series
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # 'name+N': N patches after 'name' in the series
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise error.Abort(_("patch %s not in series") % patch)
1387 1387
    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply patches from the series up to (and including) ``patch``,
        or the next patch if none is given.

        Returns 0 on success/no-op, 1 for user-level failures; patch
        application errors propagate from self.apply().
        """
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        with repo.wlock():
            heads = []
            for hs in repo.branchmap().itervalues():
                heads.extend(hs)
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                # --exact: update to the parent recorded in the patch
                # header; incompatible with the other movement options
                if keepchanges:
                    raise error.Abort(
                        _("cannot use --exact and --keep-changes together"))
                if move:
                    raise error.Abort(_('cannot use --exact and --move '
                                        'together'))
                if self.applied:
                    raise error.Abort(_('cannot push --exact with applied '
                                        'patches'))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                # --move: relocate the requested patch so it becomes
                # the next one in the full series
                if not patch:
                    raise error.Abort(_("please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(status.modified + status.added +
                                    status.removed + status.deleted)
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files,
                                     tobackup=tobackup, keepchanges=keepchanges)
            except AbortNoCleanup:
                raise
            except: # re-raises
                self.ui.warn(_('cleaning up working directory...\n'))
                cmdutil.revert(self.ui, repo, repo['.'],
                               repo.dirstate.parents(), no_backup=True)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]
1534 1534
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply patches down to (and including the patch above)
        ``patch``, or just the top patch if none is given.

        Strips the corresponding changesets and, when ``update`` is set,
        restores the working directory to the new qtip's parent state.
        """
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                # keep the named patch applied; pop everything above it
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        # cannot leave the working directory parented on
                        # a changeset that is about to be stripped
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                # only update if a popped changeset is a working dir parent
                update = any(entry.node in parents
                             for entry in self.applied[start:])

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(s.modified + s.added +
                                    s.removed + s.deleted)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise error.Abort(_("popping would remove a revision not "
                                    "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _("popping would remove a public revision"),
                    hint=_("see 'hg help phases' for details"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise error.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_("local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                with repo.dirstate.parentchange():
                    for f in a:
                        # files added by the popped patches disappear
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                        repo.dirstate.drop(f)
                    for f in m + r:
                        # restore content from the new parent revision
                        fctx = ctx[f]
                        repo.wwrite(f, fctx.data(), fctx.flags())
                        repo.dirstate.normal(f)
                    repo.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            for s, state in repo['.'].substate.items():
                repo['.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
1637 1637
1638 1638 def diff(self, repo, pats, opts):
1639 1639 top, patch = self.checktoppatch(repo)
1640 1640 if not top:
1641 1641 self.ui.write(_("no patches applied\n"))
1642 1642 return
1643 1643 qp = self.qparents(repo, top)
1644 1644 if opts.get('reverse'):
1645 1645 node1, node2 = None, qp
1646 1646 else:
1647 1647 node1, node2 = qp, None
1648 1648 diffopts = self.diffopts(opts, patch)
1649 1649 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1650 1650
1651 1651 def refresh(self, repo, pats=None, **opts):
1652 1652 opts = pycompat.byteskwargs(opts)
1653 1653 if not self.applied:
1654 1654 self.ui.write(_("no patches applied\n"))
1655 1655 return 1
1656 1656 msg = opts.get('msg', '').rstrip()
1657 1657 edit = opts.get('edit')
1658 1658 editform = opts.get('editform', 'mq.qrefresh')
1659 1659 newuser = opts.get('user')
1660 1660 newdate = opts.get('date')
1661 1661 if newdate:
1662 1662 newdate = '%d %d' % dateutil.parsedate(newdate)
1663 1663 wlock = repo.wlock()
1664 1664
1665 1665 try:
1666 1666 self.checktoppatch(repo)
1667 1667 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1668 1668 if repo.changelog.heads(top) != [top]:
1669 1669 raise error.Abort(_("cannot qrefresh a revision with children"))
1670 1670 if not repo[top].mutable():
1671 1671 raise error.Abort(_("cannot qrefresh public revision"),
1672 1672 hint=_("see 'hg help phases' for details"))
1673 1673
1674 1674 cparents = repo.changelog.parents(top)
1675 1675 patchparent = self.qparents(repo, top)
1676 1676
1677 1677 inclsubs = checksubstate(repo, patchparent)
1678 1678 if inclsubs:
1679 1679 substatestate = repo.dirstate['.hgsubstate']
1680 1680
1681 1681 ph = patchheader(self.join(patchfn), self.plainmode)
1682 1682 diffopts = self.diffopts({'git': opts.get('git')}, patchfn,
1683 1683 plain=True)
1684 1684 if newuser:
1685 1685 ph.setuser(newuser)
1686 1686 if newdate:
1687 1687 ph.setdate(newdate)
1688 1688 ph.setparent(hex(patchparent))
1689 1689
1690 1690 # only commit new patch when write is complete
1691 1691 patchf = self.opener(patchfn, 'w', atomictemp=True)
1692 1692
1693 1693 # update the dirstate in place, strip off the qtip commit
1694 1694 # and then commit.
1695 1695 #
1696 1696 # this should really read:
1697 1697 # mm, dd, aa = repo.status(top, patchparent)[:3]
1698 1698 # but we do it backwards to take advantage of manifest/changelog
1699 1699 # caching against the next repo.status call
1700 1700 mm, aa, dd = repo.status(patchparent, top)[:3]
1701 1701 changes = repo.changelog.read(top)
1702 1702 man = repo.manifestlog[changes[0]].read()
1703 1703 aaa = aa[:]
1704 1704 match1 = scmutil.match(repo[None], pats, opts)
1705 1705 # in short mode, we only diff the files included in the
1706 1706 # patch already plus specified files
1707 1707 if opts.get('short'):
1708 1708 # if amending a patch, we start with existing
1709 1709 # files plus specified files - unfiltered
1710 1710 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1711 1711 # filter with include/exclude options
1712 1712 match1 = scmutil.match(repo[None], opts=opts)
1713 1713 else:
1714 1714 match = scmutil.matchall(repo)
1715 1715 m, a, r, d = repo.status(match=match)[:4]
1716 1716 mm = set(mm)
1717 1717 aa = set(aa)
1718 1718 dd = set(dd)
1719 1719
1720 1720 # we might end up with files that were added between
1721 1721 # qtip and the dirstate parent, but then changed in the
1722 1722 # local dirstate. in this case, we want them to only
1723 1723 # show up in the added section
1724 1724 for x in m:
1725 1725 if x not in aa:
1726 1726 mm.add(x)
1727 1727 # we might end up with files added by the local dirstate that
1728 1728 # were deleted by the patch. In this case, they should only
1729 1729 # show up in the changed section.
1730 1730 for x in a:
1731 1731 if x in dd:
1732 1732 dd.remove(x)
1733 1733 mm.add(x)
1734 1734 else:
1735 1735 aa.add(x)
1736 1736 # make sure any files deleted in the local dirstate
1737 1737 # are not in the add or change column of the patch
1738 1738 forget = []
1739 1739 for x in d + r:
1740 1740 if x in aa:
1741 1741 aa.remove(x)
1742 1742 forget.append(x)
1743 1743 continue
1744 1744 else:
1745 1745 mm.discard(x)
1746 1746 dd.add(x)
1747 1747
1748 1748 m = list(mm)
1749 1749 r = list(dd)
1750 1750 a = list(aa)
1751 1751
1752 1752 # create 'match' that includes the files to be recommitted.
1753 1753 # apply match1 via repo.status to ensure correct case handling.
1754 1754 cm, ca, cr, cd = repo.status(patchparent, match=match1)[:4]
1755 1755 allmatches = set(cm + ca + cr + cd)
1756 1756 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1757 1757
1758 1758 files = set(inclsubs)
1759 1759 for x in refreshchanges:
1760 1760 files.update(x)
1761 1761 match = scmutil.matchfiles(repo, files)
1762 1762
1763 1763 bmlist = repo[top].bookmarks()
1764 1764
1765 1765 dsguard = None
1766 1766 try:
1767 1767 dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
1768 1768 if diffopts.git or diffopts.upgrade:
1769 1769 copies = {}
1770 1770 for dst in a:
1771 1771 src = repo.dirstate.copied(dst)
1772 1772 # during qfold, the source file for copies may
1773 1773 # be removed. Treat this as a simple add.
1774 1774 if src is not None and src in repo.dirstate:
1775 1775 copies.setdefault(src, []).append(dst)
1776 1776 repo.dirstate.add(dst)
1777 1777 # remember the copies between patchparent and qtip
1778 1778 for dst in aaa:
1779 1779 f = repo.file(dst)
1780 1780 src = f.renamed(man[dst])
1781 1781 if src:
1782 1782 copies.setdefault(src[0], []).extend(
1783 1783 copies.get(dst, []))
1784 1784 if dst in a:
1785 1785 copies[src[0]].append(dst)
1786 1786 # we can't copy a file created by the patch itself
1787 1787 if dst in copies:
1788 1788 del copies[dst]
1789 1789 for src, dsts in copies.iteritems():
1790 1790 for dst in dsts:
1791 1791 repo.dirstate.copy(src, dst)
1792 1792 else:
1793 1793 for dst in a:
1794 1794 repo.dirstate.add(dst)
1795 1795 # Drop useless copy information
1796 1796 for f in list(repo.dirstate.copies()):
1797 1797 repo.dirstate.copy(None, f)
1798 1798 for f in r:
1799 1799 repo.dirstate.remove(f)
1800 1800 # if the patch excludes a modified file, mark that
1801 1801 # file with mtime=0 so status can see it.
1802 1802 mm = []
1803 1803 for i in pycompat.xrange(len(m) - 1, -1, -1):
1804 1804 if not match1(m[i]):
1805 1805 mm.append(m[i])
1806 1806 del m[i]
1807 1807 for f in m:
1808 1808 repo.dirstate.normal(f)
1809 1809 for f in mm:
1810 1810 repo.dirstate.normallookup(f)
1811 1811 for f in forget:
1812 1812 repo.dirstate.drop(f)
1813 1813
1814 1814 user = ph.user or changes[1]
1815 1815
1816 1816 oldphase = repo[top].phase()
1817 1817
1818 1818 # assumes strip can roll itself back if interrupted
1819 1819 repo.setparents(*cparents)
1820 1820 self.applied.pop()
1821 1821 self.applieddirty = True
1822 1822 strip(self.ui, repo, [top], update=False, backup=False)
1823 1823 dsguard.close()
1824 1824 finally:
1825 1825 release(dsguard)
1826 1826
1827 1827 try:
1828 1828 # might be nice to attempt to roll back strip after this
1829 1829
1830 1830 defaultmsg = "[mq]: %s" % patchfn
1831 1831 editor = cmdutil.getcommiteditor(editform=editform)
1832 1832 if edit:
1833 1833 def finishdesc(desc):
1834 1834 if desc.rstrip():
1835 1835 ph.setmessage(desc)
1836 1836 return desc
1837 1837 return defaultmsg
1838 1838 # i18n: this message is shown in editor with "HG: " prefix
1839 1839 extramsg = _('Leave message empty to use default message.')
1840 1840 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1841 1841 extramsg=extramsg,
1842 1842 editform=editform)
1843 1843 message = msg or "\n".join(ph.message)
1844 1844 elif not msg:
1845 1845 if not ph.message:
1846 1846 message = defaultmsg
1847 1847 else:
1848 1848 message = "\n".join(ph.message)
1849 1849 else:
1850 1850 message = msg
1851 1851 ph.setmessage(msg)
1852 1852
1853 1853 # Ensure we create a new changeset in the same phase than
1854 1854 # the old one.
1855 1855 lock = tr = None
1856 1856 try:
1857 1857 lock = repo.lock()
1858 1858 tr = repo.transaction('mq')
1859 1859 n = newcommit(repo, oldphase, message, user, ph.date,
1860 1860 match=match, force=True, editor=editor)
1861 1861 # only write patch after a successful commit
1862 1862 c = [list(x) for x in refreshchanges]
1863 1863 if inclsubs:
1864 1864 self.putsubstate2changes(substatestate, c)
1865 1865 chunks = patchmod.diff(repo, patchparent,
1866 1866 changes=c, opts=diffopts)
1867 1867 comments = bytes(ph)
1868 1868 if comments:
1869 1869 patchf.write(comments)
1870 1870 for chunk in chunks:
1871 1871 patchf.write(chunk)
1872 1872 patchf.close()
1873 1873
1874 1874 marks = repo._bookmarks
1875 1875 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
1876 1876 tr.close()
1877 1877
1878 1878 self.applied.append(statusentry(n, patchfn))
1879 1879 finally:
1880 1880 lockmod.release(tr, lock)
1881 1881 except: # re-raises
1882 1882 ctx = repo[cparents[0]]
1883 1883 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1884 1884 self.savedirty()
1885 1885 self.ui.warn(_('qrefresh interrupted while patch was popped! '
1886 1886 '(revert --all, qpush to recover)\n'))
1887 1887 raise
1888 1888 finally:
1889 1889 wlock.release()
1890 1890 self.removeundo(repo)
1891 1891
1892 1892 def init(self, repo, create=False):
1893 1893 if not create and os.path.isdir(self.path):
1894 1894 raise error.Abort(_("patch queue directory already exists"))
1895 1895 try:
1896 1896 os.mkdir(self.path)
1897 1897 except OSError as inst:
1898 1898 if inst.errno != errno.EEXIST or not create:
1899 1899 raise
1900 1900 if create:
1901 1901 return self.qrepo(create=True)
1902 1902
1903 1903 def unapplied(self, repo, patch=None):
1904 1904 if patch and patch not in self.series:
1905 1905 raise error.Abort(_("patch %s is not in series file") % patch)
1906 1906 if not patch:
1907 1907 start = self.seriesend()
1908 1908 else:
1909 1909 start = self.series.index(patch) + 1
1910 1910 unapplied = []
1911 1911 for i in pycompat.xrange(start, len(self.series)):
1912 1912 pushable, reason = self.pushable(i)
1913 1913 if pushable:
1914 1914 unapplied.append((i, self.series[i]))
1915 1915 self.explainpushable(i)
1916 1916 return unapplied
1917 1917
1918 1918 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1919 1919 summary=False):
1920 1920 def displayname(pfx, patchname, state):
1921 1921 if pfx:
1922 1922 self.ui.write(pfx)
1923 1923 if summary:
1924 1924 ph = patchheader(self.join(patchname), self.plainmode)
1925 1925 if ph.message:
1926 1926 msg = ph.message[0]
1927 1927 else:
1928 1928 msg = ''
1929 1929
1930 1930 if self.ui.formatted():
1931 1931 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1932 1932 if width > 0:
1933 1933 msg = stringutil.ellipsis(msg, width)
1934 1934 else:
1935 1935 msg = ''
1936 1936 self.ui.write(patchname, label='qseries.' + state)
1937 1937 self.ui.write(': ')
1938 1938 self.ui.write(msg, label='qseries.message.' + state)
1939 1939 else:
1940 1940 self.ui.write(patchname, label='qseries.' + state)
1941 1941 self.ui.write('\n')
1942 1942
1943 1943 applied = set([p.name for p in self.applied])
1944 1944 if length is None:
1945 1945 length = len(self.series) - start
1946 1946 if not missing:
1947 1947 if self.ui.verbose:
1948 1948 idxwidth = len("%d" % (start + length - 1))
1949 1949 for i in pycompat.xrange(start, start + length):
1950 1950 patch = self.series[i]
1951 1951 if patch in applied:
1952 1952 char, state = 'A', 'applied'
1953 1953 elif self.pushable(i)[0]:
1954 1954 char, state = 'U', 'unapplied'
1955 1955 else:
1956 1956 char, state = 'G', 'guarded'
1957 1957 pfx = ''
1958 1958 if self.ui.verbose:
1959 1959 pfx = '%*d %s ' % (idxwidth, i, char)
1960 1960 elif status and status != char:
1961 1961 continue
1962 1962 displayname(pfx, patch, state)
1963 1963 else:
1964 1964 msng_list = []
1965 1965 for root, dirs, files in os.walk(self.path):
1966 1966 d = root[len(self.path) + 1:]
1967 1967 for f in files:
1968 1968 fl = os.path.join(d, f)
1969 1969 if (fl not in self.series and
1970 1970 fl not in (self.statuspath, self.seriespath,
1971 1971 self.guardspath)
1972 1972 and not fl.startswith('.')):
1973 1973 msng_list.append(fl)
1974 1974 for x in sorted(msng_list):
1975 1975 pfx = self.ui.verbose and ('D ') or ''
1976 1976 displayname(pfx, x, 'missing')
1977 1977
1978 1978 def issaveline(self, l):
1979 1979 if l.name == '.hg.patches.save.line':
1980 1980 return True
1981 1981
1982 1982 def qrepo(self, create=False):
1983 1983 ui = self.baseui.copy()
1984 1984 # copy back attributes set by ui.pager()
1985 1985 if self.ui.pageractive and not ui.pageractive:
1986 1986 ui.pageractive = self.ui.pageractive
1987 1987 # internal config: ui.formatted
1988 1988 ui.setconfig('ui', 'formatted',
1989 1989 self.ui.config('ui', 'formatted'), 'mqpager')
1990 1990 ui.setconfig('ui', 'interactive',
1991 1991 self.ui.config('ui', 'interactive'), 'mqpager')
1992 1992 if create or os.path.isdir(self.join(".hg")):
1993 1993 return hg.repository(ui, path=self.path, create=create)
1994 1994
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Rebuild mq state from a changeset created by save().

        The description of *rev* encodes the applied/series data (and
        optionally the queue repository's dirstate parents).  With
        *delete*, the save changeset itself is stripped afterwards; with
        *qupdate*, the queue repository is updated to the recorded parent.
        Returns 1 on error, otherwise None.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # strip the 'Dirstate: ' prefix (10 chars) and decode the
                # two parent nodes recorded by save()
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    # applied patches are saved as '<hex node>:<name>'
                    applied.append(statusentry(bin(n), name))
                else:
                    # NOTE(review): this appends the raw line, which still
                    # carries the leading ':' written by save() for series
                    # entries -- confirm fullseries entries are expected in
                    # that form.
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            # only strip the save changeset when it is still a head
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
2048 2048
    def save(self, repo, msg=None):
        """Record the current mq state in a '.hg.patches.save.line' commit.

        The commit message encodes the applied patches and series so that
        restore() can rebuild the queue later.  Returns 1 on error
        (nothing applied, state already saved, or commit failure).
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record the queue repository's parents so restore() can put
            # it back (parsed from the 'Dirstate:' line there)
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        # applied entries serialize as '<node>:<name>', series as ':<name>'
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
2075 2075
2076 2076 def fullseriesend(self):
2077 2077 if self.applied:
2078 2078 p = self.applied[-1].name
2079 2079 end = self.findseries(p)
2080 2080 if end is None:
2081 2081 return len(self.fullseries)
2082 2082 return end + 1
2083 2083 return 0
2084 2084
    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def nextpatch(start):
            # scan forward from *start* for the first pushable (unguarded)
            # patch; with all_patches, *start* itself is the answer
            if all_patches or start >= len(self.series):
                return start
            for i in pycompat.xrange(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)
        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                # last applied patch was removed from the series file
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)
2108 2108
2109 2109 def appliedname(self, index):
2110 2110 pname = self.applied[index].name
2111 2111 if not self.ui.verbose:
2112 2112 p = pname
2113 2113 else:
2114 2114 p = ("%d" % self.series.index(pname)) + " " + pname
2115 2115 return p
2116 2116
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patch files or existing revisions into the queue.

        Either *files* (paths, or '-' for stdin) or *rev* (revisions to
        place under mq control) must be given, not both.  Returns the list
        of imported patch names.

        NOTE(review): *rev* defaults to None but 'len(rev)' below assumes
        a sized container when exactly one file is given -- callers pass
        an empty list; confirm no caller relies on the None default.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(_('patch %s is already in the series file')
                                  % patchname)

        if rev:
            if files:
                raise error.Abort(_('option "-r" not valid when importing '
                                    'files'))
            rev = scmutil.revrange(repo, rev)
            # process newest-first so each revision is a head when handled
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_('no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(_('option "-n" not valid when importing multiple '
                                'patches'))
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(_('revision %d is the root of more than one '
                                    'branch') % rev.last())
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(_('revision %d is already managed')
                                      % rev.first())
                if heads != [self.applied[-1].node]:
                    raise error.Abort(_('revision %d is not the parent of '
                                        'the queue') % rev.first())
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(_('revision %d has unmanaged children')
                                      % rev.first())
                lastparent = None

            diffopts = self.diffopts({'git': git})
            with repo.transaction('qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(_('revision %d is not mutable') % r,
                                          hint=_("see 'hg help phases' "
                                                 'for details'))
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(_('cannot import merge revision %d')
                                          % r)
                    # enforce linearity: each revision must parent the last
                    if lastparent and lastparent != r:
                        raise error.Abort(_('revision %d is not the parent of '
                                            '%d')
                                          % (r, lastparent))
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split('\n', 1)[0],
                            '%d.diff' % r)
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    with self.opener(patchname, "w") as fp:
                        cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool('mq', 'secret'):
                    # if we added anything with --rev, move the secret root
                    phases.retractboundary(repo, tr, phases.secret, [n])
            self.parseseries()
            self.applieddirty = True
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                # register a patch already sitting in the patch directory
                if filename == '-':
                    raise error.Abort(_('-e is incompatible with import from -')
                                      )
                filename = normname(filename)
                self.checkreservedname(filename)
                if util.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                  % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                # copy the patch content into the patch directory
                if filename == '-' and not patchname:
                    raise error.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported
2256 2256
def fixkeepchangesopts(ui, opts):
    """Inject --keep-changes into *opts* when mq.keepchanges is enabled.

    Returns *opts* untouched unless the config flag is set and neither
    --force nor --exact was given; otherwise returns an augmented copy.
    """
    keep = (ui.configbool('mq', 'keepchanges')
            and not opts.get('force')
            and not opts.get('exact'))
    if not keep:
        return opts
    newopts = dict(opts)
    newopts['keep_changes'] = True
    return newopts
2264 2264
2265 2265 @command("qdelete|qremove|qrm",
2266 2266 [('k', 'keep', None, _('keep patch file')),
2267 2267 ('r', 'rev', [],
2268 2268 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2269 2269 _('hg qdelete [-k] [PATCH]...'),
2270 2270 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2271 2271 def delete(ui, repo, *patches, **opts):
2272 2272 """remove patches from queue
2273 2273
2274 2274 The patches must not be applied, and at least one patch is required. Exact
2275 2275 patch identifiers must be given. With -k/--keep, the patch files are
2276 2276 preserved in the patch directory.
2277 2277
2278 2278 To stop managing a patch and move it into permanent history,
2279 2279 use the :hg:`qfinish` command."""
2280 2280 q = repo.mq
2281 2281 q.delete(repo, patches, pycompat.byteskwargs(opts))
2282 2282 q.savedirty()
2283 2283 return 0
2284 2284
2285 2285 @command("qapplied",
2286 2286 [('1', 'last', None, _('show only the preceding applied patch'))
2287 2287 ] + seriesopts,
2288 2288 _('hg qapplied [-1] [-s] [PATCH]'),
2289 2289 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2290 2290 def applied(ui, repo, patch=None, **opts):
2291 2291 """print the patches already applied
2292 2292
2293 2293 Returns 0 on success."""
2294 2294
2295 2295 q = repo.mq
2296 2296 opts = pycompat.byteskwargs(opts)
2297 2297
2298 2298 if patch:
2299 2299 if patch not in q.series:
2300 2300 raise error.Abort(_("patch %s is not in series file") % patch)
2301 2301 end = q.series.index(patch) + 1
2302 2302 else:
2303 2303 end = q.seriesend(True)
2304 2304
2305 2305 if opts.get('last') and not end:
2306 2306 ui.write(_("no patches applied\n"))
2307 2307 return 1
2308 2308 elif opts.get('last') and end == 1:
2309 2309 ui.write(_("only one patch applied\n"))
2310 2310 return 1
2311 2311 elif opts.get('last'):
2312 2312 start = end - 2
2313 2313 end = 1
2314 2314 else:
2315 2315 start = 0
2316 2316
2317 2317 q.qseries(repo, length=end, start=start, status='A',
2318 2318 summary=opts.get('summary'))
2319 2319
2320 2320
2321 2321 @command("qunapplied",
2322 2322 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2323 2323 _('hg qunapplied [-1] [-s] [PATCH]'),
2324 2324 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2325 2325 def unapplied(ui, repo, patch=None, **opts):
2326 2326 """print the patches not yet applied
2327 2327
2328 2328 Returns 0 on success."""
2329 2329
2330 2330 q = repo.mq
2331 2331 opts = pycompat.byteskwargs(opts)
2332 2332 if patch:
2333 2333 if patch not in q.series:
2334 2334 raise error.Abort(_("patch %s is not in series file") % patch)
2335 2335 start = q.series.index(patch) + 1
2336 2336 else:
2337 2337 start = q.seriesend(True)
2338 2338
2339 2339 if start == len(q.series) and opts.get('first'):
2340 2340 ui.write(_("all patches applied\n"))
2341 2341 return 1
2342 2342
2343 2343 if opts.get('first'):
2344 2344 length = 1
2345 2345 else:
2346 2346 length = None
2347 2347 q.qseries(repo, start=start, length=length, status='U',
2348 2348 summary=opts.get('summary'))
2349 2349
2350 2350 @command("qimport",
2351 2351 [('e', 'existing', None, _('import file in patch directory')),
2352 2352 ('n', 'name', '',
2353 2353 _('name of patch file'), _('NAME')),
2354 2354 ('f', 'force', None, _('overwrite existing files')),
2355 2355 ('r', 'rev', [],
2356 2356 _('place existing revisions under mq control'), _('REV')),
2357 2357 ('g', 'git', None, _('use git extended diff format')),
2358 2358 ('P', 'push', None, _('qpush after importing'))],
2359 2359 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
2360 2360 helpcategory=command.CATEGORY_IMPORT_EXPORT)
2361 2361 def qimport(ui, repo, *filename, **opts):
2362 2362 """import a patch or existing changeset
2363 2363
2364 2364 The patch is inserted into the series after the last applied
2365 2365 patch. If no patches have been applied, qimport prepends the patch
2366 2366 to the series.
2367 2367
2368 2368 The patch will have the same name as its source file unless you
2369 2369 give it a new one with -n/--name.
2370 2370
2371 2371 You can register an existing patch inside the patch directory with
2372 2372 the -e/--existing flag.
2373 2373
2374 2374 With -f/--force, an existing patch of the same name will be
2375 2375 overwritten.
2376 2376
2377 2377 An existing changeset may be placed under mq control with -r/--rev
2378 2378 (e.g. qimport --rev . -n patch will place the current revision
2379 2379 under mq control). With -g/--git, patches imported with --rev will
2380 2380 use the git diff format. See the diffs help topic for information
2381 2381 on why this is important for preserving rename/copy information
2382 2382 and permission changes. Use :hg:`qfinish` to remove changesets
2383 2383 from mq control.
2384 2384
2385 2385 To import a patch from standard input, pass - as the patch file.
2386 2386 When importing from standard input, a patch name must be specified
2387 2387 using the --name flag.
2388 2388
2389 2389 To import an existing patch while renaming it::
2390 2390
2391 2391 hg qimport -e existing-patch -n new-name
2392 2392
2393 2393 Returns 0 if import succeeded.
2394 2394 """
2395 2395 opts = pycompat.byteskwargs(opts)
2396 2396 with repo.lock(): # cause this may move phase
2397 2397 q = repo.mq
2398 2398 try:
2399 2399 imported = q.qimport(
2400 2400 repo, filename, patchname=opts.get('name'),
2401 2401 existing=opts.get('existing'), force=opts.get('force'),
2402 2402 rev=opts.get('rev'), git=opts.get('git'))
2403 2403 finally:
2404 2404 q.savedirty()
2405 2405
2406 2406 if imported and opts.get('push') and not opts.get('rev'):
2407 2407 return q.push(repo, imported[-1])
2408 2408 return 0
2409 2409
def qinit(ui, repo, create):
    """Create the patch queue, optionally as a versioned repository.

    When *create* is true, a nested repository is set up with a series
    file and an mq-specific .hgignore excluding the transient status and
    guards files, and both files are scheduled for commit.

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if not r:
        return 0
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wvfs('.hgignore', 'w')
        for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                     'status\n', 'guards\n'):
            fp.write(line)
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wvfs('series', 'w').close()
    r[None].add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
2435 2435
2436 2436 @command("qinit",
2437 2437 [('c', 'create-repo', None, _('create queue repository'))],
2438 2438 _('hg qinit [-c]'),
2439 2439 helpcategory=command.CATEGORY_REPO_CREATION,
2440 2440 helpbasic=True)
2441 2441 def init(ui, repo, **opts):
2442 2442 """init a new queue repository (DEPRECATED)
2443 2443
2444 2444 The queue repository is unversioned by default. If
2445 2445 -c/--create-repo is specified, qinit will create a separate nested
2446 2446 repository for patches (qinit -c may also be run later to convert
2447 2447 an unversioned patch repository into a versioned one). You can use
2448 2448 qcommit to commit changes to this queue repository.
2449 2449
2450 2450 This command is deprecated. Without -c, it's implied by other relevant
2451 2451 commands. With -c, use :hg:`init --mq` instead."""
2452 2452 return qinit(ui, repo, create=opts.get(r'create_repo'))
2453 2453
2454 2454 @command("qclone",
2455 2455 [('', 'pull', None, _('use pull protocol to copy metadata')),
2456 2456 ('U', 'noupdate', None,
2457 2457 _('do not update the new working directories')),
2458 2458 ('', 'uncompressed', None,
2459 2459 _('use uncompressed transfer (fast over LAN)')),
2460 2460 ('p', 'patches', '',
2461 2461 _('location of source patch repository'), _('REPO')),
2462 2462 ] + cmdutil.remoteopts,
2463 2463 _('hg qclone [OPTION]... SOURCE [DEST]'),
2464 2464 helpcategory=command.CATEGORY_REPO_CREATION,
2465 2465 norepo=True)
2466 2466 def clone(ui, source, dest=None, **opts):
2467 2467 '''clone main and patch repository at same time
2468 2468
2469 2469 If source is local, destination will have no patches applied. If
2470 2470 source is remote, this command can not check if patches are
2471 2471 applied in source, so cannot guarantee that patches are not
2472 2472 applied in destination. If you clone remote repository, be sure
2473 2473 before that it has no patches applied.
2474 2474
2475 2475 Source patch repository is looked for in <src>/.hg/patches by
2476 2476 default. Use -p <url> to change.
2477 2477
2478 2478 The patch directory must be a nested Mercurial repository, as
2479 2479 would be created by :hg:`init --mq`.
2480 2480
2481 2481 Return 0 on success.
2482 2482 '''
2483 2483 opts = pycompat.byteskwargs(opts)
2484 2484 def patchdir(repo):
2485 2485 """compute a patch repo url from a repo object"""
2486 2486 url = repo.url()
2487 2487 if url.endswith('/'):
2488 2488 url = url[:-1]
2489 2489 return url + '/.hg/patches'
2490 2490
2491 2491 # main repo (destination and sources)
2492 2492 if dest is None:
2493 2493 dest = hg.defaultdest(source)
2494 2494 sr = hg.peer(ui, opts, ui.expandpath(source))
2495 2495
2496 2496 # patches repo (source only)
2497 2497 if opts.get('patches'):
2498 2498 patchespath = ui.expandpath(opts.get('patches'))
2499 2499 else:
2500 2500 patchespath = patchdir(sr)
2501 2501 try:
2502 2502 hg.peer(ui, opts, patchespath)
2503 2503 except error.RepoError:
2504 2504 raise error.Abort(_('versioned patch repository not found'
2505 2505 ' (see init --mq)'))
2506 2506 qbase, destrev = None, None
2507 2507 if sr.local():
2508 2508 repo = sr.local()
2509 2509 if repo.mq.applied and repo[qbase].phase() != phases.secret:
2510 2510 qbase = repo.mq.applied[0].node
2511 2511 if not hg.islocal(dest):
2512 2512 heads = set(repo.heads())
2513 2513 destrev = list(heads.difference(repo.heads(qbase)))
2514 2514 destrev.append(repo.changelog.parents(qbase)[0])
2515 2515 elif sr.capable('lookup'):
2516 2516 try:
2517 2517 qbase = sr.lookup('qbase')
2518 2518 except error.RepoError:
2519 2519 pass
2520 2520
2521 2521 ui.note(_('cloning main repository\n'))
2522 2522 sr, dr = hg.clone(ui, opts, sr.url(), dest,
2523 2523 pull=opts.get('pull'),
2524 2524 revs=destrev,
2525 2525 update=False,
2526 2526 stream=opts.get('uncompressed'))
2527 2527
2528 2528 ui.note(_('cloning patch repository\n'))
2529 2529 hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
2530 2530 pull=opts.get('pull'), update=not opts.get('noupdate'),
2531 2531 stream=opts.get('uncompressed'))
2532 2532
2533 2533 if dr.local():
2534 2534 repo = dr.local()
2535 2535 if qbase:
2536 2536 ui.note(_('stripping applied patches from destination '
2537 2537 'repository\n'))
2538 2538 strip(ui, repo, [qbase], update=False, backup=None)
2539 2539 if not opts.get('noupdate'):
2540 2540 ui.note(_('updating destination repository\n'))
2541 2541 hg.update(repo, repo.changelog.tip())
2542 2542
2543 2543 @command("qcommit|qci",
2544 2544 commands.table["commit|ci"][1],
2545 2545 _('hg qcommit [OPTION]... [FILE]...'),
2546 2546 helpcategory=command.CATEGORY_COMMITTING,
2547 2547 inferrepo=True)
2548 2548 def commit(ui, repo, *pats, **opts):
2549 2549 """commit changes in the queue repository (DEPRECATED)
2550 2550
2551 2551 This command is deprecated; use :hg:`commit --mq` instead."""
2552 2552 q = repo.mq
2553 2553 r = q.qrepo()
2554 2554 if not r:
2555 2555 raise error.Abort('no queue repository')
2556 2556 commands.commit(r.ui, r, *pats, **opts)
2557 2557
2558 2558 @command("qseries",
2559 2559 [('m', 'missing', None, _('print patches not in series')),
2560 2560 ] + seriesopts,
2561 2561 _('hg qseries [-ms]'),
2562 2562 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2563 2563 def series(ui, repo, **opts):
2564 2564 """print the entire series file
2565 2565
2566 2566 Returns 0 on success."""
2567 2567 repo.mq.qseries(repo, missing=opts.get(r'missing'),
2568 2568 summary=opts.get(r'summary'))
2569 2569 return 0
2570 2570
2571 2571 @command("qtop", seriesopts, _('hg qtop [-s]'),
2572 2572 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2573 2573 def top(ui, repo, **opts):
2574 2574 """print the name of the current patch
2575 2575
2576 2576 Returns 0 on success."""
2577 2577 q = repo.mq
2578 2578 if q.applied:
2579 2579 t = q.seriesend(True)
2580 2580 else:
2581 2581 t = 0
2582 2582
2583 2583 if t:
2584 2584 q.qseries(repo, start=t - 1, length=1, status='A',
2585 2585 summary=opts.get(r'summary'))
2586 2586 else:
2587 2587 ui.write(_("no patches applied\n"))
2588 2588 return 1
2589 2589
2590 2590 @command("qnext", seriesopts, _('hg qnext [-s]'),
2591 2591 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2592 2592 def next(ui, repo, **opts):
2593 2593 """print the name of the next pushable patch
2594 2594
2595 2595 Returns 0 on success."""
2596 2596 q = repo.mq
2597 2597 end = q.seriesend()
2598 2598 if end == len(q.series):
2599 2599 ui.write(_("all patches applied\n"))
2600 2600 return 1
2601 2601 q.qseries(repo, start=end, length=1, summary=opts.get(r'summary'))
2602 2602
2603 2603 @command("qprev", seriesopts, _('hg qprev [-s]'),
2604 2604 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
2605 2605 def prev(ui, repo, **opts):
2606 2606 """print the name of the preceding applied patch
2607 2607
2608 2608 Returns 0 on success."""
2609 2609 q = repo.mq
2610 2610 l = len(q.applied)
2611 2611 if l == 1:
2612 2612 ui.write(_("only one patch applied\n"))
2613 2613 return 1
2614 2614 if not l:
2615 2615 ui.write(_("no patches applied\n"))
2616 2616 return 1
2617 2617 idx = q.series.index(q.applied[-2].name)
2618 2618 q.qseries(repo, start=idx, length=1, status='A',
2619 2619 summary=opts.get(r'summary'))
2620 2620
def setupheaderopts(ui, opts):
    """Default the 'user'/'date' opts from -U/-D when not given explicitly."""
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % dateutil.makedate()
2626 2626
@command("qnew",
    [('e', 'edit', None, _('invoke editor on commit messages')),
    ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
    ('g', 'git', None, _('use git extended diff format')),
    ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
    ('u', 'user', '',
     _('add "From: <USER>" to patch'), _('USER')),
    ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
    ('d', 'date', '',
     _('add "Date: <DATE>" to patch'), _('DATE'))
    ] + cmdutil.walkopts + cmdutil.commitopts,
    _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
    inferrepo=True)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    # Command-line opts arrive with native-str keys; work on bytes keys
    # internally, then convert back to str keys for the **kwargs call.
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    opts['msg'] = msg
    # honor -U/-D by filling in user/date before creating the patch
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    # persist series/status changes to .hg/patches
    q.savedirty()
    return 0
2674 2674
@command("qrefresh",
    [('e', 'edit', None, _('invoke editor on commit messages')),
    ('g', 'git', None, _('use git extended diff format')),
    ('s', 'short', None,
     _('refresh only files already in the patch and specified files')),
    ('U', 'currentuser', None,
     _('add/update author field in patch with current user')),
    ('u', 'user', '',
     _('add/update author field in patch with given user'), _('USER')),
    ('D', 'currentdate', None,
     _('add/update date field in patch with current date')),
    ('d', 'date', '',
     _('add/update date field in patch with given date'), _('DATE'))
    ] + cmdutil.walkopts + cmdutil.commitopts,
    _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
    inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    # bytes keys internally; str keys again for the **kwargs call below
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    # the refresh rewrites the working directory and patch file, so it
    # must run under the working-directory lock
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret
2721 2721
@command("qdiff",
    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
    _('hg qdiff [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_FILE_CONTENTS, helpbasic=True,
    inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # route long output through the user's pager
    ui.pager('qdiff')
    repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
    return 0
2745 2745
@command('qfold',
    [('e', 'edit', None, _('invoke editor on commit messages')),
    ('k', 'keep', None, _('keep folded patch files')),
    ] + cmdutil.commitopts,
    _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # resolve every name and sanity-check it before touching anything
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # NOTE(review): this warns "skipping" but the patch is still
            # appended below (no 'continue') — confirm whether the
            # duplicate is meant to be folded anyway.
            ui.warn(_('skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                              % p)
        patches.append(p)

    # apply each patch on top of the working directory, collecting any
    # per-patch commit messages when none was given explicitly
    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    if not message:
        # concatenate the parent's header with the folded messages,
        # separated by '* * *' lines
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        # refresh the current patch with the cumulative result, then
        # drop the folded patches from the series
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
2814 2814
@command("qgoto",
    [('', 'keep-changes', None,
      _('tolerate non-conflicting local changes')),
    ('f', 'force', None, _('overwrite any local changes')),
    ('', 'no-backup', None, _('do not save backup copies of files'))],
    _('hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = pycompat.byteskwargs(opts)
    # --keep-changes conflicts with --force/--no-backup; normalize here
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    # pop if the target is already applied (it is below the top),
    # otherwise push until it becomes the top
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
                    keepchanges=keepchanges)
    else:
        ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
                     keepchanges=keepchanges)
    q.savedirty()
    return ret
2840 2840
@command("qguard",
    [('l', 'list', None, _('list all patches and guards')),
    ('n', 'none', None, _('drop all guards'))],
    _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        # print one series entry with its guards, colorized by state
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get(r'list'):
        # -l/--list is exclusive with everything else
        if args or opts.get(r'none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in pycompat.xrange(len(q.series)):
            status(i)
        return
    # when the first argument is itself a guard (+foo/-foo), the target
    # patch is implicitly the topmost applied one
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get(r'none'):
        # set (or with --none, clear) guards for the patch
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # no guards given: just report the patch's current guards
        status(q.series.index(q.lookup(patch)))
2916 2916
@command("qheader", [], _('hg qheader [PATCH]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        # no patch named: default to the topmost applied patch
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    # parse the patch file's header and print its message lines
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')
2935 2935
def lastsavename(path):
    """Return (fullpath, index) of the highest-numbered save of *path*.

    Saved queues are siblings of *path* named ``<base>.<N>`` with a
    purely numeric suffix.  Scans the containing directory and returns
    the entry with the largest N, or (None, None) when no save exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # Escape the base name and anchor the numeric suffix: the previous
    # pattern used an unescaped '.' (matching any character) and no end
    # anchor, so unrelated entries such as 'patchesX1' or 'patches.2.bak'
    # were mistaken for saves of 'patches'.
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2952 2952
def savename(path):
    """Return the name for the next save of *path*: ``<path>.<N+1>``."""
    previous, index = lastsavename(path)
    if previous is None:
        # no save exists yet; the first one gets suffix ".1"
        index = 0
    return path + ".%d" % (index + 1)
2959 2959
@command("qpush",
    [('', 'keep-changes', None,
      _('tolerate non-conflicting local changes')),
    ('f', 'force', None, _('apply on top of local changes')),
    ('e', 'exact', None,
     _('apply the target patch to its recorded parent')),
    ('l', 'list', None, _('list patch name in commit text')),
    ('a', 'all', None, _('apply all patches')),
    ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
    ('n', 'name', '',
     _('merge queue name (DEPRECATED)'), _('NAME')),
    ('', 'move', None,
     _('reorder patch series and apply only the patch')),
    ('', 'no-backup', None, _('do not save backup copies of files'))],
    _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    # --keep-changes conflicts with --force/--no-backup; normalize here
    opts = fixkeepchangesopts(ui, opts)
    if opts.get('merge'):
        # deprecated merge mode: push while merging against a saved queue,
        # either named explicitly with -n or the most recent qsave
        if opts.get('name'):
            newpath = repo.vfs.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                 keepchanges=opts.get('keep_changes'))
    return ret
3007 3007
@command("qpop",
    [('a', 'all', None, _('pop all patches')),
    ('n', 'name', '',
     _('queue name to pop (DEPRECATED)'), _('NAME')),
    ('', 'keep-changes', None,
     _('tolerate non-conflicting local changes')),
    ('f', 'force', None, _('forget any local changes to patched files')),
    ('', 'no-backup', None, _('do not save backup copies of files'))],
    _('hg qpop [-a] [-f] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get('name'):
        # deprecated: pop from a named queue directory instead of the
        # active one; in that case don't touch the working directory
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    q.savedirty()
    return ret
3047 3047
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        # single-argument form: the argument is the new name and the
        # target is the topmost applied patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # rewrite the series entry, preserving any '#guard' annotations
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # if the patch is applied, update the status entry as well
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # mirror the rename in the versioned patch repository, if any
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == 'a':
                # not yet committed: just drop/add in the dirstate
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                # committed: record as a copy so history is preserved
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()
3103 3103
@command("qrestore",
    [('d', 'delete', None, _('delete save entry')),
    ('u', 'update', None, _('update queue working directory'))],
    _('hg qrestore [-d] [-u] REV'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    # opts still has native-str keys here; index with r'' accordingly
    q.restore(repo, rev, delete=opts.get(r'delete'),
              qupdate=opts.get(r'update'))
    q.savedirty()
    return 0
3119 3119
@command("qsave",
    [('c', 'copy', None, _('copy patch directory')),
    ('n', 'name', '',
     _('copy directory name'), _('NAME')),
    ('e', 'empty', None, _('clear queue status file')),
    ('f', 'force', None, _('force copy'))] + cmdutil.commitopts,
    _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # copy the whole patch directory, either to an explicit -n NAME
        # or to the next available numbered save name
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                        'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                        'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # -e: clear the applied-status file after saving
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3159 3159
3160 3160
@command("qselect",
    [('n', 'none', None, _('disable all guards')),
    ('s', 'series', None, _('list all guards in series file')),
    ('', 'pop', None, _('pop to before first guarded applied patch')),
    ('', 'reapply', None, _('pop, then reapply patches'))],
    _('hg qselect [OPTION]... [GUARD]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    # True when the i-th applied patch is pushable under current guards
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get('none'):
        # change the active guard set, reporting how the counts of
        # pushable/guarded patches changed as a result
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in pycompat.xrange(len(q.applied))
                       if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in pycompat.xrange(len(q.applied))
                       if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s: tally how often each guard appears in the series file
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = list(guards.items())
        # sort by guard name, ignoring the leading '+'/'-' sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: report the currently active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # pop down to just below the first applied patch that is now
        # guarded (or everything, if the first patch itself is guarded)
        for i in pycompat.xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                # --reapply: push back up to the old top, skipping
                # patches that are now guarded
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3273 3273
@command("qfinish",
    [('a', 'applied', None, _('finish all applied changesets'))],
    _('hg qfinish [-a] [REV]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get(r'applied') and not revrange:
        raise error.Abort(_('no revisions specified'))
    elif opts.get(r'applied'):
        # -a: finish the whole applied stack
        revrange = ('qbase::qtip',) + revrange
    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves the responsibility to lock
    # the repo to the caller to avoid deadlock with wlock. This command code
    # is responsible for this locking.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3316 3316
@command("qqueue",
    [('l', 'list', False, _('list all available queues')),
    ('', 'active', False, _('print name of active queue')),
    ('c', 'create', False, _('create new queue')),
    ('', 'rename', False, _('rename active queue')),
    ('', 'delete', False, _('delete reference to queue')),
    ('', 'purge', False, _('delete queue, and remove patch dir')),
    ],
    _('[OPTION] [QUEUE]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # bookkeeping files under .hg/: list of queues and the active one
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # name of the active queue, derived from its directory name
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # sorted list of registered queue names, always including the
        # current one even if the registry is missing or stale
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # switch the active queue; refuse while patches are applied
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                                'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # record the active queue; an empty file means the default queue
        fh = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        # append a queue name to the registry file
        fh = repo.vfs(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # on-disk directory for a queue: 'patches' or 'patches-NAME'
        if name == 'patches':
            return repo.vfs.join('patches')
        else:
            return repo.vfs.join('patches-' + name)

    def _validname(name):
        # queue names may not contain path or guard separators
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        # remove a queue from the registry (patch dir is left alone;
        # --purge removes it separately)
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        # rewrite the registry atomically via a temp file + rename
        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        repo.vfs.rename('patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    if not name or opts.get('list') or opts.get('active'):
        # listing modes need no name validation or locking
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    with repo.wlock():
        existing = _getqueues()

        if opts.get('create'):
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)
            if _noqueues():
                # first explicit queue: register the implicit default too
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get('rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(_('can\'t rename "%s" to its current name')
                                  % name)
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(_('non-queue directory "%s" already exists') %
                                  newdir)

            # rewrite the registry with the renamed entry, moving the
            # patch directory if it exists
            fh = repo.vfs('patches.queues.new', 'w')
            for queue in existing:
                if queue == current:
                    fh.write('%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write('%s\n' % (queue,))
            fh.close()
            repo.vfs.rename('patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get('delete'):
            _delete(name)
        elif opts.get('purge'):
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            # bare name: switch to an existing queue
            if name not in existing:
                raise error.Abort(_('use --create to create a new queue'))
            _setactive(name)
3494 3494
3495 3495 def mqphasedefaults(repo, roots):
3496 3496 """callback used to set mq changeset as secret when no phase data exists"""
3497 3497 if repo.mq.applied:
3498 3498 if repo.ui.configbool('mq', 'secret'):
3499 3499 mqphase = phases.secret
3500 3500 else:
3501 3501 mqphase = phases.draft
3502 3502 qbase = repo[repo.mq.applied[0].node]
3503 3503 roots[mqphase].add(qbase.node())
3504 3504 return roots
3505 3505
3506 3506 def reposetup(ui, repo):
3507 3507 class mqrepo(repo.__class__):
3508 3508 @localrepo.unfilteredpropertycache
3509 3509 def mq(self):
3510 3510 return queue(self.ui, self.baseui, self.path)
3511 3511
3512 3512 def invalidateall(self):
3513 3513 super(mqrepo, self).invalidateall()
3514 if localrepo.hasunfilteredcache(self, 'mq'):
3514 if localrepo.hasunfilteredcache(self, r'mq'):
3515 3515 # recreate mq in case queue path was changed
3516 delattr(self.unfiltered(), 'mq')
3516 delattr(self.unfiltered(), r'mq')
3517 3517
3518 3518 def abortifwdirpatched(self, errmsg, force=False):
3519 3519 if self.mq.applied and self.mq.checkapplied and not force:
3520 3520 parents = self.dirstate.parents()
3521 3521 patches = [s.node for s in self.mq.applied]
3522 3522 if parents[0] in patches or parents[1] in patches:
3523 3523 raise error.Abort(errmsg)
3524 3524
3525 3525 def commit(self, text="", user=None, date=None, match=None,
3526 3526 force=False, editor=False, extra=None):
3527 3527 if extra is None:
3528 3528 extra = {}
3529 3529 self.abortifwdirpatched(
3530 3530 _('cannot commit over an applied mq patch'),
3531 3531 force)
3532 3532
3533 3533 return super(mqrepo, self).commit(text, user, date, match, force,
3534 3534 editor, extra)
3535 3535
3536 3536 def checkpush(self, pushop):
3537 3537 if self.mq.applied and self.mq.checkapplied and not pushop.force:
3538 3538 outapplied = [e.node for e in self.mq.applied]
3539 3539 if pushop.revs:
3540 3540 # Assume applied patches have no non-patch descendants and
3541 3541 # are not on remote already. Filtering any changeset not
3542 3542 # pushed.
3543 3543 heads = set(pushop.revs)
3544 3544 for node in reversed(outapplied):
3545 3545 if node in heads:
3546 3546 break
3547 3547 else:
3548 3548 outapplied.pop()
3549 3549 # looking for pushed and shared changeset
3550 3550 for node in outapplied:
3551 3551 if self[node].phase() < phases.secret:
3552 3552 raise error.Abort(_('source has mq patches applied'))
3553 3553 # no non-secret patches pushed
3554 3554 super(mqrepo, self).checkpush(pushop)
3555 3555
3556 3556 def _findtags(self):
3557 3557 '''augment tags from base class with patch tags'''
3558 3558 result = super(mqrepo, self)._findtags()
3559 3559
3560 3560 q = self.mq
3561 3561 if not q.applied:
3562 3562 return result
3563 3563
3564 3564 mqtags = [(patch.node, patch.name) for patch in q.applied]
3565 3565
3566 3566 try:
3567 3567 # for now ignore filtering business
3568 3568 self.unfiltered().changelog.rev(mqtags[-1][0])
3569 3569 except error.LookupError:
3570 3570 self.ui.warn(_('mq status file refers to unknown node %s\n')
3571 3571 % short(mqtags[-1][0]))
3572 3572 return result
3573 3573
3574 3574 # do not add fake tags for filtered revisions
3575 3575 included = self.changelog.hasnode
3576 3576 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
3577 3577 if not mqtags:
3578 3578 return result
3579 3579
3580 3580 mqtags.append((mqtags[-1][0], 'qtip'))
3581 3581 mqtags.append((mqtags[0][0], 'qbase'))
3582 3582 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3583 3583 tags = result[0]
3584 3584 for patch in mqtags:
3585 3585 if patch[1] in tags:
3586 3586 self.ui.warn(_('tag %s overrides mq patch of the same '
3587 3587 'name\n') % patch[1])
3588 3588 else:
3589 3589 tags[patch[1]] = patch[0]
3590 3590
3591 3591 return result
3592 3592
3593 3593 if repo.local():
3594 3594 repo.__class__ = mqrepo
3595 3595
3596 3596 repo._phasedefaults.append(mqphasedefaults)
3597 3597
3598 3598 def mqimport(orig, ui, repo, *args, **kwargs):
3599 3599 if (util.safehasattr(repo, 'abortifwdirpatched')
3600 3600 and not kwargs.get(r'no_commit', False)):
3601 3601 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3602 3602 kwargs.get(r'force'))
3603 3603 return orig(ui, repo, *args, **kwargs)
3604 3604
3605 3605 def mqinit(orig, ui, *args, **kwargs):
3606 3606 mq = kwargs.pop(r'mq', None)
3607 3607
3608 3608 if not mq:
3609 3609 return orig(ui, *args, **kwargs)
3610 3610
3611 3611 if args:
3612 3612 repopath = args[0]
3613 3613 if not hg.islocal(repopath):
3614 3614 raise error.Abort(_('only a local queue repository '
3615 3615 'may be initialized'))
3616 3616 else:
3617 3617 repopath = cmdutil.findrepo(encoding.getcwd())
3618 3618 if not repopath:
3619 3619 raise error.Abort(_('there is no Mercurial repository here '
3620 3620 '(.hg not found)'))
3621 3621 repo = hg.repository(ui, repopath)
3622 3622 return qinit(ui, repo, True)
3623 3623
3624 3624 def mqcommand(orig, ui, repo, *args, **kwargs):
3625 3625 """Add --mq option to operate on patch repository instead of main"""
3626 3626
3627 3627 # some commands do not like getting unknown options
3628 3628 mq = kwargs.pop(r'mq', None)
3629 3629
3630 3630 if not mq:
3631 3631 return orig(ui, repo, *args, **kwargs)
3632 3632
3633 3633 q = repo.mq
3634 3634 r = q.qrepo()
3635 3635 if not r:
3636 3636 raise error.Abort(_('no queue repository'))
3637 3637 return orig(r.ui, r, *args, **kwargs)
3638 3638
3639 3639 def summaryhook(ui, repo):
3640 3640 q = repo.mq
3641 3641 m = []
3642 3642 a, u = len(q.applied), len(q.unapplied(repo))
3643 3643 if a:
3644 3644 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3645 3645 if u:
3646 3646 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3647 3647 if m:
3648 3648 # i18n: column positioning for "hg summary"
3649 3649 ui.write(_("mq: %s\n") % ', '.join(m))
3650 3650 else:
3651 3651 # i18n: column positioning for "hg summary"
3652 3652 ui.note(_("mq: (empty queue)\n"))
3653 3653
3654 3654 revsetpredicate = registrar.revsetpredicate()
3655 3655
3656 3656 @revsetpredicate('mq()')
3657 3657 def revsetmq(repo, subset, x):
3658 3658 """Changesets managed by MQ.
3659 3659 """
3660 3660 revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
3661 3661 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3662 3662 return smartset.baseset([r for r in subset if r in applied])
3663 3663
3664 3664 # tell hggettext to extract docstrings from these functions:
3665 3665 i18nfunctions = [revsetmq]
3666 3666
3667 3667 def extsetup(ui):
3668 3668 # Ensure mq wrappers are called first, regardless of extension load order by
3669 3669 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3670 3670 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3671 3671
3672 3672 extensions.wrapcommand(commands.table, 'import', mqimport)
3673 3673 cmdutil.summaryhooks.add('mq', summaryhook)
3674 3674
3675 3675 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3676 3676 entry[1].extend(mqopt)
3677 3677
3678 3678 def dotable(cmdtable):
3679 3679 for cmd, entry in cmdtable.iteritems():
3680 3680 cmd = cmdutil.parsealiases(cmd)[0]
3681 3681 func = entry[0]
3682 3682 if func.norepo:
3683 3683 continue
3684 3684 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3685 3685 entry[1].extend(mqopt)
3686 3686
3687 3687 dotable(commands.table)
3688 3688
3689 3689 for extname, extmodule in extensions.extensions():
3690 3690 if extmodule.__file__ != __file__:
3691 3691 dotable(getattr(extmodule, 'cmdtable', {}))
3692 3692
3693 3693 colortable = {'qguard.negative': 'red',
3694 3694 'qguard.positive': 'yellow',
3695 3695 'qguard.unguarded': 'green',
3696 3696 'qseries.applied': 'blue bold underline',
3697 3697 'qseries.guarded': 'black bold',
3698 3698 'qseries.missing': 'red bold',
3699 3699 'qseries.unapplied': 'black bold'}
@@ -1,3040 +1,3040 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 95 def __set__(self, repo, value):
96 96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 97 def __delete__(self, repo):
98 98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99 99
100 100 class repofilecache(_basefilecache):
101 101 """filecache for files in .hg but outside of .hg/store"""
102 102 def __init__(self, *paths):
103 103 super(repofilecache, self).__init__(*paths)
104 104 for path in paths:
105 105 _cachedfiles.add((path, 'plain'))
106 106
107 107 def join(self, obj, fname):
108 108 return obj.vfs.join(fname)
109 109
110 110 class storecache(_basefilecache):
111 111 """filecache for files in the store"""
112 112 def __init__(self, *paths):
113 113 super(storecache, self).__init__(*paths)
114 114 for path in paths:
115 115 _cachedfiles.add((path, ''))
116 116
117 117 def join(self, obj, fname):
118 118 return obj.sjoin(fname)
119 119
120 120 def isfilecached(repo, name):
121 121 """check if a repo has already cached "name" filecache-ed property
122 122
123 123 This returns (cachedobj-or-None, iscached) tuple.
124 124 """
125 125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 126 if not cacheentry:
127 127 return None, False
128 128 return cacheentry.obj, True
129 129
130 130 class unfilteredpropertycache(util.propertycache):
131 131 """propertycache that apply to unfiltered repo only"""
132 132
133 133 def __get__(self, repo, type=None):
134 134 unfi = repo.unfiltered()
135 135 if unfi is repo:
136 136 return super(unfilteredpropertycache, self).__get__(unfi)
137 137 return getattr(unfi, self.name)
138 138
139 139 class filteredpropertycache(util.propertycache):
140 140 """propertycache that must take filtering in account"""
141 141
142 142 def cachevalue(self, obj, value):
143 143 object.__setattr__(obj, self.name, value)
144 144
145 145
146 146 def hasunfilteredcache(repo, name):
147 147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 148 return name in vars(repo.unfiltered())
149 149
150 150 def unfilteredmethod(orig):
151 151 """decorate method that always need to be run on unfiltered version"""
152 152 def wrapper(repo, *args, **kwargs):
153 153 return orig(repo.unfiltered(), *args, **kwargs)
154 154 return wrapper
155 155
156 156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 157 'unbundle'}
158 158 legacycaps = moderncaps.union({'changegroupsubset'})
159 159
160 160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 161 class localcommandexecutor(object):
162 162 def __init__(self, peer):
163 163 self._peer = peer
164 164 self._sent = False
165 165 self._closed = False
166 166
167 167 def __enter__(self):
168 168 return self
169 169
170 170 def __exit__(self, exctype, excvalue, exctb):
171 171 self.close()
172 172
173 173 def callcommand(self, command, args):
174 174 if self._sent:
175 175 raise error.ProgrammingError('callcommand() cannot be used after '
176 176 'sendcommands()')
177 177
178 178 if self._closed:
179 179 raise error.ProgrammingError('callcommand() cannot be used after '
180 180 'close()')
181 181
182 182 # We don't need to support anything fancy. Just call the named
183 183 # method on the peer and return a resolved future.
184 184 fn = getattr(self._peer, pycompat.sysstr(command))
185 185
186 186 f = pycompat.futures.Future()
187 187
188 188 try:
189 189 result = fn(**pycompat.strkwargs(args))
190 190 except Exception:
191 191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 192 else:
193 193 f.set_result(result)
194 194
195 195 return f
196 196
197 197 def sendcommands(self):
198 198 self._sent = True
199 199
200 200 def close(self):
201 201 self._closed = True
202 202
203 203 @interfaceutil.implementer(repository.ipeercommands)
204 204 class localpeer(repository.peer):
205 205 '''peer for a local repo; reflects only the most recent API'''
206 206
207 207 def __init__(self, repo, caps=None):
208 208 super(localpeer, self).__init__()
209 209
210 210 if caps is None:
211 211 caps = moderncaps.copy()
212 212 self._repo = repo.filtered('served')
213 213 self.ui = repo.ui
214 214 self._caps = repo._restrictcapabilities(caps)
215 215
216 216 # Begin of _basepeer interface.
217 217
218 218 def url(self):
219 219 return self._repo.url()
220 220
221 221 def local(self):
222 222 return self._repo
223 223
224 224 def peer(self):
225 225 return self
226 226
227 227 def canpush(self):
228 228 return True
229 229
230 230 def close(self):
231 231 self._repo.close()
232 232
233 233 # End of _basepeer interface.
234 234
235 235 # Begin of _basewirecommands interface.
236 236
237 237 def branchmap(self):
238 238 return self._repo.branchmap()
239 239
240 240 def capabilities(self):
241 241 return self._caps
242 242
243 243 def clonebundles(self):
244 244 return self._repo.tryread('clonebundles.manifest')
245 245
246 246 def debugwireargs(self, one, two, three=None, four=None, five=None):
247 247 """Used to test argument passing over the wire"""
248 248 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
249 249 pycompat.bytestr(four),
250 250 pycompat.bytestr(five))
251 251
252 252 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
253 253 **kwargs):
254 254 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
255 255 common=common, bundlecaps=bundlecaps,
256 256 **kwargs)[1]
257 257 cb = util.chunkbuffer(chunks)
258 258
259 259 if exchange.bundle2requested(bundlecaps):
260 260 # When requesting a bundle2, getbundle returns a stream to make the
261 261 # wire level function happier. We need to build a proper object
262 262 # from it in local peer.
263 263 return bundle2.getunbundler(self.ui, cb)
264 264 else:
265 265 return changegroup.getunbundler('01', cb, None)
266 266
267 267 def heads(self):
268 268 return self._repo.heads()
269 269
270 270 def known(self, nodes):
271 271 return self._repo.known(nodes)
272 272
273 273 def listkeys(self, namespace):
274 274 return self._repo.listkeys(namespace)
275 275
276 276 def lookup(self, key):
277 277 return self._repo.lookup(key)
278 278
279 279 def pushkey(self, namespace, key, old, new):
280 280 return self._repo.pushkey(namespace, key, old, new)
281 281
282 282 def stream_out(self):
283 283 raise error.Abort(_('cannot perform stream clone against local '
284 284 'peer'))
285 285
286 286 def unbundle(self, bundle, heads, url):
287 287 """apply a bundle on a repo
288 288
289 289 This function handles the repo locking itself."""
290 290 try:
291 291 try:
292 292 bundle = exchange.readbundle(self.ui, bundle, None)
293 293 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
294 294 if util.safehasattr(ret, 'getchunks'):
295 295 # This is a bundle20 object, turn it into an unbundler.
296 296 # This little dance should be dropped eventually when the
297 297 # API is finally improved.
298 298 stream = util.chunkbuffer(ret.getchunks())
299 299 ret = bundle2.getunbundler(self.ui, stream)
300 300 return ret
301 301 except Exception as exc:
302 302 # If the exception contains output salvaged from a bundle2
303 303 # reply, we need to make sure it is printed before continuing
304 304 # to fail. So we build a bundle2 with such output and consume
305 305 # it directly.
306 306 #
307 307 # This is not very elegant but allows a "simple" solution for
308 308 # issue4594
309 309 output = getattr(exc, '_bundle2salvagedoutput', ())
310 310 if output:
311 311 bundler = bundle2.bundle20(self._repo.ui)
312 312 for out in output:
313 313 bundler.addpart(out)
314 314 stream = util.chunkbuffer(bundler.getchunks())
315 315 b = bundle2.getunbundler(self.ui, stream)
316 316 bundle2.processbundle(self._repo, b)
317 317 raise
318 318 except error.PushRaced as exc:
319 319 raise error.ResponseError(_('push failed:'),
320 320 stringutil.forcebytestr(exc))
321 321
322 322 # End of _basewirecommands interface.
323 323
324 324 # Begin of peer interface.
325 325
326 326 def commandexecutor(self):
327 327 return localcommandexecutor(self)
328 328
329 329 # End of peer interface.
330 330
331 331 @interfaceutil.implementer(repository.ipeerlegacycommands)
332 332 class locallegacypeer(localpeer):
333 333 '''peer extension which implements legacy methods too; used for tests with
334 334 restricted capabilities'''
335 335
336 336 def __init__(self, repo):
337 337 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
338 338
339 339 # Begin of baselegacywirecommands interface.
340 340
341 341 def between(self, pairs):
342 342 return self._repo.between(pairs)
343 343
344 344 def branches(self, nodes):
345 345 return self._repo.branches(nodes)
346 346
347 347 def changegroup(self, nodes, source):
348 348 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
349 349 missingheads=self._repo.heads())
350 350 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
351 351
352 352 def changegroupsubset(self, bases, heads, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=bases,
354 354 missingheads=heads)
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 # End of baselegacywirecommands interface.
358 358
359 359 # Increment the sub-version when the revlog v2 format changes to lock out old
360 360 # clients.
361 361 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
362 362
363 363 # A repository with the sparserevlog feature will have delta chains that
364 364 # can spread over a larger span. Sparse reading cuts these large spans into
365 365 # pieces, so that each piece isn't too big.
366 366 # Without the sparserevlog capability, reading from the repository could use
367 367 # huge amounts of memory, because the whole span would be read at once,
368 368 # including all the intermediate revisions that aren't pertinent for the chain.
369 369 # This is why once a repository has enabled sparse-read, it becomes required.
370 370 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
371 371
372 372 # Functions receiving (ui, features) that extensions can register to impact
373 373 # the ability to load repositories with custom requirements. Only
374 374 # functions defined in loaded extensions are called.
375 375 #
376 376 # The function receives a set of requirement strings that the repository
377 377 # is capable of opening. Functions will typically add elements to the
378 378 # set to reflect that the extension knows how to handle that requirements.
379 379 featuresetupfuncs = set()
380 380
381 381 def makelocalrepository(baseui, path, intents=None):
382 382 """Create a local repository object.
383 383
384 384 Given arguments needed to construct a local repository, this function
385 385 performs various early repository loading functionality (such as
386 386 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
387 387 the repository can be opened, derives a type suitable for representing
388 388 that repository, and returns an instance of it.
389 389
390 390 The returned object conforms to the ``repository.completelocalrepository``
391 391 interface.
392 392
393 393 The repository type is derived by calling a series of factory functions
394 394 for each aspect/interface of the final repository. These are defined by
395 395 ``REPO_INTERFACES``.
396 396
397 397 Each factory function is called to produce a type implementing a specific
398 398 interface. The cumulative list of returned types will be combined into a
399 399 new type and that type will be instantiated to represent the local
400 400 repository.
401 401
402 402 The factory functions each receive various state that may be consulted
403 403 as part of deriving a type.
404 404
405 405 Extensions should wrap these factory functions to customize repository type
406 406 creation. Note that an extension's wrapped function may be called even if
407 407 that extension is not loaded for the repo being constructed. Extensions
408 408 should check if their ``__name__`` appears in the
409 409 ``extensionmodulenames`` set passed to the factory function and no-op if
410 410 not.
411 411 """
412 412 ui = baseui.copy()
413 413 # Prevent copying repo configuration.
414 414 ui.copy = baseui.copy
415 415
416 416 # Working directory VFS rooted at repository root.
417 417 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
418 418
419 419 # Main VFS for .hg/ directory.
420 420 hgpath = wdirvfs.join(b'.hg')
421 421 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
422 422
423 423 # The .hg/ path should exist and should be a directory. All other
424 424 # cases are errors.
425 425 if not hgvfs.isdir():
426 426 try:
427 427 hgvfs.stat()
428 428 except OSError as e:
429 429 if e.errno != errno.ENOENT:
430 430 raise
431 431
432 432 raise error.RepoError(_(b'repository %s not found') % path)
433 433
434 434 # .hg/requires file contains a newline-delimited list of
435 435 # features/capabilities the opener (us) must have in order to use
436 436 # the repository. This file was introduced in Mercurial 0.9.2,
437 437 # which means very old repositories may not have one. We assume
438 438 # a missing file translates to no requirements.
439 439 try:
440 440 requirements = set(hgvfs.read(b'requires').splitlines())
441 441 except IOError as e:
442 442 if e.errno != errno.ENOENT:
443 443 raise
444 444 requirements = set()
445 445
446 446 # The .hg/hgrc file may load extensions or contain config options
447 447 # that influence repository construction. Attempt to load it and
448 448 # process any new extensions that it may have pulled in.
449 449 try:
450 450 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
451 451 # Run this before extensions.loadall() so extensions can be
452 452 # automatically enabled.
453 453 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
454 454 except IOError:
455 455 pass
456 456 else:
457 457 extensions.loadall(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we may
480 480 # not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511
512 512 # The store has changed over time and the exact layout is dictated by
513 513 # requirements. The store interface abstracts differences across all
514 514 # of them.
515 515 store = makestore(requirements, storebasepath,
516 516 lambda base: vfsmod.vfs(base, cacheaudited=True))
517 517 hgvfs.createmode = store.createmode
518 518
519 519 storevfs = store.vfs
520 520 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
521 521
522 522 # The cache vfs is used to manage cache files.
523 523 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
524 524 cachevfs.createmode = store.createmode
525 525
526 526 # Now resolve the type for the repository object. We do this by repeatedly
527 527 # calling a factory function to produce types for specific aspects of the
528 528 # repo's operation. The aggregate returned types are used as base classes
529 529 # for a dynamically-derived type, which will represent our new repository.
530 530
531 531 bases = []
532 532 extrastate = {}
533 533
534 534 for iface, fn in REPO_INTERFACES:
535 535 # We pass all potentially useful state to give extensions tons of
536 536 # flexibility.
537 537 typ = fn()(ui=ui,
538 538 intents=intents,
539 539 requirements=requirements,
540 540 features=features,
541 541 wdirvfs=wdirvfs,
542 542 hgvfs=hgvfs,
543 543 store=store,
544 544 storevfs=storevfs,
545 545 storeoptions=storevfs.options,
546 546 cachevfs=cachevfs,
547 547 extensionmodulenames=extensionmodulenames,
548 548 extrastate=extrastate,
549 549 baseclasses=bases)
550 550
551 551 if not isinstance(typ, type):
552 552 raise error.ProgrammingError('unable to construct type for %s' %
553 553 iface)
554 554
555 555 bases.append(typ)
556 556
557 557 # type() allows you to use characters in type names that wouldn't be
558 558 # recognized as Python symbols in source code. We abuse that to add
559 559 # rich information about our constructed repo.
560 560 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
561 561 wdirvfs.base,
562 562 b','.join(sorted(requirements))))
563 563
564 564 cls = type(name, tuple(bases), {})
565 565
566 566 return cls(
567 567 baseui=baseui,
568 568 ui=ui,
569 569 origroot=path,
570 570 wdirvfs=wdirvfs,
571 571 hgvfs=hgvfs,
572 572 requirements=requirements,
573 573 supportedrequirements=supportedrequirements,
574 574 sharedpath=storebasepath,
575 575 store=store,
576 576 cachevfs=cachevfs,
577 577 features=features,
578 578 intents=intents)
579 579
580 580 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
581 581 """Perform additional actions after .hg/hgrc is loaded.
582 582
583 583 This function is called during repository loading immediately after
584 584 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
585 585
586 586 The function can be used to validate configs, automatically add
587 587 options (including extensions) based on requirements, etc.
588 588 """
589 589
590 590 # Map of requirements to list of extensions to load automatically when
591 591 # requirement is present.
592 592 autoextensions = {
593 593 b'largefiles': [b'largefiles'],
594 594 b'lfs': [b'lfs'],
595 595 }
596 596
597 597 for requirement, names in sorted(autoextensions.items()):
598 598 if requirement not in requirements:
599 599 continue
600 600
601 601 for name in names:
602 602 if not ui.hasconfig(b'extensions', name):
603 603 ui.setconfig(b'extensions', name, b'', source='autoload')
604 604
605 605 def gathersupportedrequirements(ui):
606 606 """Determine the complete set of recognized requirements."""
607 607 # Start with all requirements supported by this file.
608 608 supported = set(localrepository._basesupported)
609 609
610 610 # Execute ``featuresetupfuncs`` entries if they belong to an extension
611 611 # relevant to this ui instance.
612 612 modules = {m.__name__ for n, m in extensions.extensions(ui)}
613 613
614 614 for fn in featuresetupfuncs:
615 615 if fn.__module__ in modules:
616 616 fn(ui, supported)
617 617
618 618 # Add derived requirements from registered compression engines.
619 619 for name in util.compengines:
620 620 engine = util.compengines[name]
621 621 if engine.revlogheader():
622 622 supported.add(b'exp-compression-%s' % name)
623 623
624 624 return supported
625 625
def ensurerequirementsrecognized(requirements, supported):
    """Validate that every local requirement is recognized.

    ``requirements`` is the set read from ``.hg/requires``; ``supported``
    is the set the running code knows how to handle. Raises
    ``error.RequirementError`` if any entry is unknown or malformed.
    """
    missing = set()

    for req in requirements:
        if req in supported:
            continue

        # An empty entry, or one not starting with an alphanumeric byte,
        # can't be a legitimate requirement name: the file is corrupt.
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(req)

    if not missing:
        return

    raise error.RequirementError(
        _(b'repository requires features unknown to this Mercurial: %s') %
        b' '.join(sorted(missing)),
        hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
               b'for more information'))
652 652
def ensurerequirementscompatible(ui, requirements):
    """Validate that a set of recognized requirements can be used together.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled. Called during repository opening;
    extensions can monkeypatch this function to add their own checks.

    ``error.RepoError`` should be raised on failure.
    """
    needssparse = b'exp-sparse' in requirements
    if needssparse and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))
670 670
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Pre-"store" repositories keep everything flat in .hg/.
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    if b'fncache' in requirements:
        return storemod.fncachestore(path, vfstype,
                                     b'dotencode' in requirements)

    return storemod.encodedstore(path, vfstype)
681 681
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    opts = {}

    if b'treemanifest' in requirements:
        opts[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        opts[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # That format is so old that we don't bother resolving revlog opener
    # options for it: they wouldn't do anything meaningful.
    usesrevlogs = (b'revlogv1' in requirements or
                   REVLOGV2_REQUIREMENT in requirements)
    if usesrevlogs:
        opts.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return opts
706 706
def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs.

    Reads format requirements and (mostly experimental) config knobs and
    translates them into the opener-option dict consumed by the revlog
    storage layer.
    """

    options = {}
    # Extensions populate this mapping with custom revlog flag processors.
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    # When generaldelta is configured, new deltas are computed against an
    # optimal base instead of the previous revision (lazy delta base off).
    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        # sparse-revlog only makes sense on top of generaldelta.
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen (overrides the sparse default)
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    # The compression engine is derived from the exp-compression-* requirement.
    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
771 771
def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``.

    There is currently a single implementation; extensions wrap this
    function (via ``REPO_INTERFACES``) to substitute their own type.
    """
    return localrepository
775 775
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        """Obtain a filelog for ``path`` (a bytes store path).

        Store paths are used without a leading '/'; strip one if present.
        """
        # Bug fix: ``path[0] == b'/'`` is always False on Python 3 because
        # indexing bytes yields an int. startswith() behaves identically on
        # Python 2 and correctly on Python 3.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.filelog(self.svfs, path)
785 785
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        """Obtain a narrow filelog for ``path`` (a bytes store path).

        Store paths are used without a leading '/'; strip one if present.
        """
        # Bug fix: ``path[0] == b'/'`` is always False on Python 3 because
        # indexing bytes yields an int. startswith() behaves identically on
        # Python 2 and correctly on Python 3.
        if path.startswith(b'/'):
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
795 795
def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    narrowed = repository.NARROW_REQUIREMENT in requirements
    return revlognarrowfilestorage if narrowed else revlogfilestorage
805 805
# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# factory as a lambda so we don't hold a direct reference: that way the
# module-level functions themselves can be wrapped by extensions and the
# wrapper is picked up at call time.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
815 815
816 816 @interfaceutil.implementer(repository.ilocalrepositorymain)
817 817 class localrepository(object):
818 818 """Main class for representing local repositories.
819 819
820 820 All local repositories are instances of this class.
821 821
822 822 Constructed on its own, instances of this class are not usable as
823 823 repository objects. To obtain a usable repository object, call
824 824 ``hg.repository()``, ``localrepo.instance()``, or
825 825 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
826 826 ``instance()`` adds support for creating new repositories.
827 827 ``hg.repository()`` adds more extension integration, including calling
828 828 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
829 829 used.
830 830 """
831 831
    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    # for stem compression of long paths. Experiment ended up not
    # being successful (repository sizes went up due to worse delta
    # chains), and the code was deleted in 4.6.

    # Requirements that affect the on-disk storage format (and therefore
    # clone/streaming compatibility).
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    # All requirements this class can open, format requirements included.
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefix for file which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assume
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't be dirstate covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }
873 873
    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        # None means the unfiltered view; repoview proxies override this.
        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            # Wrap vfs.audit so developer warnings fire on unlocked writes.
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callback to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        # Ensures the "unknown working parent" warning is only emitted once.
        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        # Weak references to the active transaction/locks (if any).
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}
997 997
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs

        Returns a wrapper around ``origfunc`` (the vfs audit callable) that
        emits develwarn messages when a write happens without holding the
        appropriate lock. Only installed when devel.check-locks (or
        devel.all-warnings) is enabled.
        """
        # Weak reference so the closure doesn't keep the repo alive.
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            # Bail out early when the repo is gone or only partially
            # constructed (lock attributes not set up yet).
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            # Read-only accesses never need a lock.
            # NOTE(review): early returns yield None instead of ``ret``;
            # callers apparently tolerate this — confirm before relying on
            # the wrapper's return value.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
1032 1032
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs

        Like ``_getvfsward`` but for the store vfs: store writes are all
        covered by 'lock', so only that lock is checked.
        """
        # Weak reference so the closure doesn't keep the repo alive.
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            # Read-only accesses never need a lock.
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs
1051 1051
    def close(self):
        """Close the repository, flushing lazily-written caches."""
        self._writecaches()
1054 1054
1055 1055 def _writecaches(self):
1056 1056 if self._revbranchcache:
1057 1057 self._revbranchcache.write()
1058 1058
1059 1059 def _restrictcapabilities(self, caps):
1060 1060 if self.ui.configbool('experimental', 'bundle2-advertise'):
1061 1061 caps = set(caps)
1062 1062 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1063 1063 role='client'))
1064 1064 caps.add('bundle2=' + urlreq.quote(capsblob))
1065 1065 return caps
1066 1066
    def _writerequirements(self):
        """Write the current requirement set to .hg/requires."""
        scmutil.writerequires(self.vfs, self.requirements)
1069 1069
1070 1070 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1071 1071 # self -> auditor -> self._checknested -> self
1072 1072
    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos. A fresh auditor is built per access
        # to avoid the self -> auditor -> self._checknested -> self cycle.
        return pathutil.pathauditor(self.root, callback=self._checknested)
1078 1078
    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos. realfs=False skips filesystem checks; a fresh
        # auditor is built per access to avoid a reference cycle.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)
1085 1085
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        ``path`` is an absolute filesystem path; returns True only when it
        falls under a subrepository declared in the working copy's
        ``.hgsub`` state.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # The path is a subrepo itself.
                    return True
                else:
                    # The path lives inside a subrepo: delegate the check.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # Walk up one directory level and retry.
                parts.pop()
        return False
1123 1123
    def peer(self):
        """Return a local peer wrapping this repository.

        Not cached: caching would create a reference cycle.
        """
        return localpeer(self) # not cached to avoid reference cycle
1126 1126
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo (``repoview``); on the
        base class the repository is already unfiltered, so return self."""
        return self
1132 1132
1133 1133 def filtered(self, name, visibilityexceptions=None):
1134 1134 """Return a filtered version of a repository"""
1135 1135 cls = repoview.newtype(self.unfiltered().__class__)
1136 1136 return cls(self, name, visibilityexceptions)
1137 1137
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        """Bookmark store, cached until either bookmarks file changes."""
        return bookmarks.bmstore(self)
1141 1141
    @property
    def _activebookmark(self):
        # Name of the active bookmark, or None when no bookmark is active.
        return self._bookmarks.active
1145 1145
    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        """Phase information cache, keyed off phaseroots and the changelog."""
        return phases.phasecache(self, self._phasedefaults)
1152 1152
    @storecache('obsstore')
    def obsstore(self):
        """Obsolescence marker store, cached until .hg/store/obsstore changes."""
        return obsolete.makestore(self.ui, self)
1156 1156
    @storecache('00changelog.i')
    def changelog(self):
        # trypending makes uncommitted-transaction data visible to the
        # process that owns the pending transaction.
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))
1161 1161
1162 1162 @storecache('00manifest.i')
1163 1163 def manifestlog(self):
1164 1164 rootstore = manifest.manifestrevlog(self.svfs)
1165 1165 return manifest.manifestlog(self.svfs, self, rootstore)
1166 1166
    @repofilecache('dirstate')
    def dirstate(self):
        """The working directory state, cached until .hg/dirstate changes."""
        return self._makedirstate()
1170 1170
1171 1171 def _makedirstate(self):
1172 1172 """Extension point for wrapping the dirstate per-repo."""
1173 1173 sparsematchfn = lambda: sparse.matcher(self)
1174 1174
1175 1175 return dirstate.dirstate(self.vfs, self.ui, self.root,
1176 1176 self._dirstatevalidate, sparsematchfn)
1177 1177
1178 1178 def _dirstatevalidate(self, node):
1179 1179 try:
1180 1180 self.changelog.rev(node)
1181 1181 return node
1182 1182 except error.LookupError:
1183 1183 if not self._dirstatevalidatewarned:
1184 1184 self._dirstatevalidatewarned = True
1185 1185 self.ui.warn(_("warning: ignoring unknown"
1186 1186 " working parent %s!\n") % short(node))
1187 1187 return nullid
1188 1188
    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)
1196 1196
1197 1197 @storecache(narrowspec.FILENAME)
1198 1198 def _narrowmatch(self):
1199 1199 if repository.NARROW_REQUIREMENT not in self.requirements:
1200 1200 return matchmod.always(self.root, '')
1201 1201 include, exclude = self.narrowpats
1202 1202 return narrowspec.match(self.root, include=include, exclude=exclude)
1203 1203
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
1222 1222
    def setnarrowpats(self, newincludes, newexcludes):
        """Replace the narrowspec and drop all caches derived from it."""
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)
1226 1226
    def __getitem__(self, changeid):
        """Return the context for ``changeid``.

        ``changeid`` may be None (working context), an existing context, a
        slice of revnums, an int revnum, one of the symbolic names 'null' /
        'tip' / '.', a 20-byte binary node, or a 40-byte hex node.
        Raises ``error.RepoLookupError`` (or a filtered variant) when the
        revision cannot be resolved.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                # binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                # hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            # The requested data only exists for the working directory.
            return context.workingctx(self)
1289 1289
1290 1290 def __contains__(self, changeid):
1291 1291 """True if the given changeid exists
1292 1292
1293 1293 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1294 1294 specified.
1295 1295 """
1296 1296 try:
1297 1297 self[changeid]
1298 1298 return True
1299 1299 except error.RepoLookupError:
1300 1300 return False
1301 1301
    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    __bool__ = __nonzero__
1306 1306
1307 1307 def __len__(self):
1308 1308 # no need to pay the cost of repoview.changelog
1309 1309 unfi = self.unfiltered()
1310 1310 return len(unfi.changelog)
1311 1311
    def __iter__(self):
        # Iterate revision numbers of the (possibly filtered) changelog.
        return iter(self.changelog)
1314 1314
1315 1315 def revs(self, expr, *args):
1316 1316 '''Find revisions matching a revset.
1317 1317
1318 1318 The revset is specified as a string ``expr`` that may contain
1319 1319 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1320 1320
1321 1321 Revset aliases from the configuration are not expanded. To expand
1322 1322 user aliases, consider calling ``scmutil.revrange()`` or
1323 1323 ``repo.anyrevs([expr], user=True)``.
1324 1324
1325 1325 Returns a revset.abstractsmartset, which is a list-like interface
1326 1326 that contains integer revisions.
1327 1327 '''
1328 1328 expr = revsetlang.formatspec(expr, *args)
1329 1329 m = revset.match(None, expr)
1330 1330 return m(self)
1331 1331
1332 1332 def set(self, expr, *args):
1333 1333 '''Find revisions matching a revset and emit changectx instances.
1334 1334
1335 1335 This is a convenience wrapper around ``revs()`` that iterates the
1336 1336 result and is a generator of changectx instances.
1337 1337
1338 1338 Revset aliases from the configuration are not expanded. To expand
1339 1339 user aliases, consider calling ``scmutil.revrange()``.
1340 1340 '''
1341 1341 for r in self.revs(expr, *args):
1342 1342 yield self[r]
1343 1343
1344 1344 def anyrevs(self, specs, user=False, localalias=None):
1345 1345 '''Find revisions matching one of the given revsets.
1346 1346
1347 1347 Revset aliases from the configuration are not expanded by default. To
1348 1348 expand user aliases, specify ``user=True``. To provide some local
1349 1349 definitions overriding user aliases, set ``localalias`` to
1350 1350 ``{name: definitionstring}``.
1351 1351 '''
1352 1352 if user:
1353 1353 m = revset.matchany(self.ui, specs,
1354 1354 lookup=revset.lookupfn(self),
1355 1355 localalias=localalias)
1356 1356 else:
1357 1357 m = revset.matchany(None, specs, localalias=localalias)
1358 1358 return m(self)
1359 1359
    def url(self):
        """Return the URL of this repository (file: scheme + root path)."""
        return 'file:' + self.root
1362 1362
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.

        When ``throw`` is True a failing hook raises instead of returning
        a failure status.
        """
        return hook.hook(self.ui, self, name, throw, **args)
1371 1371
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1394 1394
1395 1395 def tags(self):
1396 1396 '''return a mapping of tag to node'''
1397 1397 t = {}
1398 1398 if self.changelog.filteredrevs:
1399 1399 tags, tt = self._findtags()
1400 1400 else:
1401 1401 tags = self._tagscache.tags
1402 1402 for k, v in tags.iteritems():
1403 1403 try:
1404 1404 # ignore tags to unknown nodes
1405 1405 self.changelog.rev(v)
1406 1406 t[k] = v
1407 1407 except (error.LookupError, ValueError):
1408 1408 pass
1409 1409 return t
1410 1410
1411 1411 def _findtags(self):
1412 1412 '''Do the hard work of finding tags. Return a pair of dicts
1413 1413 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1414 1414 maps tag name to a string like \'global\' or \'local\'.
1415 1415 Subclasses or extensions are free to add their own tags, but
1416 1416 should be aware that the returned dicts will be retained for the
1417 1417 duration of the localrepo object.'''
1418 1418
1419 1419 # XXX what tagtype should subclasses/extensions use? Currently
1420 1420 # mq and bookmarks add tags, but do not set the tagtype at all.
1421 1421 # Should each extension invent its own tag type? Should there
1422 1422 # be one tagtype for all such "virtual" tags? Or is the status
1423 1423 # quo fine?
1424 1424
1425 1425
1426 1426 # map tag name to (node, hist)
1427 1427 alltags = tagsmod.findglobaltags(self.ui, self)
1428 1428 # map tag name to tag type
1429 1429 tagtypes = dict((tag, 'global') for tag in alltags)
1430 1430
1431 1431 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1432 1432
1433 1433 # Build the return dicts. Have to re-encode tag names because
1434 1434 # the tags module always uses UTF-8 (in order not to lose info
1435 1435 # writing to the cache), but the rest of Mercurial wants them in
1436 1436 # local encoding.
1437 1437 tags = {}
1438 1438 for (name, (node, hist)) in alltags.iteritems():
1439 1439 if node != nullid:
1440 1440 tags[encoding.tolocal(name)] = node
1441 1441 tags['tip'] = self.changelog.tip()
1442 1442 tagtypes = dict([(encoding.tolocal(name), value)
1443 1443 for (name, value) in tagtypes.iteritems()])
1444 1444 return (tags, tagtypes)
1445 1445
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
1456 1456
1457 1457 def tagslist(self):
1458 1458 '''return a list of tags ordered by revision'''
1459 1459 if not self._tagscache.tagslist:
1460 1460 l = []
1461 1461 for t, n in self.tags().iteritems():
1462 1462 l.append((self.changelog.rev(n), t, n))
1463 1463 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1464 1464
1465 1465 return self._tagscache.tagslist
1466 1466
1467 1467 def nodetags(self, node):
1468 1468 '''return the tags associated with a node'''
1469 1469 if not self._tagscache.nodetagscache:
1470 1470 nodetagscache = {}
1471 1471 for t, n in self._tagscache.tags.iteritems():
1472 1472 nodetagscache.setdefault(n, []).append(t)
1473 1473 for tags in nodetagscache.itervalues():
1474 1474 tags.sort()
1475 1475 self._tagscache.nodetagscache = nodetagscache
1476 1476 return self._tagscache.nodetagscache.get(node, [])
1477 1477
1478 1478 def nodebookmarks(self, node):
1479 1479 """return the list of bookmarks pointing to the specified node"""
1480 1480 return self._bookmarks.names(node)
1481 1481
1482 1482 def branchmap(self):
1483 1483 '''returns a dictionary {branch: [branchheads]} with branchheads
1484 1484 ordered by increasing revision number'''
1485 1485 branchmap.updatecache(self)
1486 1486 return self._branchcaches[self.filtername]
1487 1487
1488 1488 @unfilteredmethod
1489 1489 def revbranchcache(self):
1490 1490 if not self._revbranchcache:
1491 1491 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1492 1492 return self._revbranchcache
1493 1493
1494 1494 def branchtip(self, branch, ignoremissing=False):
1495 1495 '''return the tip node for a given branch
1496 1496
1497 1497 If ignoremissing is True, then this method will not raise an error.
1498 1498 This is helpful for callers that only expect None for a missing branch
1499 1499 (e.g. namespace).
1500 1500
1501 1501 '''
1502 1502 try:
1503 1503 return self.branchmap().branchtip(branch)
1504 1504 except KeyError:
1505 1505 if not ignoremissing:
1506 1506 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1507 1507 else:
1508 1508 pass
1509 1509
1510 1510 def lookup(self, key):
1511 1511 return scmutil.revsymbol(self, key).node()
1512 1512
1513 1513 def lookupbranch(self, key):
1514 1514 if key in self.branchmap():
1515 1515 return key
1516 1516
1517 1517 return scmutil.revsymbol(self, key).branch()
1518 1518
1519 1519 def known(self, nodes):
1520 1520 cl = self.changelog
1521 1521 nm = cl.nodemap
1522 1522 filtered = cl.filteredrevs
1523 1523 result = []
1524 1524 for n in nodes:
1525 1525 r = nm.get(n)
1526 1526 resp = not (r is None or r in filtered)
1527 1527 result.append(resp)
1528 1528 return result
1529 1529
1530 1530 def local(self):
1531 1531 return self
1532 1532
1533 1533 def publishing(self):
1534 1534 # it's safe (and desirable) to trust the publish flag unconditionally
1535 1535 # so that we don't finalize changes shared between users via ssh or nfs
1536 1536 return self.ui.configbool('phases', 'publish', untrusted=True)
1537 1537
1538 1538 def cancopy(self):
1539 1539 # so statichttprepo's override of local() works
1540 1540 if not self.local():
1541 1541 return False
1542 1542 if not self.publishing():
1543 1543 return True
1544 1544 # if publishing we can't copy if there is filtered content
1545 1545 return not self.filtered('visible').changelog.filteredrevs
1546 1546
1547 1547 def shared(self):
1548 1548 '''the type of shared repository (None if not shared)'''
1549 1549 if self.sharedpath != self.path:
1550 1550 return 'store'
1551 1551 return None
1552 1552
1553 1553 def wjoin(self, f, *insidef):
1554 1554 return self.vfs.reljoin(self.root, f, *insidef)
1555 1555
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to (p1, p2) and fix up copy records.

        Copy records cannot be adjusted by the dirstate itself because
        that requires access to the parents' manifests, so it is done
        here after the parents change.
        """
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # no second parent: drop copy records whose source and
                # destination are both unknown to the first parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1571 1571
1572 1572 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1573 1573 """changeid can be a changeset revision, node, or tag.
1574 1574 fileid can be a file revision or node."""
1575 1575 return context.filectx(self, path, changeid, fileid,
1576 1576 changectx=changectx)
1577 1577
1578 1578 def getcwd(self):
1579 1579 return self.dirstate.getcwd()
1580 1580
1581 1581 def pathto(self, f, cwd=None):
1582 1582 return self.dirstate.pathto(f, cwd)
1583 1583
    def _loadfilter(self, filter):
        """Load and cache the (matcher, filterfn, params) triples
        configured in the config section named *filter* (e.g. 'encode'
        or 'decode' -- see _encodefilterpats/_decodefilterpats).
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # a registered in-process data filter: the rest of
                        # the command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping through an external command
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1607 1607
1608 1608 def _filter(self, filterpats, filename, data):
1609 1609 for mf, fn, cmd in filterpats:
1610 1610 if mf(filename):
1611 1611 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1612 1612 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1613 1613 break
1614 1614
1615 1615 return data
1616 1616
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached list of (matcher, filterfn, params) for the [encode] section
        return self._loadfilter('encode')
1620 1620
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached list of (matcher, filterfn, params) for the [decode] section
        return self._loadfilter('decode')
1624 1624
1625 1625 def adddatafilter(self, name, filter):
1626 1626 self._datafilters[name] = filter
1627 1627
1628 1628 def wread(self, filename):
1629 1629 if self.wvfs.islink(filename):
1630 1630 data = self.wvfs.readlink(filename)
1631 1631 else:
1632 1632 data = self.wvfs.read(filename)
1633 1633 return self._filter(self._encodefilterpats, filename, data)
1634 1634
1635 1635 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1636 1636 """write ``data`` into ``filename`` in the working directory
1637 1637
1638 1638 This returns length of written (maybe decoded) data.
1639 1639 """
1640 1640 data = self._filter(self._decodefilterpats, filename, data)
1641 1641 if 'l' in flags:
1642 1642 self.wvfs.symlink(data, filename)
1643 1643 else:
1644 1644 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1645 1645 **kwargs)
1646 1646 if 'x' in flags:
1647 1647 self.wvfs.setflags(filename, False, True)
1648 1648 else:
1649 1649 self.wvfs.setflags(filename, False, False)
1650 1650 return len(data)
1651 1651
1652 1652 def wwritedata(self, filename, data):
1653 1653 return self._filter(self._decodefilterpats, filename, data)
1654 1654
1655 1655 def currenttransaction(self):
1656 1656 """return the current transaction or None if non exists"""
1657 1657 if self._transref:
1658 1658 tr = self._transref()
1659 1659 else:
1660 1660 tr = None
1661 1661
1662 1662 if tr and tr.running():
1663 1663 return tr
1664 1664 return None
1665 1665
    def transaction(self, desc, report=None):
        """Open a new transaction named *desc*, or nest into a running one.

        *report* optionally replaces ui.warn for reporting rollback
        messages.  The store lock must already be held.  Returns the
        transaction object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # build a unique transaction id from random data and the clock
        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movement from a code perspective. So we fallback to a
        # tracking at the repository level. One could envision to track changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with case where transaction expose new heads without changegroup
        # being involved (eg: phase movement).
        #
        # For now, We gate the feature behind a flag since this likely comes
        # with performance impacts. The current code run more often than needed
        # and do not use caches as much as it could. The current focus is on
        # the behavior of the feature so we disable it by default. The flag
        # will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag were touched
        # in this one. Changes are recorded in a line base format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follow:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once buiding set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we needs it to still exist on the transaction
                        # is close (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hooks since the 'tr.hookargs'
            # dict is copied before these run. In addition we needs the data
            # available to in memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            # run the hooks only once the outermost lock is released
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1875 1875
1876 1876 def _journalfiles(self):
1877 1877 return ((self.svfs, 'journal'),
1878 1878 (self.vfs, 'journal.dirstate'),
1879 1879 (self.vfs, 'journal.branch'),
1880 1880 (self.vfs, 'journal.desc'),
1881 1881 (self.vfs, 'journal.bookmarks'),
1882 1882 (self.svfs, 'journal.phaseroots'))
1883 1883
1884 1884 def undofiles(self):
1885 1885 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1886 1886
    @unfilteredmethod
    def _writejournal(self, desc):
        """Snapshot non-store state into journal.* files so an interrupted
        transaction can be rolled back.

        Saves the dirstate, narrowspec, current branch, the repo length
        plus *desc* (journal.desc), bookmarks and phaseroots.
        """
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1899 1899
    def recover(self):
        """Roll back an interrupted transaction left on disk.

        Returns True when a journal was found and rolled back, False
        when there was nothing to recover.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1914 1914
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction using the undo files.

        Acquires wlock then lock.  Returns 0 on success (via _rollback)
        and 1 when no rollback information is available.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so it is restored if _rollback fails
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1929 1929
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(); callers hold wlock and lock.

        Reads undo.desc to describe what is being undone, refuses
        (without *force*) to undo a commit while not checked out on tip,
        rolls the store back and restores bookmarks/phases, and -- when
        the dirstate parents were stripped -- the narrowspec, dirstate
        and branch as well.  Returns 0.
        """
        ui = self.ui
        try:
            # undo.desc holds: old repo length, transaction desc, detail
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # did the rollback strip one of the working directory parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            # any in-progress merge is now meaningless; clear its state
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
2002 2002
2003 2003 def _buildcacheupdater(self, newtransaction):
2004 2004 """called during transaction to build the callback updating cache
2005 2005
2006 2006 Lives on the repository to help extension who might want to augment
2007 2007 this logic. For this purpose, the created transaction is passed to the
2008 2008 method.
2009 2009 """
2010 2010 # we must avoid cyclic reference between repo and transaction.
2011 2011 reporef = weakref.ref(self)
2012 2012 def updater(tr):
2013 2013 repo = reporef()
2014 2014 repo.updatecaches(tr)
2015 2015 return updater
2016 2016
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed. The transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            # warm the rev->branch cache for every revision
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough
2047 2047
2048 2048 def invalidatecaches(self):
2049 2049
2050 if '_tagscache' in vars(self):
2050 if r'_tagscache' in vars(self):
2051 2051 # can't use delattr on proxy
2052 del self.__dict__['_tagscache']
2052 del self.__dict__[r'_tagscache']
2053 2053
2054 2054 self.unfiltered()._branchcaches.clear()
2055 2055 self.invalidatevolatilesets()
2056 2056 self._sparsesignaturecache.clear()
2057 2057
2058 2058 def invalidatevolatilesets(self):
2059 2059 self.filteredrevcache.clear()
2060 2060 obsolete.clearobscaches(self)
2061 2061
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            # drop the cached file stats first so a re-created dirstate
            # rereads everything from disk
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')
2078 2078
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                # drop the cached attribute so it is re-read on next access
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
2112 2112
2113 2113 def invalidateall(self):
2114 2114 '''Fully invalidates both store and non-store parts, causing the
2115 2115 subsequent operation to reread any outside changes.'''
2116 2116 # extension should hook this to invalidate its caches
2117 2117 self.invalidate()
2118 2118 self.invalidatedirstate()
2119 2119
2120 2120 @unfilteredmethod
2121 2121 def _refreshfilecachestats(self, tr):
2122 2122 """Reload stats of cached files so that they are flagged as valid"""
2123 2123 for k, ce in self._filecache.items():
2124 2124 k = pycompat.sysstr(k)
2125 2125 if k == r'dirstate' or k not in self.__dict__:
2126 2126 continue
2127 2127 ce.refresh()
2128 2128
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire the lock file *lockname* in *vfs* and return the lock.

        releasefn/acquirefn are run on release/acquisition; *desc*
        describes the lock in user-facing messages.  When *wait* is
        true, the ui.timeout / ui.timeout.warn settings control how long
        to block on a busy lock.
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l
2152 2152
2153 2153 def _afterlock(self, callback):
2154 2154 """add a callback to be run when the repository is fully unlocked
2155 2155
2156 2156 The callback will be executed when the outermost lock is released
2157 2157 (with wlock being higher level than 'lock')."""
2158 2158 for ref in (self._wlockref, self._lockref):
2159 2159 l = ref and ref()
2160 2160 if l and l.held:
2161 2161 l.postrelease.append(callback)
2162 2162 break
2163 2163 else: # no lock have been found.
2164 2164 callback()
2165 2165
2166 2166 def lock(self, wait=True):
2167 2167 '''Lock the repository store (.hg/store) and return a weak reference
2168 2168 to the lock. Use this before modifying the store (e.g. committing or
2169 2169 stripping). If you are opening a transaction, get a lock as well.)
2170 2170
2171 2171 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2172 2172 'wlock' first to avoid a dead-lock hazard.'''
2173 2173 l = self._currentlock(self._lockref)
2174 2174 if l is not None:
2175 2175 l.lock()
2176 2176 return l
2177 2177
2178 2178 l = self._lock(self.svfs, "lock", wait, None,
2179 2179 self.invalidate, _('repository %s') % self.origroot)
2180 2180 self._lockref = weakref.ref(l)
2181 2181 return l
2182 2182
2183 2183 def _wlockchecktransaction(self):
2184 2184 if self.currenttransaction() is not None:
2185 2185 raise error.LockInheritanceContractViolation(
2186 2186 'wlock cannot be inherited in the middle of a transaction')
2187 2187
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the already-held wlock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard, mid parent-change) the dirstate on release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
2223 2223
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        # lockref is a weak reference (or None); the referent may already
        # have been garbage-collected or released.
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l
2232 2232
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        # thin convenience wrapper over _currentlock for the wlock ref
        return self._currentlock(self._wlockref)
2236 2236
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: file context for the file being committed
        manifest1, manifest2: the manifests of the two parents
        linkrev: revision number the new filelog entry will link to
        tr: the running transaction
        changelist: list of changed files; appended to as a side effect

        Returns the filelog node to store in the manifest for this file.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # fctx refers to an already-stored filelog revision; reuse it
            # when it matches one of the parents instead of re-adding.
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory.  However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476).  Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on this side; use the other parent (if any)
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2324 2324
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        wctx: the working context being committed
        vdirs: directories visited by the matcher (match.explicitdir hook)
        match: the matcher restricting the commit
        status: the status of the working directory
        fail: callback(filename, message) invoked for each bad argument
        """
        # Only explicit file/prefix patterns are verified; broad patterns
        # are allowed to match nothing.
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    # a directory argument is fine only if something under
                    # it is actually being committed
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
2345 2345
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files.  If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit and empty commits are not allowed.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and bad files for later validation
            # in checkcommitpatterns()
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                # tell the user where the message was saved before
                # propagating the failure
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # run the "commit" hook only after all locks are released
        self._afterlock(commithook)
        return ret
2457 2457
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files.  On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().

        When ``error`` is true, IOErrors other than ENOENT while committing
        individual files are re-raised.  Returns the new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # a None filectx means the file is being removed
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases.  The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest form p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
2578 2578
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released.  Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing.  Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
2596 2596
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed.  We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
2628 2628
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        # delegate to the context object's status() for the real work
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
2635 2635
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile.  Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure it adds to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
2657 2657
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        # expose the raw list; workingctx iterates and empties it
        return self._postdsstatus
2661 2661
    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        # slice-delete keeps the same list object alive for existing refs
        del self._postdsstatus[:]
2665 2665
2666 2666 def heads(self, start=None):
2667 2667 if start is None:
2668 2668 cl = self.changelog
2669 2669 headrevs = reversed(cl.headrevs())
2670 2670 return [cl.node(rev) for rev in headrevs]
2671 2671
2672 2672 heads = self.changelog.heads(start)
2673 2673 # sort the output in rev descending order
2674 2674 return sorted(heads, key=self.changelog.rev, reverse=True)
2675 2675
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
2696 2696
2697 2697 def branches(self, nodes):
2698 2698 if not nodes:
2699 2699 nodes = [self.changelog.tip()]
2700 2700 b = []
2701 2701 for n in nodes:
2702 2702 t = n
2703 2703 while True:
2704 2704 p = self.changelog.parents(n)
2705 2705 if p[1] != nullid or p[0] == nullid:
2706 2706 b.append((t, n, p[0], p[1]))
2707 2707 break
2708 2708 n = p[0]
2709 2709 return b
2710 2710
2711 2711 def between(self, pairs):
2712 2712 r = []
2713 2713
2714 2714 for top, bottom in pairs:
2715 2715 n, l, i = top, [], 0
2716 2716 f = 1
2717 2717
2718 2718 while n != bottom and n != nullid:
2719 2719 p = self.changelog.parents(n)[0]
2720 2720 if i == f:
2721 2721 l.append(n)
2722 2722 f = f * 2
2723 2723 n = p
2724 2724 i += 1
2725 2725
2726 2726 r.append(l)
2727 2727
2728 2728 return r
2729 2729
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # intentionally a no-op in the base implementation
2735 2735
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # lazily created once per repo instance (property cache)
        return util.hooks()
2742 2742
    def pushkey(self, namespace, key, old, new):
        """Update a pushkey namespace entry, firing pre/post hooks.

        Runs the 'prepushkey' hook first; an abort there is reported to
        the user and makes this return False.  Otherwise delegates to
        pushkey.push and schedules the 'pushkey' hook to run after the
        current lock is released.  Returns the push result.
        """
        try:
            # If we have a transaction, forward its hook arguments so the
            # prepushkey hook sees transaction context (e.g. txnid).
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
2767 2767
    def listkeys(self, namespace):
        """List all entries of a pushkey namespace, firing hooks around it.

        The 'prelistkeys' hook may abort the listing; the 'listkeys' hook
        observes the resulting values.  Returns the namespace's key/value
        mapping from pushkey.list.
        """
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
2774 2774
2775 2775 def debugwireargs(self, one, two, three=None, four=None, five=None):
2776 2776 '''used to test argument passing over the wire'''
2777 2777 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2778 2778 pycompat.bytestr(four),
2779 2779 pycompat.bytestr(five))
2780 2780
    def savecommitmessage(self, text):
        # Persist the message to .hg/last-message.txt so it can be
        # recovered if the commit is rolled back (e.g. by a hook).
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        # return the saved file's path relative to the repository root
        return self.pathto(fp.name[len(self.root) + 1:])
2788 2788
# used to avoid circular references so destructors work
def aftertrans(files):
    """Build the callback that performs post-transaction renames.

    ``files`` is an iterable of (vfs, src, dest) triples.  They are copied
    into plain tuples up front so the returned closure holds no reference
    back to the transaction, avoiding reference cycles that would keep
    destructors from running.
    """
    pending = [tuple(entry) for entry in files]
    def runrenames():
        for vfs, src, dest in pending:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk.  delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return runrenames
2803 2803
def undoname(fn):
    """Map a journal file path to the corresponding undo file path.

    E.g. ``.hg/store/journal.phaseroots`` -> ``.hg/store/undo.phaseroots``.
    The basename must start with 'journal'.
    """
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    undobase = basename.replace('journal', 'undo', 1)
    return os.path.join(directory, undobase)
2808 2808
def instance(ui, path, create, intents=None, createopts=None):
    """Create and/or open the local repository at ``path``.

    ``path`` may be a URL; it is reduced to a local filesystem path first.
    When ``create`` is true the on-disk repository is created before it is
    opened.  Returns the repository object.
    """
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
2815 2815
def islocal(path):
    # this module implements local repositories, so any path handled
    # here is local by definition
    return True
2818 2818
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in; it is copied, never mutated.  Missing keys will be populated
    from configuration.
    """
    opts = {} if createopts is None else dict(createopts)

    if 'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts['backend'] = ui.config('storage', 'new-repo-backend')

    return opts
2832 2832
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.  Returns a set of requirement strings derived from
    ``createopts`` and the ui configuration.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    return requirements
2903 2903
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation.  If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    unknown = {}
    for key, value in createopts.items():
        if key not in known:
            unknown[key] = value
    return unknown
2928 2928
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement.  The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative.  By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog.  This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3016 3016
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            # allowed no-op so lingering references can still close()
            pass

    # We may have a repoview, which intercepts __setattr__.  So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
General Comments 0
You need to be logged in to leave comments. Login now