##// END OF EJS Templates
py3: invalidate repository cache with system-string keys...
Yuya Nishihara -
r40396:dee73a97 default
parent child Browse files
Show More
@@ -1,3699 +1,3699 b''
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help COMMAND` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
files creations or deletions. This behavior can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

It may be desirable for mq changesets to be kept in the secret phase (see
:hg:`help phases`), which can be enabled with the following setting::

  [mq]
  secret = True

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.

If the working directory contains uncommitted files, qpush, qpop and
qgoto abort immediately. If -f/--force is used, the changes are
discarded. Setting::

  [mq]
  keepchanges = True

make them behave as if --keep-changes were passed, and non-conflicting
local changes will be tolerated and preserved. If incompatible options
such as -f/--force or --exact are passed, this setting is ignored.

This extension used to provide a strip command. This command now lives
in the strip extension.
'''
64
64
65 from __future__ import absolute_import, print_function
65 from __future__ import absolute_import, print_function
66
66
67 import errno
67 import errno
68 import os
68 import os
69 import re
69 import re
70 import shutil
70 import shutil
71 from mercurial.i18n import _
71 from mercurial.i18n import _
72 from mercurial.node import (
72 from mercurial.node import (
73 bin,
73 bin,
74 hex,
74 hex,
75 nullid,
75 nullid,
76 nullrev,
76 nullrev,
77 short,
77 short,
78 )
78 )
79 from mercurial import (
79 from mercurial import (
80 cmdutil,
80 cmdutil,
81 commands,
81 commands,
82 dirstateguard,
82 dirstateguard,
83 encoding,
83 encoding,
84 error,
84 error,
85 extensions,
85 extensions,
86 hg,
86 hg,
87 localrepo,
87 localrepo,
88 lock as lockmod,
88 lock as lockmod,
89 logcmdutil,
89 logcmdutil,
90 patch as patchmod,
90 patch as patchmod,
91 phases,
91 phases,
92 pycompat,
92 pycompat,
93 registrar,
93 registrar,
94 revsetlang,
94 revsetlang,
95 scmutil,
95 scmutil,
96 smartset,
96 smartset,
97 subrepoutil,
97 subrepoutil,
98 util,
98 util,
99 vfs as vfsmod,
99 vfs as vfsmod,
100 )
100 )
101 from mercurial.utils import (
101 from mercurial.utils import (
102 dateutil,
102 dateutil,
103 stringutil,
103 stringutil,
104 )
104 )
105
105
# convenience alias used throughout this extension to release lock lists
release = lockmod.release
# shared command-line option for the q* listing commands
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# see the module docstring for the meaning of mq.git
configitem('mq', 'git',
    default='auto',
)
# when True, qpush/qpop/qgoto behave as if --keep-changes were passed
configitem('mq', 'keepchanges',
    default=False,
)
# deprecated: force plain-mail style patch headers instead of hg headers
configitem('mq', 'plain',
    default=False,
)
# when True, new mq changesets are committed in the secret phase
configitem('mq', 'secret',
    default=False,
)

# force load strip extension formerly included in mq and import some utility
try:
    stripext = extensions.find('strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui(object):
        # minimal ui stand-in: extensions.load only needs a debug() sink here
        def debug(self, msg):
            pass
    stripext = extensions.load(dummyui(), 'strip', '')

# re-export the strip helpers this extension still relies on
strip = stripext.strip
checksubstate = stripext.checksubstate
checklocalchanges = stripext.checklocalchanges


# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
152
152
class statusentry(object):
    """One applied patch: the (changeset node, patch name) pair that is
    serialized as ``hex(node):name`` in the queue's status file."""

    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __bytes__(self):
        # mirror the on-disk status file format
        return hex(self.node) + ':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)
162
162
# The order of the headers in 'hg export' HG patches:
# (inserthgheader uses the index in this list as the header's sort rank)
HGHEADERS = [
    # '# HG changeset patch',
    '# User ',
    '# Date ',
    '# ',
    '# Branch ',
    '# Node ID ',
    '# Parent ', # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
# (insertplainheader uses these priorities; lower value sorts first)
PLAINHEADERS = {
    'from': 0,
    'date': 1,
    'subject': 2,
}
179
179
def inserthgheader(lines, header, value):
    """Assuming lines contains a HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    # raises ValueError when the '# HG changeset patch' marker is missing
    pos = lines.index('# HG changeset patch') + 1
    wantedrank = HGHEADERS.index(header)
    insertat = len(lines)
    while pos < len(lines):
        line = lines[pos]
        if not line.startswith('# '):
            # end of the header block - insert no later than here
            insertat = min(insertat, pos)
            break
        for rank, known in enumerate(HGHEADERS):
            if line.startswith(known):
                if rank == wantedrank:
                    # same header already present: overwrite in place
                    lines[pos] = header + value
                    return lines
                if rank > wantedrank:
                    # first later-ranked header: candidate insertion point
                    insertat = min(insertat, pos)
                break # next line
        pos += 1
    lines.insert(insertat, header + value)
    return lines
221
221
def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    prio = PLAINHEADERS[header.lower()]
    insertat = len(lines)
    for idx, line in enumerate(lines):
        if ':' not in line:
            # first non-header line ends the header block
            if line:
                # keep a blank separator between headers and body text
                lines.insert(idx, '')
            insertat = min(insertat, idx)
            break
        existing = line.split(':', 1)[0].strip().lower()
        # unknown headers rank just below the one being inserted
        existingprio = PLAINHEADERS.get(existing, prio + 1)
        if existingprio == prio:
            # same header kind already present: overwrite in place
            lines[idx] = '%s: %s' % (header, value)
            return lines
        if existingprio > prio:
            insertat = min(insertat, idx)
    lines.insert(insertat, '%s: %s' % (header, value))
    return lines
258
258
class patchheader(object):
    """Parsed header of a single mq patch file.

    Reads the file at ``pf`` and splits it into the commit message and
    the raw comment lines preceding the diff, extracting hg export
    metadata (user, date, parent, branch, node id) or plain mail-style
    tags (From:/Date:/Subject:) along the way.

    Attributes set here and read by the rest of this extension:
    ``message`` (list of lines), ``comments`` (all pre-diff lines),
    ``user``, ``date``, ``parent``, ``nodeid``, ``branch`` (bytes or
    None), ``haspatch`` (True when an actual diff follows the header),
    ``diffstartline`` (index of the first diff line, kept for external
    use by TortoiseHg and others) and ``plainmode`` (True when the
    patch uses mail-style headers rather than '# HG changeset patch').
    """

    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            # drop trailing diff command / Index / separator junk
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None       # parser state: None/'hgpatch'/'tag'/'tagdone'
        subject = None
        branch = None
        nodeid = None
        diffstart = 0       # 0: none seen, 1: maybe ('--- '), 2: confirmed

        # NOTE: the original code iterated over open(pf, 'rb') directly and
        # never closed the handle; use a context manager so the file is
        # closed even when we break out at the start of the diff.
        with open(pf, 'rb') as fp:
            for line in fp:
                line = line.rstrip()
                if (line.startswith('diff --git')
                    or (diffstart and line.startswith('+++ '))):
                    diffstart = 2
                    break
                diffstart = 0 # reset
                if line.startswith("--- "):
                    diffstart = 1
                    continue
                elif format == "hgpatch":
                    # parse values when importing the result of an hg export
                    if line.startswith("# User "):
                        user = line[7:]
                    elif line.startswith("# Date "):
                        date = line[7:]
                    elif line.startswith("# Parent "):
                        parent = line[9:].lstrip() # handle double trailing space
                    elif line.startswith("# Branch "):
                        branch = line[9:]
                    elif line.startswith("# Node ID "):
                        nodeid = line[10:]
                    elif not line.startswith("# ") and line:
                        message.append(line)
                        format = None
                elif line == '# HG changeset patch':
                    message = []
                    format = "hgpatch"
                elif (format != "tagdone" and (line.startswith("Subject: ") or
                                               line.startswith("subject: "))):
                    subject = line[9:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("From: ") or
                                               line.startswith("from: "))):
                    user = line[6:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("Date: ") or
                                               line.startswith("date: "))):
                    date = line[6:]
                    format = "tag"
                elif format == "tag" and line == "":
                    # when looking for tags (subject: from: etc) they
                    # end once you find a blank line in the source
                    format = "tagdone"
                elif message or line:
                    message.append(line)
                comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        # plain mode: no hg header marker, but mail-style Date:/From: tags
        self.plainmode = (plainmode or
                          '# HG changeset patch' not in self.comments and
                          any(c.startswith('Date: ') or
                              c.startswith('From: ')
                              for c in self.comments))

    def setuser(self, user):
        """Set/replace the user, using whichever header style applies."""
        try:
            inserthgheader(self.comments, '# User ', user)
        except ValueError:
            # no '# HG changeset patch' marker present
            if self.plainmode:
                insertplainheader(self.comments, 'From', user)
            else:
                tmp = ['# HG changeset patch', '# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set/replace the date, using whichever header style applies."""
        try:
            inserthgheader(self.comments, '# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, 'Date', date)
            else:
                tmp = ['# HG changeset patch', '# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        """Set/replace the parent node; plain patches carry no parent."""
        try:
            inserthgheader(self.comments, '# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = ['# HG changeset patch', '# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        """Replace the commit message, keeping the other header fields."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                # keep a blank line between headers and message body
                self.comments.append('')
            self.comments.append(message)

    def __bytes__(self):
        s = '\n'.join(self.comments).rstrip()
        if not s:
            return ''
        # the header is always followed by a blank line, then the diff
        return s + '\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in pycompat.xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # drop "subject" and the following blank line from message
                    self.message = self.message[2:]
                    break
        ci = 0
        # remove each remaining message line from comments, in order
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
427
427
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    # fall back to the secret phase when mq.secret is set and no explicit
    # phase was requested
    if phase is None and repo.ui.configbool('mq', 'secret'):
        phase = phases.secret
    overrides = {('ui', 'allowemptycommit'): True}
    if phase is not None:
        overrides[('phases', 'new-commit')] = phase
    with repo.ui.configoverride(overrides, 'mq'):
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
444
444
# Abort variant whose name indicates the failed operation should not be
# cleaned up / rolled back; the exact contract is defined by the raisers
# and catchers elsewhere in this file (not visible here) - verify there.
class AbortNoCleanup(error.Abort):
    pass
447
447
448 class queue(object):
448 class queue(object):
449 def __init__(self, ui, baseui, path, patchdir=None):
449 def __init__(self, ui, baseui, path, patchdir=None):
450 self.basepath = path
450 self.basepath = path
451 try:
451 try:
452 with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
452 with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
453 cur = fh.read().rstrip()
453 cur = fh.read().rstrip()
454
454
455 if not cur:
455 if not cur:
456 curpath = os.path.join(path, 'patches')
456 curpath = os.path.join(path, 'patches')
457 else:
457 else:
458 curpath = os.path.join(path, 'patches-' + cur)
458 curpath = os.path.join(path, 'patches-' + cur)
459 except IOError:
459 except IOError:
460 curpath = os.path.join(path, 'patches')
460 curpath = os.path.join(path, 'patches')
461 self.path = patchdir or curpath
461 self.path = patchdir or curpath
462 self.opener = vfsmod.vfs(self.path)
462 self.opener = vfsmod.vfs(self.path)
463 self.ui = ui
463 self.ui = ui
464 self.baseui = baseui
464 self.baseui = baseui
465 self.applieddirty = False
465 self.applieddirty = False
466 self.seriesdirty = False
466 self.seriesdirty = False
467 self.added = []
467 self.added = []
468 self.seriespath = "series"
468 self.seriespath = "series"
469 self.statuspath = "status"
469 self.statuspath = "status"
470 self.guardspath = "guards"
470 self.guardspath = "guards"
471 self.activeguards = None
471 self.activeguards = None
472 self.guardsdirty = False
472 self.guardsdirty = False
473 # Handle mq.git as a bool with extended values
473 # Handle mq.git as a bool with extended values
474 gitmode = ui.config('mq', 'git').lower()
474 gitmode = ui.config('mq', 'git').lower()
475 boolmode = stringutil.parsebool(gitmode)
475 boolmode = stringutil.parsebool(gitmode)
476 if boolmode is not None:
476 if boolmode is not None:
477 if boolmode:
477 if boolmode:
478 gitmode = 'yes'
478 gitmode = 'yes'
479 else:
479 else:
480 gitmode = 'no'
480 gitmode = 'no'
481 self.gitmode = gitmode
481 self.gitmode = gitmode
482 # deprecated config: mq.plain
482 # deprecated config: mq.plain
483 self.plainmode = ui.configbool('mq', 'plain')
483 self.plainmode = ui.configbool('mq', 'plain')
484 self.checkapplied = True
484 self.checkapplied = True
485
485
486 @util.propertycache
486 @util.propertycache
487 def applied(self):
487 def applied(self):
488 def parselines(lines):
488 def parselines(lines):
489 for l in lines:
489 for l in lines:
490 entry = l.split(':', 1)
490 entry = l.split(':', 1)
491 if len(entry) > 1:
491 if len(entry) > 1:
492 n, name = entry
492 n, name = entry
493 yield statusentry(bin(n), name)
493 yield statusentry(bin(n), name)
494 elif l.strip():
494 elif l.strip():
495 self.ui.warn(_('malformated mq status line: %s\n') %
495 self.ui.warn(_('malformated mq status line: %s\n') %
496 stringutil.pprint(entry))
496 stringutil.pprint(entry))
497 # else we ignore empty lines
497 # else we ignore empty lines
498 try:
498 try:
499 lines = self.opener.read(self.statuspath).splitlines()
499 lines = self.opener.read(self.statuspath).splitlines()
500 return list(parselines(lines))
500 return list(parselines(lines))
501 except IOError as e:
501 except IOError as e:
502 if e.errno == errno.ENOENT:
502 if e.errno == errno.ENOENT:
503 return []
503 return []
504 raise
504 raise
505
505
506 @util.propertycache
506 @util.propertycache
507 def fullseries(self):
507 def fullseries(self):
508 try:
508 try:
509 return self.opener.read(self.seriespath).splitlines()
509 return self.opener.read(self.seriespath).splitlines()
510 except IOError as e:
510 except IOError as e:
511 if e.errno == errno.ENOENT:
511 if e.errno == errno.ENOENT:
512 return []
512 return []
513 raise
513 raise
514
514
    @util.propertycache
    def series(self):
        # parseseries() (defined later in this class, not visible here) is
        # expected to store 'series' directly in self.__dict__, which then
        # shadows this propertycache - so the attribute read below returns
        # the parsed value, not a recursive call. TODO confirm at parseseries.
        self.parseseries()
        return self.series
519
519
    @util.propertycache
    def seriesguards(self):
        # same trick as 'series' above: parseseries() is expected to place
        # 'seriesguards' in self.__dict__, shadowing this propertycache, so
        # the attribute read below yields the parsed value. TODO confirm.
        self.parseseries()
        return self.seriesguards
524
524
525 def invalidate(self):
525 def invalidate(self):
526 for a in 'applied fullseries series seriesguards'.split():
526 for a in 'applied fullseries series seriesguards'.split():
527 if a in self.__dict__:
527 if a in self.__dict__:
528 delattr(self, a)
528 delattr(self, a)
529 self.applieddirty = False
529 self.applieddirty = False
530 self.seriesdirty = False
530 self.seriesdirty = False
531 self.guardsdirty = False
531 self.guardsdirty = False
532 self.activeguards = None
532 self.activeguards = None
533
533
534 def diffopts(self, opts=None, patchfn=None, plain=False):
534 def diffopts(self, opts=None, patchfn=None, plain=False):
535 """Return diff options tweaked for this mq use, possibly upgrading to
535 """Return diff options tweaked for this mq use, possibly upgrading to
536 git format, and possibly plain and without lossy options."""
536 git format, and possibly plain and without lossy options."""
537 diffopts = patchmod.difffeatureopts(self.ui, opts,
537 diffopts = patchmod.difffeatureopts(self.ui, opts,
538 git=True, whitespace=not plain, formatchanging=not plain)
538 git=True, whitespace=not plain, formatchanging=not plain)
539 if self.gitmode == 'auto':
539 if self.gitmode == 'auto':
540 diffopts.upgrade = True
540 diffopts.upgrade = True
541 elif self.gitmode == 'keep':
541 elif self.gitmode == 'keep':
542 pass
542 pass
543 elif self.gitmode in ('yes', 'no'):
543 elif self.gitmode in ('yes', 'no'):
544 diffopts.git = self.gitmode == 'yes'
544 diffopts.git = self.gitmode == 'yes'
545 else:
545 else:
546 raise error.Abort(_('mq.git option can be auto/keep/yes/no'
546 raise error.Abort(_('mq.git option can be auto/keep/yes/no'
547 ' got %s') % self.gitmode)
547 ' got %s') % self.gitmode)
548 if patchfn:
548 if patchfn:
549 diffopts = self.patchopts(diffopts, patchfn)
549 diffopts = self.patchopts(diffopts, patchfn)
550 return diffopts
550 return diffopts
551
551
552 def patchopts(self, diffopts, *patches):
552 def patchopts(self, diffopts, *patches):
553 """Return a copy of input diff options with git set to true if
553 """Return a copy of input diff options with git set to true if
554 referenced patch is a git patch and should be preserved as such.
554 referenced patch is a git patch and should be preserved as such.
555 """
555 """
556 diffopts = diffopts.copy()
556 diffopts = diffopts.copy()
557 if not diffopts.git and self.gitmode == 'keep':
557 if not diffopts.git and self.gitmode == 'keep':
558 for patchfn in patches:
558 for patchfn in patches:
559 patchf = self.opener(patchfn, 'r')
559 patchf = self.opener(patchfn, 'r')
560 # if the patch was a git patch, refresh it as a git patch
560 # if the patch was a git patch, refresh it as a git patch
561 diffopts.git = any(line.startswith('diff --git')
561 diffopts.git = any(line.startswith('diff --git')
562 for line in patchf)
562 for line in patchf)
563 patchf.close()
563 patchf.close()
564 return diffopts
564 return diffopts
565
565
566 def join(self, *p):
566 def join(self, *p):
567 return os.path.join(self.path, *p)
567 return os.path.join(self.path, *p)
568
568
569 def findseries(self, patch):
569 def findseries(self, patch):
570 def matchpatch(l):
570 def matchpatch(l):
571 l = l.split('#', 1)[0]
571 l = l.split('#', 1)[0]
572 return l.strip() == patch
572 return l.strip() == patch
573 for index, l in enumerate(self.fullseries):
573 for index, l in enumerate(self.fullseries):
574 if matchpatch(l):
574 if matchpatch(l):
575 return index
575 return index
576 return None
576 return None
577
577
    # Matches a trailing "#+name" / "#-name" guard annotation on a series
    # line (optionally preceded by one whitespace character); the captured
    # group includes the leading '+'/'-' sign.
    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
579
579
580 def parseseries(self):
580 def parseseries(self):
581 self.series = []
581 self.series = []
582 self.seriesguards = []
582 self.seriesguards = []
583 for l in self.fullseries:
583 for l in self.fullseries:
584 h = l.find('#')
584 h = l.find('#')
585 if h == -1:
585 if h == -1:
586 patch = l
586 patch = l
587 comment = ''
587 comment = ''
588 elif h == 0:
588 elif h == 0:
589 continue
589 continue
590 else:
590 else:
591 patch = l[:h]
591 patch = l[:h]
592 comment = l[h:]
592 comment = l[h:]
593 patch = patch.strip()
593 patch = patch.strip()
594 if patch:
594 if patch:
595 if patch in self.series:
595 if patch in self.series:
596 raise error.Abort(_('%s appears more than once in %s') %
596 raise error.Abort(_('%s appears more than once in %s') %
597 (patch, self.join(self.seriespath)))
597 (patch, self.join(self.seriespath)))
598 self.series.append(patch)
598 self.series.append(patch)
599 self.seriesguards.append(self.guard_re.findall(comment))
599 self.seriesguards.append(self.guard_re.findall(comment))
600
600
601 def checkguard(self, guard):
601 def checkguard(self, guard):
602 if not guard:
602 if not guard:
603 return _('guard cannot be an empty string')
603 return _('guard cannot be an empty string')
604 bad_chars = '# \t\r\n\f'
604 bad_chars = '# \t\r\n\f'
605 first = guard[0]
605 first = guard[0]
606 if first in '-+':
606 if first in '-+':
607 return (_('guard %r starts with invalid character: %r') %
607 return (_('guard %r starts with invalid character: %r') %
608 (guard, first))
608 (guard, first))
609 for c in bad_chars:
609 for c in bad_chars:
610 if c in guard:
610 if c in guard:
611 return _('invalid character in guard %r: %r') % (guard, c)
611 return _('invalid character in guard %r: %r') % (guard, c)
612
612
613 def setactive(self, guards):
613 def setactive(self, guards):
614 for guard in guards:
614 for guard in guards:
615 bad = self.checkguard(guard)
615 bad = self.checkguard(guard)
616 if bad:
616 if bad:
617 raise error.Abort(bad)
617 raise error.Abort(bad)
618 guards = sorted(set(guards))
618 guards = sorted(set(guards))
619 self.ui.debug('active guards: %s\n' % ' '.join(guards))
619 self.ui.debug('active guards: %s\n' % ' '.join(guards))
620 self.activeguards = guards
620 self.activeguards = guards
621 self.guardsdirty = True
621 self.guardsdirty = True
622
622
    def active(self):
        """Return the list of active guards, lazily loaded from the guards
        file and cached on self.activeguards."""
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # a missing guards file just means no guards are active
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    # warn about (and skip) malformed entries rather than
                    # aborting, so a corrupt guards file does not wedge mq
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards
640
640
641 def setguards(self, idx, guards):
641 def setguards(self, idx, guards):
642 for g in guards:
642 for g in guards:
643 if len(g) < 2:
643 if len(g) < 2:
644 raise error.Abort(_('guard %r too short') % g)
644 raise error.Abort(_('guard %r too short') % g)
645 if g[0] not in '-+':
645 if g[0] not in '-+':
646 raise error.Abort(_('guard %r starts with invalid char') % g)
646 raise error.Abort(_('guard %r starts with invalid char') % g)
647 bad = self.checkguard(g[1:])
647 bad = self.checkguard(g[1:])
648 if bad:
648 if bad:
649 raise error.Abort(bad)
649 raise error.Abort(bad)
650 drop = self.guard_re.sub('', self.fullseries[idx])
650 drop = self.guard_re.sub('', self.fullseries[idx])
651 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
651 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
652 self.parseseries()
652 self.parseseries()
653 self.seriesdirty = True
653 self.seriesdirty = True
654
654
    def pushable(self, idx):
        """Return (pushable, reason) for series entry *idx*.

        *idx* may be a patch name (bytes) or a series index.  A patch is
        blocked either by a negative guard matching an active guard, or
        by having positive guards none of which is active."""
        if isinstance(idx, bytes):
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            # unguarded patches are always pushable
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards
                    if g.startswith('-') and g[1:] in guards]
        if exactneg:
            return False, stringutil.pprint(exactneg[0])
        pos = [g for g in patchguards if g.startswith('+')]
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, stringutil.pprint(exactpos[0])
            # positive guards exist but none is active: not pushable
            return False, ' '.join([stringutil.pprint(p) for p in pos])
        return True, ''
673
673
    def explainpushable(self, idx, all_patches=False):
        """Print why series entry *idx* is or is not pushable.

        With all_patches, report on every patch via ui.write; otherwise
        use ui.warn and only when --verbose is in effect."""
        if all_patches:
            write = self.ui.write
        else:
            write = self.ui.warn

        if all_patches or self.ui.verbose:
            if isinstance(idx, bytes):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                # why is None for unguarded, '' for "no matching negatives"
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
702
702
    def savedirty(self):
        """Flush dirty queue state (status, series, guards) to disk and
        schedule newly added patch files for addition in the patches repo."""
        def writelist(items, path):
            # write one item per line through the queue's opener
            fp = self.opener(path, 'wb')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(bytes, self.applied), self.statuspath)
            self.applieddirty = False
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
            self.seriesdirty = False
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
            self.guardsdirty = False
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                # track the new patch files in the versioned patch queue
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []
723
723
724 def removeundo(self, repo):
724 def removeundo(self, repo):
725 undo = repo.sjoin('undo')
725 undo = repo.sjoin('undo')
726 if not os.path.exists(undo):
726 if not os.path.exists(undo):
727 return
727 return
728 try:
728 try:
729 os.unlink(undo)
729 os.unlink(undo)
730 except OSError as inst:
730 except OSError as inst:
731 self.ui.warn(_('error removing undo: %s\n') %
731 self.ui.warn(_('error removing undo: %s\n') %
732 stringutil.forcebytestr(inst))
732 stringutil.forcebytestr(inst))
733
733
    def backup(self, repo, files, copy=False):
        """Preserve working-directory versions of *files* as .orig files.

        Used to save local changes in the --force case.  With copy=True
        the originals are left in place, otherwise they are renamed."""
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                self.ui.note(_('saving current version of %s as %s\n') %
                             (f, scmutil.origpath(self.ui, repo, f)))

                absorig = scmutil.origpath(self.ui, repo, absf)
                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)
747
747
748 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
748 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
749 fp=None, changes=None, opts=None):
749 fp=None, changes=None, opts=None):
750 if opts is None:
750 if opts is None:
751 opts = {}
751 opts = {}
752 stat = opts.get('stat')
752 stat = opts.get('stat')
753 m = scmutil.match(repo[node1], files, opts)
753 m = scmutil.match(repo[node1], files, opts)
754 logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
754 logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
755 changes, stat, fp)
755 changes, stat, fp)
756
756
    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        """Bring *patch* (applied as *rev* in *mergeq*) into this queue on
        top of *head*, merging when a plain application fails.

        Returns (err, node) like apply(); on a merge, the patch file is
        regenerated from the merge result."""
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise error.Abort(_("update returned %d") % ret)
        # commit the merge result, reusing the original description/user
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_("unable to read %s") % patch)

        # rewrite the patch file from the merged result
        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = bytes(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
795
795
796 def qparents(self, repo, rev=None):
796 def qparents(self, repo, rev=None):
797 """return the mq handled parent or p1
797 """return the mq handled parent or p1
798
798
799 In some case where mq get himself in being the parent of a merge the
799 In some case where mq get himself in being the parent of a merge the
800 appropriate parent may be p2.
800 appropriate parent may be p2.
801 (eg: an in progress merge started with mq disabled)
801 (eg: an in progress merge started with mq disabled)
802
802
803 If no parent are managed by mq, p1 is returned.
803 If no parent are managed by mq, p1 is returned.
804 """
804 """
805 if rev is None:
805 if rev is None:
806 (p1, p2) = repo.dirstate.parents()
806 (p1, p2) = repo.dirstate.parents()
807 if p2 == nullid:
807 if p2 == nullid:
808 return p1
808 return p1
809 if not self.applied:
809 if not self.applied:
810 return None
810 return None
811 return self.applied[-1].node
811 return self.applied[-1].node
812 p1, p2 = repo.changelog.parents(rev)
812 p1, p2 = repo.changelog.parents(rev)
813 if p2 != nullid and p2 in [x.node for x in self.applied]:
813 if p2 != nullid and p2 in [x.node for x in self.applied]:
814 return p2
814 return p2
815 return p1
815 return p1
816
816
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Pull each patch of *series* from *mergeq* into this queue,
        merging where necessary.  Returns (err, headnode)."""
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = newcommit(repo, None, '[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            # head advances to the newly created node after each merge
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)
855
855
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file

        Returns (success, files, fuzz): *files* is the list of files the
        patch touched (even on failure), *fuzz* whether it applied with
        fuzz.'''
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception as inst:
            # deliberately broad: any patching error is reported and turned
            # into a failure result rather than propagated
            self.ui.note(stringutil.forcebytestr(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)
870
870
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None,
              tobackup=None, keepchanges=False):
        """Apply *series* under wlock/lock inside a 'qpush' transaction.

        Transactional wrapper around _apply(): on success the transaction
        is closed and dirty state saved; on AbortNoCleanup the partial
        result is intentionally kept (closed + saved) before re-raising;
        any other exception aborts the transaction and invalidates the
        in-memory queue state."""
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files,
                                  tobackup=tobackup, keepchanges=keepchanges)
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                # keep whatever was applied so far; the caller handles it
                tr.close()
                self.savedirty()
                raise
            except: # re-raises
                try:
                    tr.abort()
                finally:
                    # cached state no longer matches on-disk reality
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)
899
899
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None,
               tobackup=None, keepchanges=False):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.

        hash is the node of the last changeset created (None if nothing
        was applied).  With update_status, each applied patch is appended
        to self.applied; all_files, when given, accumulates every file
        touched."""
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    # back up (and with keepchanges, refuse to clobber) any
                    # local changes to files this patch will touch
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _("conflicting local changes found"),
                            hint=_("did you forget to qrefresh?"))
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                with repo.dirstate.parentchange():
                    for f in removed:
                        repo.dirstate.remove(f)
                    for f in merged:
                        repo.dirstate.merge(f)
                    p1, p2 = repo.dirstate.parents()
                    repo.setparents(p1, merge)

            if all_files and '.hgsubstate' in all_files:
                # the patch touched subrepo state: merge .hgsubstate too
                wctx = repo[None]
                pctx = repo['.']
                overwrite = False
                mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
                                                      overwrite)
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo.changelog.tip()
            n = newcommit(repo, None, message, ph.user, ph.date, match=match,
                          force=True)
            if repo.changelog.tip() == oldtip:
                raise error.Abort(_("qpush exactly duplicates child changeset"))
            if n is None:
                raise error.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                # commit happened above so rejects can be qrefresh-ed
                self.ui.warn(_("patch failed, rejects left in working "
                               "directory\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
1005
1005
    def _cleanup(self, patches, numrevs, keep=False):
        """Forget *patches*: pop the first *numrevs* applied entries,
        remove the patches from the series and, unless *keep*, delete the
        patch files.  Returns the nodes of the dropped applied entries.

        Aborts when a patch is unknown to the series and cannot be tied to
        a finished revision."""
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except OSError as inst:
                    # already-missing patch files are fine
                    if inst.errno != errno.ENOENT:
                        raise
        
        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True
        
        unknown = []
        
        # pair each patch with its series index so deletions can proceed
        # from the highest index down without shifting later positions
        sortedseries = []
        for p in patches:
            idx = self.findseries(p)
            if idx is None:
                sortedseries.append((-1, p))
            else:
                sortedseries.append((idx, p))
        
        sortedseries.sort(reverse=True)
        for (i, p) in sortedseries:
            if i != -1:
                del self.fullseries[i]
            else:
                unknown.append(p)
        
        if unknown:
            if numrevs:
                # report which finished revision referenced each unknown patch
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise error.Abort(''.join(msg % p for p in unknown))
        
        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]
1054
1054
    def _revpatches(self, repo, revs):
        """Map applied revisions *revs* to their patch names.

        *revs* is expected in ascending order aligned with the bottom of
        the applied stack (see finish(), which passes sorted(revs));
        aborts when a rev is below the first applied patch or does not
        match the corresponding applied entry."""
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise error.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise error.Abort(msg % rev)

            patch = self.applied[i].name
            # warn when the changeset still carries an auto-generated
            # description instead of a real commit message
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
1078
1078
1079 def finish(self, repo, revs):
1079 def finish(self, repo, revs):
1080 # Manually trigger phase computation to ensure phasedefaults is
1080 # Manually trigger phase computation to ensure phasedefaults is
1081 # executed before we remove the patches.
1081 # executed before we remove the patches.
1082 repo._phasecache
1082 repo._phasecache
1083 patches = self._revpatches(repo, sorted(revs))
1083 patches = self._revpatches(repo, sorted(revs))
1084 qfinished = self._cleanup(patches, len(patches))
1084 qfinished = self._cleanup(patches, len(patches))
1085 if qfinished and repo.ui.configbool('mq', 'secret'):
1085 if qfinished and repo.ui.configbool('mq', 'secret'):
1086 # only use this logic when the secret option is added
1086 # only use this logic when the secret option is added
1087 oldqbase = repo[qfinished[0]]
1087 oldqbase = repo[qfinished[0]]
1088 tphase = phases.newcommitphase(repo.ui)
1088 tphase = phases.newcommitphase(repo.ui)
1089 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
1089 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
1090 with repo.transaction('qfinish') as tr:
1090 with repo.transaction('qfinish') as tr:
1091 phases.advanceboundary(repo, tr, tphase, qfinished)
1091 phases.advanceboundary(repo, tr, tphase, qfinished)
1092
1092
1093 def delete(self, repo, patches, opts):
1093 def delete(self, repo, patches, opts):
1094 if not patches and not opts.get('rev'):
1094 if not patches and not opts.get('rev'):
1095 raise error.Abort(_('qdelete requires at least one revision or '
1095 raise error.Abort(_('qdelete requires at least one revision or '
1096 'patch name'))
1096 'patch name'))
1097
1097
1098 realpatches = []
1098 realpatches = []
1099 for patch in patches:
1099 for patch in patches:
1100 patch = self.lookup(patch, strict=True)
1100 patch = self.lookup(patch, strict=True)
1101 info = self.isapplied(patch)
1101 info = self.isapplied(patch)
1102 if info:
1102 if info:
1103 raise error.Abort(_("cannot delete applied patch %s") % patch)
1103 raise error.Abort(_("cannot delete applied patch %s") % patch)
1104 if patch not in self.series:
1104 if patch not in self.series:
1105 raise error.Abort(_("patch %s not in series file") % patch)
1105 raise error.Abort(_("patch %s not in series file") % patch)
1106 if patch not in realpatches:
1106 if patch not in realpatches:
1107 realpatches.append(patch)
1107 realpatches.append(patch)
1108
1108
1109 numrevs = 0
1109 numrevs = 0
1110 if opts.get('rev'):
1110 if opts.get('rev'):
1111 if not self.applied:
1111 if not self.applied:
1112 raise error.Abort(_('no patches applied'))
1112 raise error.Abort(_('no patches applied'))
1113 revs = scmutil.revrange(repo, opts.get('rev'))
1113 revs = scmutil.revrange(repo, opts.get('rev'))
1114 revs.sort()
1114 revs.sort()
1115 revpatches = self._revpatches(repo, revs)
1115 revpatches = self._revpatches(repo, revs)
1116 realpatches += revpatches
1116 realpatches += revpatches
1117 numrevs = len(revpatches)
1117 numrevs = len(revpatches)
1118
1118
1119 self._cleanup(realpatches, numrevs, opts.get('keep'))
1119 self._cleanup(realpatches, numrevs, opts.get('keep'))
1120
1120
1121 def checktoppatch(self, repo):
1121 def checktoppatch(self, repo):
1122 '''check that working directory is at qtip'''
1122 '''check that working directory is at qtip'''
1123 if self.applied:
1123 if self.applied:
1124 top = self.applied[-1].node
1124 top = self.applied[-1].node
1125 patch = self.applied[-1].name
1125 patch = self.applied[-1].name
1126 if repo.dirstate.p1() != top:
1126 if repo.dirstate.p1() != top:
1127 raise error.Abort(_("working directory revision is not qtip"))
1127 raise error.Abort(_("working directory revision is not qtip"))
1128 return top, patch
1128 return top, patch
1129 return None, None
1129 return None, None
1130
1130
1131 def putsubstate2changes(self, substatestate, changes):
1131 def putsubstate2changes(self, substatestate, changes):
1132 for files in changes[:3]:
1132 for files in changes[:3]:
1133 if '.hgsubstate' in files:
1133 if '.hgsubstate' in files:
1134 return # already listed up
1134 return # already listed up
1135 # not yet listed up
1135 # not yet listed up
1136 if substatestate in 'a?':
1136 if substatestate in 'a?':
1137 changes[1].append('.hgsubstate')
1137 changes[1].append('.hgsubstate')
1138 elif substatestate in 'r':
1138 elif substatestate in 'r':
1139 changes[2].append('.hgsubstate')
1139 changes[2].append('.hgsubstate')
1140 else: # modified
1140 else: # modified
1141 changes[0].append('.hgsubstate')
1141 changes[0].append('.hgsubstate')
1142
1142
1143 def checklocalchanges(self, repo, force=False, refresh=True):
1143 def checklocalchanges(self, repo, force=False, refresh=True):
1144 excsuffix = ''
1144 excsuffix = ''
1145 if refresh:
1145 if refresh:
1146 excsuffix = ', qrefresh first'
1146 excsuffix = ', qrefresh first'
1147 # plain versions for i18n tool to detect them
1147 # plain versions for i18n tool to detect them
1148 _("local changes found, qrefresh first")
1148 _("local changes found, qrefresh first")
1149 _("local changed subrepos found, qrefresh first")
1149 _("local changed subrepos found, qrefresh first")
1150 return checklocalchanges(repo, force, excsuffix)
1150 return checklocalchanges(repo, force, excsuffix)
1151
1151
1152 _reserved = ('series', 'status', 'guards', '.', '..')
1152 _reserved = ('series', 'status', 'guards', '.', '..')
1153 def checkreservedname(self, name):
1153 def checkreservedname(self, name):
1154 if name in self._reserved:
1154 if name in self._reserved:
1155 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1155 raise error.Abort(_('"%s" cannot be used as the name of a patch')
1156 % name)
1156 % name)
1157 if name != name.strip():
1157 if name != name.strip():
1158 # whitespace is stripped by parseseries()
1158 # whitespace is stripped by parseseries()
1159 raise error.Abort(_('patch name cannot begin or end with '
1159 raise error.Abort(_('patch name cannot begin or end with '
1160 'whitespace'))
1160 'whitespace'))
1161 for prefix in ('.hg', '.mq'):
1161 for prefix in ('.hg', '.mq'):
1162 if name.startswith(prefix):
1162 if name.startswith(prefix):
1163 raise error.Abort(_('patch name cannot begin with "%s"')
1163 raise error.Abort(_('patch name cannot begin with "%s"')
1164 % prefix)
1164 % prefix)
1165 for c in ('#', ':', '\r', '\n'):
1165 for c in ('#', ':', '\r', '\n'):
1166 if c in name:
1166 if c in name:
1167 raise error.Abort(_('%r cannot be used in the name of a patch')
1167 raise error.Abort(_('%r cannot be used in the name of a patch')
1168 % pycompat.bytestr(c))
1168 % pycompat.bytestr(c))
1169
1169
1170 def checkpatchname(self, name, force=False):
1170 def checkpatchname(self, name, force=False):
1171 self.checkreservedname(name)
1171 self.checkreservedname(name)
1172 if not force and os.path.exists(self.join(name)):
1172 if not force and os.path.exists(self.join(name)):
1173 if os.path.isdir(self.join(name)):
1173 if os.path.isdir(self.join(name)):
1174 raise error.Abort(_('"%s" already exists as a directory')
1174 raise error.Abort(_('"%s" already exists as a directory')
1175 % name)
1175 % name)
1176 else:
1176 else:
1177 raise error.Abort(_('patch "%s" already exists') % name)
1177 raise error.Abort(_('patch "%s" already exists') % name)
1178
1178
1179 def makepatchname(self, title, fallbackname):
1179 def makepatchname(self, title, fallbackname):
1180 """Return a suitable filename for title, adding a suffix to make
1180 """Return a suitable filename for title, adding a suffix to make
1181 it unique in the existing list"""
1181 it unique in the existing list"""
1182 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
1182 namebase = re.sub('[\s\W_]+', '_', title.lower()).strip('_')
1183 namebase = namebase[:75] # avoid too long name (issue5117)
1183 namebase = namebase[:75] # avoid too long name (issue5117)
1184 if namebase:
1184 if namebase:
1185 try:
1185 try:
1186 self.checkreservedname(namebase)
1186 self.checkreservedname(namebase)
1187 except error.Abort:
1187 except error.Abort:
1188 namebase = fallbackname
1188 namebase = fallbackname
1189 else:
1189 else:
1190 namebase = fallbackname
1190 namebase = fallbackname
1191 name = namebase
1191 name = namebase
1192 i = 0
1192 i = 0
1193 while True:
1193 while True:
1194 if name not in self.fullseries:
1194 if name not in self.fullseries:
1195 try:
1195 try:
1196 self.checkpatchname(name)
1196 self.checkpatchname(name)
1197 break
1197 break
1198 except error.Abort:
1198 except error.Abort:
1199 pass
1199 pass
1200 i += 1
1200 i += 1
1201 name = '%s__%d' % (namebase, i)
1201 name = '%s__%d' % (namebase, i)
1202 return name
1202 return name
1203
1203
1204 def checkkeepchanges(self, keepchanges, force):
1204 def checkkeepchanges(self, keepchanges, force):
1205 if force and keepchanges:
1205 if force and keepchanges:
1206 raise error.Abort(_('cannot use both --force and --keep-changes'))
1206 raise error.Abort(_('cannot use both --force and --keep-changes'))
1207
1207
1208 def new(self, repo, patchfn, *pats, **opts):
1208 def new(self, repo, patchfn, *pats, **opts):
1209 """options:
1209 """options:
1210 msg: a string or a no-argument function returning a string
1210 msg: a string or a no-argument function returning a string
1211 """
1211 """
1212 opts = pycompat.byteskwargs(opts)
1212 opts = pycompat.byteskwargs(opts)
1213 msg = opts.get('msg')
1213 msg = opts.get('msg')
1214 edit = opts.get('edit')
1214 edit = opts.get('edit')
1215 editform = opts.get('editform', 'mq.qnew')
1215 editform = opts.get('editform', 'mq.qnew')
1216 user = opts.get('user')
1216 user = opts.get('user')
1217 date = opts.get('date')
1217 date = opts.get('date')
1218 if date:
1218 if date:
1219 date = dateutil.parsedate(date)
1219 date = dateutil.parsedate(date)
1220 diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
1220 diffopts = self.diffopts({'git': opts.get('git')}, plain=True)
1221 if opts.get('checkname', True):
1221 if opts.get('checkname', True):
1222 self.checkpatchname(patchfn)
1222 self.checkpatchname(patchfn)
1223 inclsubs = checksubstate(repo)
1223 inclsubs = checksubstate(repo)
1224 if inclsubs:
1224 if inclsubs:
1225 substatestate = repo.dirstate['.hgsubstate']
1225 substatestate = repo.dirstate['.hgsubstate']
1226 if opts.get('include') or opts.get('exclude') or pats:
1226 if opts.get('include') or opts.get('exclude') or pats:
1227 # detect missing files in pats
1227 # detect missing files in pats
1228 def badfn(f, msg):
1228 def badfn(f, msg):
1229 if f != '.hgsubstate': # .hgsubstate is auto-created
1229 if f != '.hgsubstate': # .hgsubstate is auto-created
1230 raise error.Abort('%s: %s' % (f, msg))
1230 raise error.Abort('%s: %s' % (f, msg))
1231 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1231 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1232 changes = repo.status(match=match)
1232 changes = repo.status(match=match)
1233 else:
1233 else:
1234 changes = self.checklocalchanges(repo, force=True)
1234 changes = self.checklocalchanges(repo, force=True)
1235 commitfiles = list(inclsubs)
1235 commitfiles = list(inclsubs)
1236 for files in changes[:3]:
1236 for files in changes[:3]:
1237 commitfiles.extend(files)
1237 commitfiles.extend(files)
1238 match = scmutil.matchfiles(repo, commitfiles)
1238 match = scmutil.matchfiles(repo, commitfiles)
1239 if len(repo[None].parents()) > 1:
1239 if len(repo[None].parents()) > 1:
1240 raise error.Abort(_('cannot manage merge changesets'))
1240 raise error.Abort(_('cannot manage merge changesets'))
1241 self.checktoppatch(repo)
1241 self.checktoppatch(repo)
1242 insert = self.fullseriesend()
1242 insert = self.fullseriesend()
1243 with repo.wlock():
1243 with repo.wlock():
1244 try:
1244 try:
1245 # if patch file write fails, abort early
1245 # if patch file write fails, abort early
1246 p = self.opener(patchfn, "w")
1246 p = self.opener(patchfn, "w")
1247 except IOError as e:
1247 except IOError as e:
1248 raise error.Abort(_('cannot write patch "%s": %s')
1248 raise error.Abort(_('cannot write patch "%s": %s')
1249 % (patchfn, encoding.strtolocal(e.strerror)))
1249 % (patchfn, encoding.strtolocal(e.strerror)))
1250 try:
1250 try:
1251 defaultmsg = "[mq]: %s" % patchfn
1251 defaultmsg = "[mq]: %s" % patchfn
1252 editor = cmdutil.getcommiteditor(editform=editform)
1252 editor = cmdutil.getcommiteditor(editform=editform)
1253 if edit:
1253 if edit:
1254 def finishdesc(desc):
1254 def finishdesc(desc):
1255 if desc.rstrip():
1255 if desc.rstrip():
1256 return desc
1256 return desc
1257 else:
1257 else:
1258 return defaultmsg
1258 return defaultmsg
1259 # i18n: this message is shown in editor with "HG: " prefix
1259 # i18n: this message is shown in editor with "HG: " prefix
1260 extramsg = _('Leave message empty to use default message.')
1260 extramsg = _('Leave message empty to use default message.')
1261 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1261 editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
1262 extramsg=extramsg,
1262 extramsg=extramsg,
1263 editform=editform)
1263 editform=editform)
1264 commitmsg = msg
1264 commitmsg = msg
1265 else:
1265 else:
1266 commitmsg = msg or defaultmsg
1266 commitmsg = msg or defaultmsg
1267
1267
1268 n = newcommit(repo, None, commitmsg, user, date, match=match,
1268 n = newcommit(repo, None, commitmsg, user, date, match=match,
1269 force=True, editor=editor)
1269 force=True, editor=editor)
1270 if n is None:
1270 if n is None:
1271 raise error.Abort(_("repo commit failed"))
1271 raise error.Abort(_("repo commit failed"))
1272 try:
1272 try:
1273 self.fullseries[insert:insert] = [patchfn]
1273 self.fullseries[insert:insert] = [patchfn]
1274 self.applied.append(statusentry(n, patchfn))
1274 self.applied.append(statusentry(n, patchfn))
1275 self.parseseries()
1275 self.parseseries()
1276 self.seriesdirty = True
1276 self.seriesdirty = True
1277 self.applieddirty = True
1277 self.applieddirty = True
1278 nctx = repo[n]
1278 nctx = repo[n]
1279 ph = patchheader(self.join(patchfn), self.plainmode)
1279 ph = patchheader(self.join(patchfn), self.plainmode)
1280 if user:
1280 if user:
1281 ph.setuser(user)
1281 ph.setuser(user)
1282 if date:
1282 if date:
1283 ph.setdate('%d %d' % date)
1283 ph.setdate('%d %d' % date)
1284 ph.setparent(hex(nctx.p1().node()))
1284 ph.setparent(hex(nctx.p1().node()))
1285 msg = nctx.description().strip()
1285 msg = nctx.description().strip()
1286 if msg == defaultmsg.strip():
1286 if msg == defaultmsg.strip():
1287 msg = ''
1287 msg = ''
1288 ph.setmessage(msg)
1288 ph.setmessage(msg)
1289 p.write(bytes(ph))
1289 p.write(bytes(ph))
1290 if commitfiles:
1290 if commitfiles:
1291 parent = self.qparents(repo, n)
1291 parent = self.qparents(repo, n)
1292 if inclsubs:
1292 if inclsubs:
1293 self.putsubstate2changes(substatestate, changes)
1293 self.putsubstate2changes(substatestate, changes)
1294 chunks = patchmod.diff(repo, node1=parent, node2=n,
1294 chunks = patchmod.diff(repo, node1=parent, node2=n,
1295 changes=changes, opts=diffopts)
1295 changes=changes, opts=diffopts)
1296 for chunk in chunks:
1296 for chunk in chunks:
1297 p.write(chunk)
1297 p.write(chunk)
1298 p.close()
1298 p.close()
1299 r = self.qrepo()
1299 r = self.qrepo()
1300 if r:
1300 if r:
1301 r[None].add([patchfn])
1301 r[None].add([patchfn])
1302 except: # re-raises
1302 except: # re-raises
1303 repo.rollback()
1303 repo.rollback()
1304 raise
1304 raise
1305 except Exception:
1305 except Exception:
1306 patchpath = self.join(patchfn)
1306 patchpath = self.join(patchfn)
1307 try:
1307 try:
1308 os.unlink(patchpath)
1308 os.unlink(patchpath)
1309 except OSError:
1309 except OSError:
1310 self.ui.warn(_('error unlinking %s\n') % patchpath)
1310 self.ui.warn(_('error unlinking %s\n') % patchpath)
1311 raise
1311 raise
1312 self.removeundo(repo)
1312 self.removeundo(repo)
1313
1313
1314 def isapplied(self, patch):
1314 def isapplied(self, patch):
1315 """returns (index, rev, patch)"""
1315 """returns (index, rev, patch)"""
1316 for i, a in enumerate(self.applied):
1316 for i, a in enumerate(self.applied):
1317 if a.name == patch:
1317 if a.name == patch:
1318 return (i, a.node, a.name)
1318 return (i, a.node, a.name)
1319 return None
1319 return None
1320
1320
1321 # if the exact patch name does not exist, we try a few
1321 # if the exact patch name does not exist, we try a few
1322 # variations. If strict is passed, we try only #1
1322 # variations. If strict is passed, we try only #1
1323 #
1323 #
1324 # 1) a number (as string) to indicate an offset in the series file
1324 # 1) a number (as string) to indicate an offset in the series file
1325 # 2) a unique substring of the patch name was given
1325 # 2) a unique substring of the patch name was given
1326 # 3) patchname[-+]num to indicate an offset in the series file
1326 # 3) patchname[-+]num to indicate an offset in the series file
1327 def lookup(self, patch, strict=False):
1327 def lookup(self, patch, strict=False):
1328 def partialname(s):
1328 def partialname(s):
1329 if s in self.series:
1329 if s in self.series:
1330 return s
1330 return s
1331 matches = [x for x in self.series if s in x]
1331 matches = [x for x in self.series if s in x]
1332 if len(matches) > 1:
1332 if len(matches) > 1:
1333 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
1333 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
1334 for m in matches:
1334 for m in matches:
1335 self.ui.warn(' %s\n' % m)
1335 self.ui.warn(' %s\n' % m)
1336 return None
1336 return None
1337 if matches:
1337 if matches:
1338 return matches[0]
1338 return matches[0]
1339 if self.series and self.applied:
1339 if self.series and self.applied:
1340 if s == 'qtip':
1340 if s == 'qtip':
1341 return self.series[self.seriesend(True) - 1]
1341 return self.series[self.seriesend(True) - 1]
1342 if s == 'qbase':
1342 if s == 'qbase':
1343 return self.series[0]
1343 return self.series[0]
1344 return None
1344 return None
1345
1345
1346 if patch in self.series:
1346 if patch in self.series:
1347 return patch
1347 return patch
1348
1348
1349 if not os.path.isfile(self.join(patch)):
1349 if not os.path.isfile(self.join(patch)):
1350 try:
1350 try:
1351 sno = int(patch)
1351 sno = int(patch)
1352 except (ValueError, OverflowError):
1352 except (ValueError, OverflowError):
1353 pass
1353 pass
1354 else:
1354 else:
1355 if -len(self.series) <= sno < len(self.series):
1355 if -len(self.series) <= sno < len(self.series):
1356 return self.series[sno]
1356 return self.series[sno]
1357
1357
1358 if not strict:
1358 if not strict:
1359 res = partialname(patch)
1359 res = partialname(patch)
1360 if res:
1360 if res:
1361 return res
1361 return res
1362 minus = patch.rfind('-')
1362 minus = patch.rfind('-')
1363 if minus >= 0:
1363 if minus >= 0:
1364 res = partialname(patch[:minus])
1364 res = partialname(patch[:minus])
1365 if res:
1365 if res:
1366 i = self.series.index(res)
1366 i = self.series.index(res)
1367 try:
1367 try:
1368 off = int(patch[minus + 1:] or 1)
1368 off = int(patch[minus + 1:] or 1)
1369 except (ValueError, OverflowError):
1369 except (ValueError, OverflowError):
1370 pass
1370 pass
1371 else:
1371 else:
1372 if i - off >= 0:
1372 if i - off >= 0:
1373 return self.series[i - off]
1373 return self.series[i - off]
1374 plus = patch.rfind('+')
1374 plus = patch.rfind('+')
1375 if plus >= 0:
1375 if plus >= 0:
1376 res = partialname(patch[:plus])
1376 res = partialname(patch[:plus])
1377 if res:
1377 if res:
1378 i = self.series.index(res)
1378 i = self.series.index(res)
1379 try:
1379 try:
1380 off = int(patch[plus + 1:] or 1)
1380 off = int(patch[plus + 1:] or 1)
1381 except (ValueError, OverflowError):
1381 except (ValueError, OverflowError):
1382 pass
1382 pass
1383 else:
1383 else:
1384 if i + off < len(self.series):
1384 if i + off < len(self.series):
1385 return self.series[i + off]
1385 return self.series[i + off]
1386 raise error.Abort(_("patch %s not in series") % patch)
1386 raise error.Abort(_("patch %s not in series") % patch)
1387
1387
1388 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1388 def push(self, repo, patch=None, force=False, list=False, mergeq=None,
1389 all=False, move=False, exact=False, nobackup=False,
1389 all=False, move=False, exact=False, nobackup=False,
1390 keepchanges=False):
1390 keepchanges=False):
1391 self.checkkeepchanges(keepchanges, force)
1391 self.checkkeepchanges(keepchanges, force)
1392 diffopts = self.diffopts()
1392 diffopts = self.diffopts()
1393 with repo.wlock():
1393 with repo.wlock():
1394 heads = []
1394 heads = []
1395 for hs in repo.branchmap().itervalues():
1395 for hs in repo.branchmap().itervalues():
1396 heads.extend(hs)
1396 heads.extend(hs)
1397 if not heads:
1397 if not heads:
1398 heads = [nullid]
1398 heads = [nullid]
1399 if repo.dirstate.p1() not in heads and not exact:
1399 if repo.dirstate.p1() not in heads and not exact:
1400 self.ui.status(_("(working directory not at a head)\n"))
1400 self.ui.status(_("(working directory not at a head)\n"))
1401
1401
1402 if not self.series:
1402 if not self.series:
1403 self.ui.warn(_('no patches in series\n'))
1403 self.ui.warn(_('no patches in series\n'))
1404 return 0
1404 return 0
1405
1405
1406 # Suppose our series file is: A B C and the current 'top'
1406 # Suppose our series file is: A B C and the current 'top'
1407 # patch is B. qpush C should be performed (moving forward)
1407 # patch is B. qpush C should be performed (moving forward)
1408 # qpush B is a NOP (no change) qpush A is an error (can't
1408 # qpush B is a NOP (no change) qpush A is an error (can't
1409 # go backwards with qpush)
1409 # go backwards with qpush)
1410 if patch:
1410 if patch:
1411 patch = self.lookup(patch)
1411 patch = self.lookup(patch)
1412 info = self.isapplied(patch)
1412 info = self.isapplied(patch)
1413 if info and info[0] >= len(self.applied) - 1:
1413 if info and info[0] >= len(self.applied) - 1:
1414 self.ui.warn(
1414 self.ui.warn(
1415 _('qpush: %s is already at the top\n') % patch)
1415 _('qpush: %s is already at the top\n') % patch)
1416 return 0
1416 return 0
1417
1417
1418 pushable, reason = self.pushable(patch)
1418 pushable, reason = self.pushable(patch)
1419 if pushable:
1419 if pushable:
1420 if self.series.index(patch) < self.seriesend():
1420 if self.series.index(patch) < self.seriesend():
1421 raise error.Abort(
1421 raise error.Abort(
1422 _("cannot push to a previous patch: %s") % patch)
1422 _("cannot push to a previous patch: %s") % patch)
1423 else:
1423 else:
1424 if reason:
1424 if reason:
1425 reason = _('guarded by %s') % reason
1425 reason = _('guarded by %s') % reason
1426 else:
1426 else:
1427 reason = _('no matching guards')
1427 reason = _('no matching guards')
1428 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1428 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1429 return 1
1429 return 1
1430 elif all:
1430 elif all:
1431 patch = self.series[-1]
1431 patch = self.series[-1]
1432 if self.isapplied(patch):
1432 if self.isapplied(patch):
1433 self.ui.warn(_('all patches are currently applied\n'))
1433 self.ui.warn(_('all patches are currently applied\n'))
1434 return 0
1434 return 0
1435
1435
1436 # Following the above example, starting at 'top' of B:
1436 # Following the above example, starting at 'top' of B:
1437 # qpush should be performed (pushes C), but a subsequent
1437 # qpush should be performed (pushes C), but a subsequent
1438 # qpush without an argument is an error (nothing to
1438 # qpush without an argument is an error (nothing to
1439 # apply). This allows a loop of "...while hg qpush..." to
1439 # apply). This allows a loop of "...while hg qpush..." to
1440 # work as it detects an error when done
1440 # work as it detects an error when done
1441 start = self.seriesend()
1441 start = self.seriesend()
1442 if start == len(self.series):
1442 if start == len(self.series):
1443 self.ui.warn(_('patch series already fully applied\n'))
1443 self.ui.warn(_('patch series already fully applied\n'))
1444 return 1
1444 return 1
1445 if not force and not keepchanges:
1445 if not force and not keepchanges:
1446 self.checklocalchanges(repo, refresh=self.applied)
1446 self.checklocalchanges(repo, refresh=self.applied)
1447
1447
1448 if exact:
1448 if exact:
1449 if keepchanges:
1449 if keepchanges:
1450 raise error.Abort(
1450 raise error.Abort(
1451 _("cannot use --exact and --keep-changes together"))
1451 _("cannot use --exact and --keep-changes together"))
1452 if move:
1452 if move:
1453 raise error.Abort(_('cannot use --exact and --move '
1453 raise error.Abort(_('cannot use --exact and --move '
1454 'together'))
1454 'together'))
1455 if self.applied:
1455 if self.applied:
1456 raise error.Abort(_('cannot push --exact with applied '
1456 raise error.Abort(_('cannot push --exact with applied '
1457 'patches'))
1457 'patches'))
1458 root = self.series[start]
1458 root = self.series[start]
1459 target = patchheader(self.join(root), self.plainmode).parent
1459 target = patchheader(self.join(root), self.plainmode).parent
1460 if not target:
1460 if not target:
1461 raise error.Abort(
1461 raise error.Abort(
1462 _("%s does not have a parent recorded") % root)
1462 _("%s does not have a parent recorded") % root)
1463 if not repo[target] == repo['.']:
1463 if not repo[target] == repo['.']:
1464 hg.update(repo, target)
1464 hg.update(repo, target)
1465
1465
1466 if move:
1466 if move:
1467 if not patch:
1467 if not patch:
1468 raise error.Abort(_("please specify the patch to move"))
1468 raise error.Abort(_("please specify the patch to move"))
1469 for fullstart, rpn in enumerate(self.fullseries):
1469 for fullstart, rpn in enumerate(self.fullseries):
1470 # strip markers for patch guards
1470 # strip markers for patch guards
1471 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1471 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1472 break
1472 break
1473 for i, rpn in enumerate(self.fullseries[fullstart:]):
1473 for i, rpn in enumerate(self.fullseries[fullstart:]):
1474 # strip markers for patch guards
1474 # strip markers for patch guards
1475 if self.guard_re.split(rpn, 1)[0] == patch:
1475 if self.guard_re.split(rpn, 1)[0] == patch:
1476 break
1476 break
1477 index = fullstart + i
1477 index = fullstart + i
1478 assert index < len(self.fullseries)
1478 assert index < len(self.fullseries)
1479 fullpatch = self.fullseries[index]
1479 fullpatch = self.fullseries[index]
1480 del self.fullseries[index]
1480 del self.fullseries[index]
1481 self.fullseries.insert(fullstart, fullpatch)
1481 self.fullseries.insert(fullstart, fullpatch)
1482 self.parseseries()
1482 self.parseseries()
1483 self.seriesdirty = True
1483 self.seriesdirty = True
1484
1484
1485 self.applieddirty = True
1485 self.applieddirty = True
1486 if start > 0:
1486 if start > 0:
1487 self.checktoppatch(repo)
1487 self.checktoppatch(repo)
1488 if not patch:
1488 if not patch:
1489 patch = self.series[start]
1489 patch = self.series[start]
1490 end = start + 1
1490 end = start + 1
1491 else:
1491 else:
1492 end = self.series.index(patch, start) + 1
1492 end = self.series.index(patch, start) + 1
1493
1493
1494 tobackup = set()
1494 tobackup = set()
1495 if (not nobackup and force) or keepchanges:
1495 if (not nobackup and force) or keepchanges:
1496 status = self.checklocalchanges(repo, force=True)
1496 status = self.checklocalchanges(repo, force=True)
1497 if keepchanges:
1497 if keepchanges:
1498 tobackup.update(status.modified + status.added +
1498 tobackup.update(status.modified + status.added +
1499 status.removed + status.deleted)
1499 status.removed + status.deleted)
1500 else:
1500 else:
1501 tobackup.update(status.modified + status.added)
1501 tobackup.update(status.modified + status.added)
1502
1502
1503 s = self.series[start:end]
1503 s = self.series[start:end]
1504 all_files = set()
1504 all_files = set()
1505 try:
1505 try:
1506 if mergeq:
1506 if mergeq:
1507 ret = self.mergepatch(repo, mergeq, s, diffopts)
1507 ret = self.mergepatch(repo, mergeq, s, diffopts)
1508 else:
1508 else:
1509 ret = self.apply(repo, s, list, all_files=all_files,
1509 ret = self.apply(repo, s, list, all_files=all_files,
1510 tobackup=tobackup, keepchanges=keepchanges)
1510 tobackup=tobackup, keepchanges=keepchanges)
1511 except AbortNoCleanup:
1511 except AbortNoCleanup:
1512 raise
1512 raise
1513 except: # re-raises
1513 except: # re-raises
1514 self.ui.warn(_('cleaning up working directory...\n'))
1514 self.ui.warn(_('cleaning up working directory...\n'))
1515 cmdutil.revert(self.ui, repo, repo['.'],
1515 cmdutil.revert(self.ui, repo, repo['.'],
1516 repo.dirstate.parents(), no_backup=True)
1516 repo.dirstate.parents(), no_backup=True)
1517 # only remove unknown files that we know we touched or
1517 # only remove unknown files that we know we touched or
1518 # created while patching
1518 # created while patching
1519 for f in all_files:
1519 for f in all_files:
1520 if f not in repo.dirstate:
1520 if f not in repo.dirstate:
1521 repo.wvfs.unlinkpath(f, ignoremissing=True)
1521 repo.wvfs.unlinkpath(f, ignoremissing=True)
1522 self.ui.warn(_('done\n'))
1522 self.ui.warn(_('done\n'))
1523 raise
1523 raise
1524
1524
1525 if not self.applied:
1525 if not self.applied:
1526 return ret[0]
1526 return ret[0]
1527 top = self.applied[-1].name
1527 top = self.applied[-1].name
1528 if ret[0] and ret[0] > 1:
1528 if ret[0] and ret[0] > 1:
1529 msg = _("errors during apply, please fix and qrefresh %s\n")
1529 msg = _("errors during apply, please fix and qrefresh %s\n")
1530 self.ui.write(msg % top)
1530 self.ui.write(msg % top)
1531 else:
1531 else:
1532 self.ui.write(_("now at: %s\n") % top)
1532 self.ui.write(_("now at: %s\n") % top)
1533 return ret[0]
1533 return ret[0]
1534
1534
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Remove applied patches from the stack, down to (and including)
        ``patch`` if given, or just the topmost patch otherwise.

        With ``all`` every applied patch is popped.  ``update`` controls
        whether the working directory is synchronized to the new queue
        parent; ``force``/``keepchanges`` relax the local-changes check,
        and ``nobackup`` suppresses backups of touched files.
        """
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    # not an applied name; maybe an index/hash - resolve it
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            # first index in self.applied that will be popped
            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even with --no-update we must update if a dirstate
                # parent is about to be stripped
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the working-copy update when no popped patch is an
                # ancestor of the working directory
                parents = [p.node() for p in repo[None].parents()]
                update = any(entry.node in parents
                             for entry in self.applied[start:])

            tobackup = set()
            if update:
                s = self.checklocalchanges(repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(s.modified + s.added)
                elif keepchanges:
                    tobackup.update(s.modified + s.added +
                                    s.removed + s.deleted)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise error.Abort(_('trying to pop unknown node %s') % node)

            # refuse to strip revisions this queue does not own
            if heads != [self.applied[-1].node]:
                raise error.Abort(_("popping would remove a revision not "
                                    "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise error.Abort(
                    _("popping would remove a public revision"),
                    hint=_("see 'hg help phases' for details"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, '.')[:4]
                if d:
                    raise error.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    raise error.Abort(_("local changes found, qrefresh first"))
                self.backup(repo, tobackup)
                with repo.dirstate.parentchange():
                    # files added by the popped patches disappear; modified
                    # or removed files are restored from the new parent
                    for f in a:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                        repo.dirstate.drop(f)
                    for f in m + r:
                        fctx = ctx[f]
                        repo.wwrite(f, fctx.data(), fctx.flags())
                        repo.dirstate.normal(f)
                    repo.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            strip(self.ui, repo, [rev], update=False, backup=False)
            # bring subrepos in line with the new working-directory parent
            for s, state in repo['.'].substate.items():
                repo['.'].sub(s).get(state)
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
1637
1637
1638 def diff(self, repo, pats, opts):
1638 def diff(self, repo, pats, opts):
1639 top, patch = self.checktoppatch(repo)
1639 top, patch = self.checktoppatch(repo)
1640 if not top:
1640 if not top:
1641 self.ui.write(_("no patches applied\n"))
1641 self.ui.write(_("no patches applied\n"))
1642 return
1642 return
1643 qp = self.qparents(repo, top)
1643 qp = self.qparents(repo, top)
1644 if opts.get('reverse'):
1644 if opts.get('reverse'):
1645 node1, node2 = None, qp
1645 node1, node2 = None, qp
1646 else:
1646 else:
1647 node1, node2 = qp, None
1647 node1, node2 = qp, None
1648 diffopts = self.diffopts(opts, patch)
1648 diffopts = self.diffopts(opts, patch)
1649 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1649 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1650
1650
    def refresh(self, repo, pats=None, **opts):
        """Recreate the topmost applied patch from the current working
        directory state, updating both the patch file and its changeset.

        Returns 1 when no patches are applied.  The operation pops the old
        qtip changeset (via strip) and recommits with the refreshed content;
        on any failure after the strip it rebuilds the dirstate and warns,
        leaving recovery to ``revert --all`` + ``qpush``.
        """
        opts = pycompat.byteskwargs(opts)
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        edit = opts.get('edit')
        editform = opts.get('editform', 'mq.qrefresh')
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % dateutil.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(_("cannot qrefresh a revision with children"))
            if not repo[top].mutable():
                raise error.Abort(_("cannot qrefresh public revision"),
                                  hint=_("see 'hg help phases' for details"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, patchparent)
            if inclsubs:
                substatestate = repo.dirstate['.hgsubstate']

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn,
                                     plain=True)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifestlog[changes[0]].read()
            aaa = aa[:]
            match1 = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
                # filter with include/exclude options
                match1 = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch.  In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply match1 via repo.status to ensure correct case handling.
            cm, ca, cr, cd = repo.status(patchparent, match=match1)[:4]
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            dsguard = None
            try:
                dsguard = dirstateguard.dirstateguard(repo, 'mq.refresh')
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                # Drop useless copy information
                for f in list(repo.dirstate.copies()):
                    repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in pycompat.xrange(len(m) - 1, -1, -1):
                    if not match1(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                user = ph.user or changes[1]

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = True
                strip(self.ui, repo, [top], update=False, backup=False)
                dsguard.close()
            finally:
                release(dsguard)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = "[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:
                    def finishdesc(desc):
                        # keep the edited message in the patch header; fall
                        # back to the default message on empty input
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg
                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _('Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(finishdesc=finishdesc,
                                                     extramsg=extramsg,
                                                     editform=editform)
                    message = msg or "\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase than
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction('mq')
                    n = newcommit(repo, oldphase, message, user, ph.date,
                                  match=match, force=True, editor=editor)
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(repo, patchparent,
                                           changes=c, opts=diffopts)
                    comments = bytes(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    # move bookmarks that pointed at the old qtip to the
                    # refreshed changeset
                    marks = repo._bookmarks
                    marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except: # re-raises
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('qrefresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1891
1891
1892 def init(self, repo, create=False):
1892 def init(self, repo, create=False):
1893 if not create and os.path.isdir(self.path):
1893 if not create and os.path.isdir(self.path):
1894 raise error.Abort(_("patch queue directory already exists"))
1894 raise error.Abort(_("patch queue directory already exists"))
1895 try:
1895 try:
1896 os.mkdir(self.path)
1896 os.mkdir(self.path)
1897 except OSError as inst:
1897 except OSError as inst:
1898 if inst.errno != errno.EEXIST or not create:
1898 if inst.errno != errno.EEXIST or not create:
1899 raise
1899 raise
1900 if create:
1900 if create:
1901 return self.qrepo(create=True)
1901 return self.qrepo(create=True)
1902
1902
1903 def unapplied(self, repo, patch=None):
1903 def unapplied(self, repo, patch=None):
1904 if patch and patch not in self.series:
1904 if patch and patch not in self.series:
1905 raise error.Abort(_("patch %s is not in series file") % patch)
1905 raise error.Abort(_("patch %s is not in series file") % patch)
1906 if not patch:
1906 if not patch:
1907 start = self.seriesend()
1907 start = self.seriesend()
1908 else:
1908 else:
1909 start = self.series.index(patch) + 1
1909 start = self.series.index(patch) + 1
1910 unapplied = []
1910 unapplied = []
1911 for i in pycompat.xrange(start, len(self.series)):
1911 for i in pycompat.xrange(start, len(self.series)):
1912 pushable, reason = self.pushable(i)
1912 pushable, reason = self.pushable(i)
1913 if pushable:
1913 if pushable:
1914 unapplied.append((i, self.series[i]))
1914 unapplied.append((i, self.series[i]))
1915 self.explainpushable(i)
1915 self.explainpushable(i)
1916 return unapplied
1916 return unapplied
1917
1917
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Display the patch series (or, with ``missing``, patch-directory
        files that are not in the series).

        ``status`` filters by state letter ('A'/'U'/'G') in non-verbose
        mode; ``summary`` appends the first line of each patch message.
        """
        def displayname(pfx, patchname, state):
            # write one line: optional prefix, patch name, optional summary
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = ''

                if self.ui.formatted():
                    # truncate the summary so the line fits the terminal
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = stringutil.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len("%d" % (start + length - 1))
            for i in pycompat.xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    # non-verbose: honour the status filter
                    continue
                displayname(pfx, patch, state)
        else:
            # list files under the patch directory that are unknown to mq
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')
1977
1977
1978 def issaveline(self, l):
1978 def issaveline(self, l):
1979 if l.name == '.hg.patches.save.line':
1979 if l.name == '.hg.patches.save.line':
1980 return True
1980 return True
1981
1981
1982 def qrepo(self, create=False):
1982 def qrepo(self, create=False):
1983 ui = self.baseui.copy()
1983 ui = self.baseui.copy()
1984 # copy back attributes set by ui.pager()
1984 # copy back attributes set by ui.pager()
1985 if self.ui.pageractive and not ui.pageractive:
1985 if self.ui.pageractive and not ui.pageractive:
1986 ui.pageractive = self.ui.pageractive
1986 ui.pageractive = self.ui.pageractive
1987 # internal config: ui.formatted
1987 # internal config: ui.formatted
1988 ui.setconfig('ui', 'formatted',
1988 ui.setconfig('ui', 'formatted',
1989 self.ui.config('ui', 'formatted'), 'mqpager')
1989 self.ui.config('ui', 'formatted'), 'mqpager')
1990 ui.setconfig('ui', 'interactive',
1990 ui.setconfig('ui', 'interactive',
1991 self.ui.config('ui', 'interactive'), 'mqpager')
1991 self.ui.config('ui', 'interactive'), 'mqpager')
1992 if create or os.path.isdir(self.join(".hg")):
1992 if create or os.path.isdir(self.join(".hg")):
1993 return hg.repository(ui, path=self.path, create=create)
1993 return hg.repository(ui, path=self.path, create=create)
1994
1994
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state from a save changeset created by :meth:`save`.

        Parses the saved series/applied data out of ``rev``'s description.
        With ``delete`` the save changeset is stripped (if it is still a
        head); with ``qupdate`` the queue repository is updated to its
        recorded parent.  Returns 1 on failure.
        """
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                # everything after this marker is series/applied data
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # saved queue-repository parents (two hex nodes)
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                # "node:name" lines are applied patches; ":name" lines are
                # unapplied series entries
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("no saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                # update the working dir only if it sat on the save entry
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
2048
2048
    def save(self, repo, msg=None):
        """Checkpoint the current queue state in a commit whose description
        encodes the applied/series data (readable back by :meth:`restore`).

        Returns 1 when there is nothing to save, the state is already
        saved, or the commit fails.
        """
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            # record the queue repository's parents for later restore
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        # applied entries serialize as "node:name"; series entries as ":name"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        # sentinel entry recognized by issaveline()
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)
2075
2075
def fullseriesend(self):
    # Index into fullseries just past the last applied patch; 0 when
    # nothing is applied, len(fullseries) when the patch is not found.
    if not self.applied:
        return 0
    idx = self.findseries(self.applied[-1].name)
    if idx is None:
        return len(self.fullseries)
    return idx + 1
def seriesend(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    def nextpatch(start):
        # Skip over unpushable (guarded) patches, explaining each skip.
        if all_patches or start >= len(self.series):
            return start
        for i in pycompat.xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                return i
            self.explainpushable(i)
        return len(self.series)

    if not self.applied:
        return nextpatch(0)
    try:
        end = self.series.index(self.applied[-1].name)
    except ValueError:
        # Topmost applied patch is missing from the series file.
        return 0
    return nextpatch(end + 1)
def appliedname(self, index):
    # Display name of the index-th applied patch; in verbose mode the
    # patch's position in the series is prepended.
    pname = self.applied[index].name
    if self.ui.verbose:
        return "%d %s" % (self.series.index(pname), pname)
    return pname
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    # Import patch files and/or existing revisions into the queue.
    # Returns the list of imported patch names.
    def checkseries(patchname):
        if patchname in self.series:
            raise error.Abort(_('patch %s is already in the series file')
                              % patchname)

    # --- argument validation -------------------------------------------
    if rev:
        if files:
            raise error.Abort(_('option "-r" not valid when importing '
                                'files'))
        rev = scmutil.revrange(repo, rev)
        rev.sort(reverse=True)
    elif not files:
        raise error.Abort(_('no files or revisions specified'))
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise error.Abort(_('option "-n" not valid when importing multiple '
                            'patches'))

    imported = []

    # --- import existing revisions (--rev) -----------------------------
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev.first()))
        if len(heads) > 1:
            raise error.Abort(_('revision %d is the root of more than one '
                                'branch') % rev.last())
        if self.applied:
            base = repo.changelog.node(rev.first())
            if base in [n.node for n in self.applied]:
                raise error.Abort(_('revision %d is already managed')
                                  % rev.first())
            if heads != [self.applied[-1].node]:
                raise error.Abort(_('revision %d is not the parent of '
                                    'the queue') % rev.first())
            base = repo.changelog.rev(self.applied[0].node)
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev.first())]:
                raise error.Abort(_('revision %d has unmanaged children')
                                  % rev.first())
            lastparent = None

        diffopts = self.diffopts({'git': git})
        with repo.transaction('qimport') as tr:
            for r in rev:
                if not repo[r].mutable():
                    raise error.Abort(_('revision %d is not mutable') % r,
                                      hint=_("see 'hg help phases' "
                                             'for details'))
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise error.Abort(_('cannot import merge revision %d')
                                      % r)
                if lastparent and lastparent != r:
                    raise error.Abort(_('revision %d is not the parent of '
                                        '%d')
                                      % (r, lastparent))
                lastparent = p1

                if not patchname:
                    # Derive a patch name from the changeset summary.
                    patchname = self.makepatchname(
                        repo[r].description().split('\n', 1)[0],
                        '%d.diff' % r)
                checkseries(patchname)
                self.checkpatchname(patchname, force)
                self.fullseries.insert(0, patchname)

                with self.opener(patchname, "w") as fp:
                    cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                self.applied.insert(0, statusentry(n, patchname))

                self.added.append(patchname)
                imported.append(patchname)
                patchname = None
            if rev and repo.ui.configbool('mq', 'secret'):
                # if we added anything with --rev, move the secret root
                phases.retractboundary(repo, tr, phases.secret, [n])
        self.parseseries()
        self.applieddirty = True
        self.seriesdirty = True

    # --- import patch files --------------------------------------------
    for i, filename in enumerate(files):
        if existing:
            # Register a patch already present in the patch directory.
            if filename == '-':
                raise error.Abort(_('-e is incompatible with import from -')
                                  )
            filename = normname(filename)
            self.checkreservedname(filename)
            if util.url(filename).islocal():
                originpath = self.join(filename)
                if not os.path.isfile(originpath):
                    raise error.Abort(
                        _("patch %s does not exist") % filename)

            if patchname:
                self.checkpatchname(patchname, force)

                self.ui.write(_('renaming %s to %s\n')
                              % (filename, patchname))
                util.rename(originpath, self.join(patchname))
            else:
                patchname = filename

        else:
            # Copy the patch content into the patch directory.
            if filename == '-' and not patchname:
                raise error.Abort(_('need --name to import a patch from -'))
            elif not patchname:
                patchname = normname(os.path.basename(filename.rstrip('/')))
            self.checkpatchname(patchname, force)
            try:
                if filename == '-':
                    text = self.ui.fin.read()
                else:
                    fp = hg.openpath(self.ui, filename)
                    text = fp.read()
                    fp.close()
            except (OSError, IOError):
                raise error.Abort(_("unable to read file %s") % filename)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
            patchf.close()
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            index = self.fullseriesend() + i
            self.fullseries[index:index] = [patchname]
        self.parseseries()
        self.seriesdirty = True
        self.ui.warn(_("adding %s to series file\n") % patchname)
        self.added.append(patchname)
        imported.append(patchname)
        patchname = None

    self.removeundo(repo)
    return imported
def fixkeepchangesopts(ui, opts):
    # Honor the mq.keepchanges config knob: unless --force or --exact was
    # given, behave as though --keep-changes had been passed. Returns the
    # original mapping untouched when no adjustment is needed.
    if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
        or opts.get('exact')):
        return opts
    adjusted = dict(opts)
    adjusted['keep_changes'] = True
    return adjusted
@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    # Delegate to the queue object, then persist any series/status changes.
    mq = repo.mq
    mq.delete(repo, patches, pycompat.byteskwargs(opts))
    mq.savedirty()
    return 0
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    # Determine how far into the series to list (exclusive upper bound).
    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    last = opts.get('last')
    if last and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif last and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif last:
        # Show only the patch preceding the topmost applied one.
        start = end - 2
        end = 1
    else:
        start = 0

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    # Start listing just past the named patch, or past the applied stack.
    if patch:
        if patch not in q.series:
            raise error.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    length = 1 if opts.get('first') else None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
         helpcategory=command.CATEGORY_IMPORT_EXPORT)
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
    with repo.lock(): # cause this may move phase
        q = repo.mq
        try:
            imported = q.qimport(
                repo, filename, patchname=opts.get('name'),
                existing=opts.get('existing'), force=opts.get('force'),
                rev=opts.get('rev'), git=opts.get('git'))
        finally:
            # Persist queue state even when the import aborts midway.
            q.savedirty()

    if imported and opts.get('push') and not opts.get('rev'):
        return q.push(repo, imported[-1])
    return 0
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wvfs('.hgignore', 'w')
            # Exclude nested metadata plus the transient status/guards files.
            for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                         'status\n', 'guards\n'):
                fp.write(line)
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wvfs('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
@command("qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'),
         helpcategory=command.CATEGORY_REPO_CREATION,
         helpbasic=True)
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # opts keys are native (sys) strings here, hence the r'' prefix.
    return qinit(ui, repo, create=opts.get(r'create_repo'))
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + cmdutil.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'),
         helpcategory=command.CATEGORY_REPO_CREATION,
         norepo=True)
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only)
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise error.Abort(_('versioned patch repository not found'
                            ' (see init --mq)'))

    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None at this point, so repo[qbase]
        # resolves via the None key — confirm this lookup is intentional.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      revs=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            # Applied patches from the source must not remain in the clone.
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
@command("qcommit|qci",
         commands.table["commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'),
         helpcategory=command.CATEGORY_COMMITTING,
         inferrepo=True)
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # Delegate to the regular commit command, but run it against the
    # patch-queue repository rather than the main one.
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise error.Abort('no queue repository')
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
2557
2557
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
          ] + seriesopts,
         _('hg qseries [-ms]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    # **opts carries native-str keys on py3, hence the r'' lookups.
    missing = opts.get(r'missing')
    summary = opts.get(r'summary')
    repo.mq.qseries(repo, missing=missing, summary=summary)
    return 0
2570
2570
@command("qtop", seriesopts, _('hg qtop [-s]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # Index just past the topmost applied patch, or 0 when none is applied.
    end = q.seriesend(True) if q.applied else 0
    if not end:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=end - 1, length=1, status='A',
              summary=opts.get(r'summary'))
2589
2589
@command("qnext", seriesopts, _('hg qnext [-s]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    pos = q.seriesend()
    # seriesend() == len(series) means every patch is already applied.
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=pos, length=1, summary=opts.get(r'summary'))
2602
2602
@command("qprev", seriesopts, _('hg qprev [-s]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    # The patch preceding the top is the second-to-last applied one.
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get(r'summary'))
2620
2620
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from -U/-D when not given explicitly.

    -u/--user and -d/--date always win; --currentuser/--currentdate only
    supply a value when the explicit option is empty or absent.
    """
    wantuser = opts.get('currentuser') and not opts.get('user')
    if wantuser:
        opts['user'] = ui.username()
    wantdate = opts.get('currentdate') and not opts.get('date')
    if wantdate:
        opts['date'] = "%d %d" % dateutil.makedate()
2626
2626
@command("qnew",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + cmdutil.walkopts + cmdutil.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
         inferrepo=True)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    # Record the -m/-l commit message before handing opts to the queue.
    opts['msg'] = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    q.savedirty()
    return 0
2674
2674
@command("qrefresh",
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + cmdutil.walkopts + cmdutil.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
         helpcategory=command.CATEGORY_COMMITTING, helpbasic=True,
         inferrepo=True)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    # Rewriting the top patch touches the working directory, so take the
    # wlock around both the refresh and the queue-state save.
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
    return ret
2721
2721
@command("qdiff",
         cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'),
         helpcategory=command.CATEGORY_FILE_CONTENTS, helpbasic=True,
         inferrepo=True)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    ui.pager('qdiff')
    diffopts = pycompat.byteskwargs(opts)
    repo.mq.diff(repo, pats, diffopts)
    return 0
2745
2745
@command('qfold',
         [('e', 'edit', None, _('invoke editor on commit messages')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + cmdutil.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
         helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
            # Fix: actually skip the duplicate. Previously the patch was
            # appended anyway despite the warning, so a patch named twice
            # (or the current top) would have been applied a second time.
            continue
        if q.isapplied(p):
            raise error.Abort(_('qfold cannot fold already applied patch %s')
                              % p)
        patches.append(p)

    for p in patches:
        if not message:
            # No -m/-l message given: collect each folded patch's header
            # message so they can be concatenated below.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        # Use a dedicated name for the patched file list; the original code
        # clobbered the *files parameter here.
        (patchsuccess, patchedfiles, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise error.Abort(_('error folding patch %s') % p)

    if not message:
        # Concatenate the current patch header with each folded header,
        # separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append('* * *')
                message.extend(msg)
        message = '\n'.join(message)

    diffopts = q.patchopts(q.diffopts(), *patches)
    with repo.wlock():
        q.refresh(repo, msg=message, git=diffopts.git, edit=opts.get('edit'),
                  editform='mq.qfold')
        q.delete(repo, patches, opts)
        q.savedirty()
2814
2814
@command("qgoto",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('overwrite any local changes')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qgoto [OPTION]... PATCH'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get('no_backup')
    keepchanges = opts.get('keep_changes')
    # If the target is already applied we must pop down to it; otherwise
    # push until it becomes the top. Both take the same keyword arguments.
    move = q.pop if q.isapplied(patch) else q.push
    ret = move(repo, patch, force=opts.get('force'), nobackup=nobackup,
               keepchanges=keepchanges)
    q.savedirty()
    return ret
2840
2840
@command("qguard",
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    # Print one series line: "<patch>: <guard> <guard> ...", colorized by
    # patch state and by guard polarity. Closes over 'q' and 'applied',
    # which are assigned below before the first call.
    def status(idx):
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            # guard strings keep their leading '+'/'-'; anything else is
            # the synthetic 'unguarded' placeholder above
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    # **opts has native-str keys on py3, hence the r'' lookups.
    if opts.get(r'list'):
        if args or opts.get(r'none'):
            raise error.Abort(_('cannot mix -l/--list with options or '
                                'arguments'))
        for i in pycompat.xrange(len(q.series)):
            status(i)
        return
    # No patch name given (first arg is a guard): default to the top patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise error.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_('no patch to work with'))
    if args or opts.get(r'none'):
        # Set mode: remaining args are the new guards (empty with --none).
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        # Query mode: just print the named patch's guards.
        status(q.series.index(q.lookup(patch)))
2916
2916
@command("qheader", [], _('hg qheader [PATCH]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    # Without an explicit patch, fall back to the topmost applied one.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = 'qtip'
    name = q.lookup(patch)
    ph = patchheader(q.join(name), q.plainmode)
    ui.write('\n'.join(ph.message) + '\n')
2935
2935
def lastsavename(path):
    """Return (filename, index) for the highest-numbered save of *path*.

    Saves are siblings of *path* named like ``<base>.<N>`` (the separator
    is an unescaped regex ``.``, kept as-is for compatibility). Returns
    (None, None) when no save exists.
    """
    directory, base = os.path.split(path)
    pattern = re.compile("%s.([0-9]+)" % base)
    bestname = None
    bestindex = None
    for entry in os.listdir(directory):
        m = pattern.match(entry)
        if not m:
            continue
        index = int(m.group(1))
        if bestindex is None or index > bestindex:
            bestindex = index
            bestname = entry
    if bestname:
        return (os.path.join(directory, bestname), bestindex)
    return (None, None)
2952
2952
def savename(path):
    """Return the next unused save name for *path* (``path.N``).

    N is one more than the highest existing save index, or 1 when no
    save exists yet.
    """
    previous, index = lastsavename(path)
    if previous is None:
        index = 0
    return path + ".%d" % (index + 1)
2959
2959
@command("qpush",
         [('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('apply on top of local changes')),
          ('e', 'exact', None,
           _('apply the target patch to its recorded parent')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '',
           _('merge queue name (DEPRECATED)'), _('NAME')),
          ('', 'move', None,
           _('reorder patch series and apply only the patch')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
         helpbasic=True)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)

    mergeq = None
    if opts.get('merge'):
        # Deprecated merge mode: pick the queue to merge from, either the
        # explicitly named one (-n) or the most recently saved one.
        if opts.get('name'):
            newpath = repo.vfs.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                  mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                  exact=opts.get('exact'), nobackup=opts.get('no_backup'),
                  keepchanges=opts.get('keep_changes'))
3007
3007
@command("qpop",
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '',
           _('queue name to pop (DEPRECATED)'), _('NAME')),
          ('', 'keep-changes', None,
           _('tolerate non-conflicting local changes')),
          ('f', 'force', None, _('forget any local changes to patched files')),
          ('', 'no-backup', None, _('do not save backup copies of files'))],
         _('hg qpop [-a] [-f] [PATCH | INDEX]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
         helpbasic=True)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get('name'):
        # Deprecated -n NAME: operate on a different (saved) queue; in that
        # case the working directory is left untouched (update=False below).
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'), nobackup=opts.get('no_backup'),
                keepchanges=opts.get('keep_changes'))
    # Persist the modified applied/series state regardless of pop outcome.
    q.savedirty()
    return ret
3047
3047
@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        # Single-argument form: the sole argument is the new name and the
        # patch being renamed is the one currently on top of the stack.
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming "into" an existing directory keeps the old basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    # Rewrite the series entry, preserving any #guard annotations.
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    # If the patch is applied, its status entry must track the new name too.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    # If the patch directory is itself a versioned repository (qrepo),
    # record the rename there as well.
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        with r.wlock():
            if r.dirstate[patch] == 'a':
                # Not committed yet: just swap the added filename.
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                # Tracked: record as a copy then forget the old name.
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()
3103
3103
@command("qrestore",
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    # Resolve the user-supplied revision to a node before handing it to
    # the queue; opts still carries native-str keys here (hence r'').
    q = repo.mq
    node = repo.lookup(rev)
    q.restore(repo, node,
              delete=opts.get(r'delete'),
              qupdate=opts.get(r'update'))
    q.savedirty()
    return 0
3119
3119
@command("qsave",
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '',
           _('copy directory name'), _('NAME')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + cmdutil.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    # q.save records the queue state as a changeset; non-zero means failure.
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty() # save to .hg/patches before copying
    if opts.get('copy'):
        # Copy the whole patch directory, either to an explicit -n NAME
        # (refusing to clobber unless -f) or to the next free save name.
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(_('destination %s exists and is not '
                                        'a directory') % newpath)
                if not opts.get('force'):
                    raise error.Abort(_('destination %s exists, '
                                        'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        # -e: clear the applied-patch status after saving.
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0
3159
3159
3160
3160
@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    # pushable(i) -> True when applied patch #i passes the active guards.
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get('none'):
        # Setting (or clearing) the active guards: snapshot the counts
        # first so we can report how the selection changed things.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in pycompat.xrange(len(q.applied))
                       if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in pycompat.xrange(len(q.applied))
                       if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        # -s: tally how many series entries carry each guard.
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = list(guards.items())
        # Sort by guard name ignoring the leading +/- sign character.
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: just report the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # Remember the current top so --reapply can push back to it later.
    reapply = opts.get('reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        # Pop down to just below the first applied patch that is now
        # guarded (all of them if the very first one is guarded).
        for i in pycompat.xrange(len(q.applied)):
            if not pushable(i):
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # Always persist queue state, even if the push fails midway.
            q.savedirty()
3273
3273
@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    # --applied expands to the whole applied stack; otherwise at least one
    # explicit revision is required.  (opts still has native-str keys here.)
    if opts.get(r'applied'):
        revrange = ('qbase::qtip',) + revrange
    elif not revrange:
        raise error.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    # Warn (but proceed) when the working directory parent is being
    # finished while it has uncommitted file changes.
    if repo['.'].rev() in revs and repo[None].files():
        ui.warn(_('warning: uncommitted changes in the working directory\n'))
    # queue.finish may change phases but leaves locking to the caller to
    # avoid deadlock with wlock, so this command takes the repo lock here.
    with repo.lock():
        q.finish(repo, revs)
        q.savedirty()
    return 0
3316
3316
@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
          ],
         _('[OPTION] [QUEUE]'),
         helpcategory=command.CATEGORY_CHANGE_ORGANIZATION)
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except in the case where there are applied patches from the
    currently active queue in the repository. Then the queue will only be
    created and switching will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''
    q = repo.mq
    # Bookkeeping files under .hg/: the default queue directory name, the
    # registry of all queue names, and the marker naming the active queue.
    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        # Active queue name, derived from the queue directory basename
        # ('patches' or 'patches-NAME').
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        # True when the queue registry file does not exist yet.
        try:
            fh = repo.vfs(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        # All registered queue names (always including the current one),
        # sorted; falls back to the default queue if there is no registry.
        current = _getcurrent()

        try:
            fh = repo.vfs(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        # Switch the active queue, refusing while patches are applied.
        if q.applied:
            raise error.Abort(_('new queue created, but cannot make active '
                                'as patches are applied'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        # Record the active queue marker; an empty file means 'patches'.
        fh = repo.vfs(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        # Append a queue name to the registry file.
        fh = repo.vfs(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        # Absolute path of the patch directory for a queue name.
        if name == 'patches':
            return repo.vfs.join('patches')
        else:
            return repo.vfs.join('patches-' + name)

    def _validname(name):
        # Queue names may not contain path separators, ':' or '.'.
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        # Remove a queue from the registry (its directory is kept; --purge
        # removes it separately).  Rewrites the registry atomically via a
        # temp file + rename.
        if name not in existing:
            raise error.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise error.Abort(_('cannot delete currently active queue'))

        fh = repo.vfs('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        repo.vfs.rename('patches.queues.new', _allqueues)

    opts = pycompat.byteskwargs(opts)
    # Listing modes need no lock and return early.
    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise error.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    with repo.wlock():
        existing = _getqueues()

        if opts.get('create'):
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)
            if _noqueues():
                # First queue ever created: register the default one too.
                _addqueue(_defaultqueue)
            _addqueue(name)
            _setactive(name)
        elif opts.get('rename'):
            current = _getcurrent()
            if name == current:
                raise error.Abort(_('can\'t rename "%s" to its current name')
                                  % name)
            if name in existing:
                raise error.Abort(_('queue "%s" already exists') % name)

            olddir = _queuedir(current)
            newdir = _queuedir(name)

            if os.path.exists(newdir):
                raise error.Abort(_('non-queue directory "%s" already exists') %
                                  newdir)

            # Rewrite the registry, renaming the active entry (and its
            # directory, if it exists) on the way through.
            fh = repo.vfs('patches.queues.new', 'w')
            for queue in existing:
                if queue == current:
                    fh.write('%s\n' % (name,))
                    if os.path.exists(olddir):
                        util.rename(olddir, newdir)
                else:
                    fh.write('%s\n' % (queue,))
            fh.close()
            repo.vfs.rename('patches.queues.new', _allqueues)
            _setactivenocheck(name)
        elif opts.get('delete'):
            _delete(name)
        elif opts.get('purge'):
            # --purge also removes the patch directory, and tolerates a
            # queue that is unregistered or has no directory.
            if name in existing:
                _delete(name)
            qdir = _queuedir(name)
            if os.path.exists(qdir):
                shutil.rmtree(qdir)
        else:
            # Bare name: switch to an existing queue.
            if name not in existing:
                raise error.Abort(_('use --create to create a new queue'))
            _setactive(name)
3494
3494
def mqphasedefaults(repo, roots):
    """Callback used to set mq changesets as secret when no phase data exists.

    If the mq.secret config knob is set, the first applied patch's node is
    registered as a secret phase root, otherwise as a draft root.  Returns
    the (mutated) roots mapping.
    """
    applied = repo.mq.applied
    if applied:
        if repo.ui.configbool('mq', 'secret'):
            targetphase = phases.secret
        else:
            targetphase = phases.draft
        qbase = repo[applied[0].node]
        roots[targetphase].add(qbase.node())
    return roots
3505
3505
def reposetup(ui, repo):
    # Extension hook: replace the class of local repositories with a
    # subclass that knows about the patch queue (``repo.mq``) and guards
    # commit/push operations against applied patches.
    class mqrepo(repo.__class__):
        @localrepo.unfilteredpropertycache
        def mq(self):
            # lazily created queue object, cached on the unfiltered repo
            return queue(self.ui, self.baseui, self.path)

        def invalidateall(self):
            super(mqrepo, self).invalidateall()
            # propertycache entries live in __dict__ under system-string
            # keys on py3, hence the r'' (native str) literals below
            if localrepo.hasunfilteredcache(self, r'mq'):
                # recreate mq in case queue path was changed
                delattr(self.unfiltered(), r'mq')

        def abortifwdirpatched(self, errmsg, force=False):
            # raise Abort(errmsg) when a working-directory parent is an
            # applied patch, unless force or the queue disabled the check
            if self.mq.applied and self.mq.checkapplied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise error.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra=None):
            if extra is None:
                extra = {}
            # refuse to create a regular commit on top of an applied patch
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, pushop):
            # refuse to push non-secret applied patches
            if self.mq.applied and self.mq.checkapplied and not pushop.force:
                outapplied = [e.node for e in self.mq.applied]
                if pushop.revs:
                    # Assume applied patches have no non-patch descendants and
                    # are not on remote already. Filtering any changeset not
                    # pushed.
                    heads = set(pushop.revs)
                    for node in reversed(outapplied):
                        if node in heads:
                            break
                        else:
                            outapplied.pop()
                # looking for pushed and shared changeset
                for node in outapplied:
                    if self[node].phase() < phases.secret:
                        raise error.Abort(_('source has mq patches applied'))
                # no non-secret patches pushed
            super(mqrepo, self).checkpush(pushop)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                # for now ignore filtering business
                self.unfiltered().changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # do not add fake tags for filtered revisions
            included = self.changelog.hasnode
            mqtags = [mqt for mqt in mqtags if included(mqt[0])]
            if not mqtags:
                return result

            # synthesize the qtip/qbase/qparent convenience tags
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

    if repo.local():
        repo.__class__ = mqrepo

        repo._phasedefaults.append(mqphasedefaults)
3597
3597
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrapper for 'hg import' refusing to run over an applied mq patch."""
    guarded = util.safehasattr(repo, 'abortifwdirpatched')
    if guarded and not kwargs.get(r'no_commit', False):
        repo.abortifwdirpatched(_('cannot import over an applied patch'),
                                kwargs.get(r'force'))
    return orig(ui, repo, *args, **kwargs)
3604
3604
def mqinit(orig, ui, *args, **kwargs):
    """Wrapper for 'hg init': with --mq, initialize the patch repository."""
    if not kwargs.pop(r'mq', None):
        # plain init, no --mq: defer to the wrapped command
        return orig(ui, *args, **kwargs)

    if not args:
        repopath = cmdutil.findrepo(encoding.getcwd())
        if not repopath:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
    else:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise error.Abort(_('only a local queue repository '
                                'may be initialized'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
3623
3623
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # pop the flag first: some commands do not like unknown options
    if not kwargs.pop(r'mq', None):
        return orig(ui, repo, *args, **kwargs)

    queuerepo = repo.mq.qrepo()
    if not queuerepo:
        raise error.Abort(_('no queue repository'))
    return orig(queuerepo.ui, queuerepo, *args, **kwargs)
3638
3638
def summaryhook(ui, repo):
    """Append applied/unapplied mq patch counts to 'hg summary' output."""
    q = repo.mq
    applied = len(q.applied)
    unapplied = len(q.unapplied(repo))
    parts = []
    if applied:
        parts.append(ui.label(_("%d applied"), 'qseries.applied') % applied)
    if unapplied:
        parts.append(ui.label(_("%d unapplied"), 'qseries.unapplied')
                     % unapplied)
    if parts:
        # i18n: column positioning for "hg summary"
        ui.write(_("mq: %s\n") % ', '.join(parts))
    else:
        # i18n: column positioning for "hg summary"
        ui.note(_("mq: (empty queue)\n"))
3653
3653
revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('mq()')
def revsetmq(repo, subset, x):
    """Changesets managed by MQ.
    """
    revsetlang.getargs(x, 0, 0, _("mq takes no arguments"))
    # revision numbers of all currently applied patches
    applied = set(repo[entry.node].rev() for entry in repo.mq.applied)
    return smartset.baseset([rev for rev in subset if rev in applied])

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]
3666
3666
def extsetup(ui):
    # Ensure mq wrappers are called first, regardless of extension load order by
    # NOT wrapping in uisetup() and instead deferring to init stage two here.
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)
    cmdutil.summaryhooks.add('mq', summaryhook)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    def wraptable(cmdtable):
        # wrap every repo-using command with mqcommand and advertise --mq
        for alias, cmdentry in cmdtable.iteritems():
            name = cmdutil.parsealiases(alias)[0]
            if cmdentry[0].norepo:
                continue
            wrapped = extensions.wrapcommand(cmdtable, name, mqcommand)
            wrapped[1].extend(mqopt)

    wraptable(commands.table)

    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            wraptable(getattr(extmodule, 'cmdtable', {}))
3692
3692
# color/effects used by mq output labels (see 'hg help color')
colortable = {
    'qguard.negative': 'red',
    'qguard.positive': 'yellow',
    'qguard.unguarded': 'green',
    'qseries.applied': 'blue bold underline',
    'qseries.guarded': 'black bold',
    'qseries.missing': 'red bold',
    'qseries.unapplied': 'black bold',
}
@@ -1,3040 +1,3040 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
# convenience re-exports kept for callers of this module
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# Registry of (path, vfs-location) tuples for filecache-tracked files.
# vfs-location is 'plain' for vfs relative paths and '' for svfs ones.
_cachedfiles = set()
87
87
class _basefilecache(scmutil.filecache):
    """Filecache descriptor that always operates on the unfiltered repo.

    All filecache usage on repo is done for logic that should be
    unfiltered, so every access is redirected to ``repo.unfiltered()``.
    """

    def __get__(self, repo, type=None):
        if repo is None:
            # class-level access: return the descriptor itself
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)

    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)

    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())
99
99
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""

    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        # register each tracked path as living in the plain (working) vfs
        for p in paths:
            _cachedfiles.add((p, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)
109
109
class storecache(_basefilecache):
    """filecache for files in the store"""

    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        # register each tracked path as living in the store vfs ('')
        for p in paths:
            _cachedfiles.add((p, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)
119
119
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    entry = repo.unfiltered()._filecache.get(name, None)
    if entry:
        return entry.obj, True
    return None, False
129
129
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # filtered view: read the attribute from the unfiltered repo
            # so the value is computed and cached exactly once
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
138
138
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # cache on the (possibly filtered) repo view itself
        object.__setattr__(obj, self.name, value)
144
144
145
145
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
149
149
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered view
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
155
155
# wire-protocol capabilities a modern local peer advertises
moderncaps = {'branchmap', 'getbundle', 'known', 'lookup', 'pushkey',
              'unbundle'}
# legacy peers additionally speak changegroupsubset
legacycaps = moderncaps | {'changegroupsubset'}
159
159
@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    # Command executor for localpeer: runs wire-protocol commands by
    # calling the same-named method on the peer directly and wrapping the
    # result (or exception) in an already-resolved future.
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        # enforce the executor lifecycle: no new commands after
        # sendcommands() or close()
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            # stash the exception on the future instead of raising, so
            # callers observe failures via future.result()
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        # commands already ran synchronously; just flip the state flag
        self._sent = True

    def close(self):
        self._closed = True
202
202
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        # talk to the 'served' repo view so filtered changesets stay
        # invisible, mirroring what a remote peer would see
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.
330
330
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Restrict the advertised capabilities to the legacy set.
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        # Delegate ancestry resolution for (base, head) pairs to the repo.
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        # Bundle everything between ``nodes`` and the repository heads.
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        # Same as changegroup(), but bounded by the requested heads.
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.
358
358
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Repositories with the sparserevlog feature may contain delta chains that
# span a large portion of a revlog.  Sparse reading splits such spans into
# smaller pieces before reading them, keeping memory usage bounded; without
# this capability, a reader would load the whole span at once, including
# every intermediate revision irrelevant to the chain being reconstructed.
# Once a repository has enabled sparse-read it therefore becomes a hard
# requirement.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements.  Only functions
# defined in loaded extensions are called.
#
# Each function receives the set of requirement strings this process is
# capable of opening and typically adds entries for requirements the
# extension knows how to handle.
featuresetupfuncs = set()
380
380
def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository, as defined by
    ``REPO_INTERFACES``.  Each factory produces a type implementing one
    interface; the accumulated types become base classes of a
    dynamically-derived class that is instantiated as the repository.

    Extensions should wrap these factory functions to customize repository
    type creation.  Note that an extension's wrapped function may be called
    even if that extension is not loaded for the repo being constructed.
    Extensions should check whether their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op
    if not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for the .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory.  Anything else
    # is an error.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # The .hg/requires file is a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use the
    # repository.  It was introduced in Mercurial 0.9.2, so very old
    # repositories may lack one; a missing file means no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options that
    # influence repository construction.  Attempt to load it and process
    # any new extensions it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # First validate that every requirement is known...
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # ...then validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories
    # with shared storage.  If storage is shared, we should also test for
    # requirements compatibility in the pointed-to repo, which entails
    # loading that repo's .hg/hgrc (it may enable extensions needed to open
    # it).  This is complicated because we don't want the other hgrc to
    # overwrite settings in this one.
    #
    # The bug is somewhat mitigated by the fact that we copy .hg/requires
    # when sharing repos.  But if a requirement is added after the share is
    # performed, introducing a new requirement for the opener, we won't see
    # it and could hit a run-time error interacting with that shared store.

    # At this point we know we should be capable of opening the repository.
    # Get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data; how it is
    # accessed is determined by requirements.  ``shared`` / ``relshared``
    # indicate the store lives at the path in the ``.hg/sharedpath`` file —
    # absolute for ``shared``, relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store layout has changed over time and is dictated by
    # requirements; the store interface abstracts the differences.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Resolve the type for the repository object by repeatedly calling
    # factory functions that produce types for specific aspects of the
    # repo's operation.  The aggregate returned types become base classes
    # for a dynamically-derived type representing the new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # Pass all potentially useful state so extensions have maximum
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows characters in type names that wouldn't be recognized as
    # Python symbols in source code; abuse that to embed rich information
    # about the constructed repo in its class name.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)
579
579
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    Called during repository loading immediately after the .hg/hgrc file is
    loaded and before per-repo extensions are loaded.

    Can be used to validate configs, automatically add options (including
    extensions) based on requirements, etc.
    """

    # Requirement -> extensions to enable automatically when the
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement in requirements:
            for name in names:
                # Only auto-enable when the user hasn't configured the
                # extension explicitly.
                if not ui.hasconfig(b'extensions', name):
                    ui.setconfig(b'extensions', name, b'', source='autoload')
604
604
def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Seed with everything this file supports natively.
    supported = set(localrepository._basesupported)

    # Run ``featuresetupfuncs`` entries belonging to extensions that are
    # actually loaded for this ui instance.
    loadedmodules = {m.__name__ for n, m in extensions.extensions(ui)}
    for fn in featuresetupfuncs:
        if fn.__module__ in loadedmodules:
            fn(ui, supported)

    # Derive additional requirements from registered compression engines
    # that define a revlog header.
    for name in util.compengines:
        if util.compengines[name].revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported
625
625
def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements.  Raises ``error.RequirementError`` if
    any requirement in that set is not recognized by currently loaded code,
    or if the requirements file appears corrupt.
    """
    unknown = set()

    for req in requirements:
        if req in supported:
            continue

        # A requirement that doesn't start with an alphanumeric byte is a
        # sign the requires file itself is damaged, not merely unknown.
        if not req or not req[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        unknown.add(req)

    if unknown:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(unknown)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))
652
652
def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or may require
    config options that aren't enabled.  Called during repository opening to
    ensure the set of requirements needed to open a repository is sane and
    compatible with config options.

    Extensions can monkeypatch this function to perform additional checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extensions to access'))
670
670
def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    # Without the ``store`` requirement, fall back to the basic layout.
    if b'store' not in requirements:
        return storemod.basicstore(path, vfstype)

    # ``fncache`` (optionally with ``dotencode``) selects the modern store.
    if b'fncache' in requirements:
        return storemod.fncachestore(path, vfstype,
                                     b'dotencode' in requirements)

    return storemod.encodedstore(path, vfstype)
681
681
def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, assume the repo uses revlog version 0.  That format is
    # ancient and we don't bother parsing opener options for it, as they
    # would be meaningless on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements,
                                                    features))

    return options
706
706
707 def resolverevlogstorevfsoptions(ui, requirements, features):
707 def resolverevlogstorevfsoptions(ui, requirements, features):
708 """Resolve opener options specific to revlogs."""
708 """Resolve opener options specific to revlogs."""
709
709
710 options = {}
710 options = {}
711 options[b'flagprocessors'] = {}
711 options[b'flagprocessors'] = {}
712
712
713 if b'revlogv1' in requirements:
713 if b'revlogv1' in requirements:
714 options[b'revlogv1'] = True
714 options[b'revlogv1'] = True
715 if REVLOGV2_REQUIREMENT in requirements:
715 if REVLOGV2_REQUIREMENT in requirements:
716 options[b'revlogv2'] = True
716 options[b'revlogv2'] = True
717
717
718 if b'generaldelta' in requirements:
718 if b'generaldelta' in requirements:
719 options[b'generaldelta'] = True
719 options[b'generaldelta'] = True
720
720
721 # experimental config: format.chunkcachesize
721 # experimental config: format.chunkcachesize
722 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
722 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
723 if chunkcachesize is not None:
723 if chunkcachesize is not None:
724 options[b'chunkcachesize'] = chunkcachesize
724 options[b'chunkcachesize'] = chunkcachesize
725
725
726 deltabothparents = ui.configbool(b'storage',
726 deltabothparents = ui.configbool(b'storage',
727 b'revlog.optimize-delta-parent-choice')
727 b'revlog.optimize-delta-parent-choice')
728 options[b'deltabothparents'] = deltabothparents
728 options[b'deltabothparents'] = deltabothparents
729
729
730 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
730 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
731
731
732 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
732 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
733 if 0 <= chainspan:
733 if 0 <= chainspan:
734 options[b'maxdeltachainspan'] = chainspan
734 options[b'maxdeltachainspan'] = chainspan
735
735
736 mmapindexthreshold = ui.configbytes(b'experimental',
736 mmapindexthreshold = ui.configbytes(b'experimental',
737 b'mmapindexthreshold')
737 b'mmapindexthreshold')
738 if mmapindexthreshold is not None:
738 if mmapindexthreshold is not None:
739 options[b'mmapindexthreshold'] = mmapindexthreshold
739 options[b'mmapindexthreshold'] = mmapindexthreshold
740
740
741 withsparseread = ui.configbool(b'experimental', b'sparse-read')
741 withsparseread = ui.configbool(b'experimental', b'sparse-read')
742 srdensitythres = float(ui.config(b'experimental',
742 srdensitythres = float(ui.config(b'experimental',
743 b'sparse-read.density-threshold'))
743 b'sparse-read.density-threshold'))
744 srmingapsize = ui.configbytes(b'experimental',
744 srmingapsize = ui.configbytes(b'experimental',
745 b'sparse-read.min-gap-size')
745 b'sparse-read.min-gap-size')
746 options[b'with-sparse-read'] = withsparseread
746 options[b'with-sparse-read'] = withsparseread
747 options[b'sparse-read-density-threshold'] = srdensitythres
747 options[b'sparse-read-density-threshold'] = srdensitythres
748 options[b'sparse-read-min-gap-size'] = srmingapsize
748 options[b'sparse-read-min-gap-size'] = srmingapsize
749
749
750 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
750 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
751 options[b'sparse-revlog'] = sparserevlog
751 options[b'sparse-revlog'] = sparserevlog
752 if sparserevlog:
752 if sparserevlog:
753 options[b'generaldelta'] = True
753 options[b'generaldelta'] = True
754
754
755 maxchainlen = None
755 maxchainlen = None
756 if sparserevlog:
756 if sparserevlog:
757 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
757 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
758 # experimental config: format.maxchainlen
758 # experimental config: format.maxchainlen
759 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
759 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
760 if maxchainlen is not None:
760 if maxchainlen is not None:
761 options[b'maxchainlen'] = maxchainlen
761 options[b'maxchainlen'] = maxchainlen
762
762
763 for r in requirements:
763 for r in requirements:
764 if r.startswith(b'exp-compression-'):
764 if r.startswith(b'exp-compression-'):
765 options[b'compengine'] = r[len(b'exp-compression-'):]
765 options[b'compengine'] = r[len(b'exp-compression-'):]
766
766
767 if repository.NARROW_REQUIREMENT in requirements:
767 if repository.NARROW_REQUIREMENT in requirements:
768 options[b'enableellipsis'] = True
768 options[b'enableellipsis'] = True
769
769
770 return options
770 return options
771
771
772 def makemain(**kwargs):
772 def makemain(**kwargs):
773 """Produce a type conforming to ``ilocalrepositorymain``."""
773 """Produce a type conforming to ``ilocalrepositorymain``."""
774 return localrepository
774 return localrepository
775
775
776 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
776 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
777 class revlogfilestorage(object):
777 class revlogfilestorage(object):
778 """File storage when using revlogs."""
778 """File storage when using revlogs."""
779
779
780 def file(self, path):
780 def file(self, path):
781 if path[0] == b'/':
781 if path[0] == b'/':
782 path = path[1:]
782 path = path[1:]
783
783
784 return filelog.filelog(self.svfs, path)
784 return filelog.filelog(self.svfs, path)
785
785
786 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
786 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
787 class revlognarrowfilestorage(object):
787 class revlognarrowfilestorage(object):
788 """File storage when using revlogs and narrow files."""
788 """File storage when using revlogs and narrow files."""
789
789
790 def file(self, path):
790 def file(self, path):
791 if path[0] == b'/':
791 if path[0] == b'/':
792 path = path[1:]
792 path = path[1:]
793
793
794 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
794 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
795
795
796 def makefilestorage(requirements, features, **kwargs):
796 def makefilestorage(requirements, features, **kwargs):
797 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
797 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
798 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
798 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
799 features.add(repository.REPO_FEATURE_STREAM_CLONE)
799 features.add(repository.REPO_FEATURE_STREAM_CLONE)
800
800
801 if repository.NARROW_REQUIREMENT in requirements:
801 if repository.NARROW_REQUIREMENT in requirements:
802 return revlognarrowfilestorage
802 return revlognarrowfilestorage
803 else:
803 else:
804 return revlogfilestorage
804 return revlogfilestorage
805
805
806 # List of repository interfaces and factory functions for them. Each
806 # List of repository interfaces and factory functions for them. Each
807 # will be called in order during ``makelocalrepository()`` to iteratively
807 # will be called in order during ``makelocalrepository()`` to iteratively
808 # derive the final type for a local repository instance. We capture the
808 # derive the final type for a local repository instance. We capture the
809 # function as a lambda so we don't hold a reference and the module-level
809 # function as a lambda so we don't hold a reference and the module-level
810 # functions can be wrapped.
810 # functions can be wrapped.
811 REPO_INTERFACES = [
811 REPO_INTERFACES = [
812 (repository.ilocalrepositorymain, lambda: makemain),
812 (repository.ilocalrepositorymain, lambda: makemain),
813 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
813 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
814 ]
814 ]
815
815
816 @interfaceutil.implementer(repository.ilocalrepositorymain)
816 @interfaceutil.implementer(repository.ilocalrepositorymain)
817 class localrepository(object):
817 class localrepository(object):
818 """Main class for representing local repositories.
818 """Main class for representing local repositories.
819
819
820 All local repositories are instances of this class.
820 All local repositories are instances of this class.
821
821
822 Constructed on its own, instances of this class are not usable as
822 Constructed on its own, instances of this class are not usable as
823 repository objects. To obtain a usable repository object, call
823 repository objects. To obtain a usable repository object, call
824 ``hg.repository()``, ``localrepo.instance()``, or
824 ``hg.repository()``, ``localrepo.instance()``, or
825 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
825 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
826 ``instance()`` adds support for creating new repositories.
826 ``instance()`` adds support for creating new repositories.
827 ``hg.repository()`` adds more extension integration, including calling
827 ``hg.repository()`` adds more extension integration, including calling
828 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
828 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
829 used.
829 used.
830 """
830 """
831
831
832 # obsolete experimental requirements:
832 # obsolete experimental requirements:
833 # - manifestv2: An experimental new manifest format that allowed
833 # - manifestv2: An experimental new manifest format that allowed
834 # for stem compression of long paths. Experiment ended up not
834 # for stem compression of long paths. Experiment ended up not
835 # being successful (repository sizes went up due to worse delta
835 # being successful (repository sizes went up due to worse delta
836 # chains), and the code was deleted in 4.6.
836 # chains), and the code was deleted in 4.6.
837 supportedformats = {
837 supportedformats = {
838 'revlogv1',
838 'revlogv1',
839 'generaldelta',
839 'generaldelta',
840 'treemanifest',
840 'treemanifest',
841 REVLOGV2_REQUIREMENT,
841 REVLOGV2_REQUIREMENT,
842 SPARSEREVLOG_REQUIREMENT,
842 SPARSEREVLOG_REQUIREMENT,
843 }
843 }
844 _basesupported = supportedformats | {
844 _basesupported = supportedformats | {
845 'store',
845 'store',
846 'fncache',
846 'fncache',
847 'shared',
847 'shared',
848 'relshared',
848 'relshared',
849 'dotencode',
849 'dotencode',
850 'exp-sparse',
850 'exp-sparse',
851 'internal-phase'
851 'internal-phase'
852 }
852 }
853
853
854 # list of prefix for file which can be written without 'wlock'
854 # list of prefix for file which can be written without 'wlock'
855 # Extensions should extend this list when needed
855 # Extensions should extend this list when needed
856 _wlockfreeprefix = {
856 _wlockfreeprefix = {
857 # We migh consider requiring 'wlock' for the next
857 # We migh consider requiring 'wlock' for the next
858 # two, but pretty much all the existing code assume
858 # two, but pretty much all the existing code assume
859 # wlock is not needed so we keep them excluded for
859 # wlock is not needed so we keep them excluded for
860 # now.
860 # now.
861 'hgrc',
861 'hgrc',
862 'requires',
862 'requires',
863 # XXX cache is a complicatged business someone
863 # XXX cache is a complicatged business someone
864 # should investigate this in depth at some point
864 # should investigate this in depth at some point
865 'cache/',
865 'cache/',
866 # XXX shouldn't be dirstate covered by the wlock?
866 # XXX shouldn't be dirstate covered by the wlock?
867 'dirstate',
867 'dirstate',
868 # XXX bisect was still a bit too messy at the time
868 # XXX bisect was still a bit too messy at the time
869 # this changeset was introduced. Someone should fix
869 # this changeset was introduced. Someone should fix
870 # the remainig bit and drop this line
870 # the remainig bit and drop this line
871 'bisect.state',
871 'bisect.state',
872 }
872 }
873
873
874 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
874 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
875 supportedrequirements, sharedpath, store, cachevfs,
875 supportedrequirements, sharedpath, store, cachevfs,
876 features, intents=None):
876 features, intents=None):
877 """Create a new local repository instance.
877 """Create a new local repository instance.
878
878
879 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
879 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
880 or ``localrepo.makelocalrepository()`` for obtaining a new repository
880 or ``localrepo.makelocalrepository()`` for obtaining a new repository
881 object.
881 object.
882
882
883 Arguments:
883 Arguments:
884
884
885 baseui
885 baseui
886 ``ui.ui`` instance that ``ui`` argument was based off of.
886 ``ui.ui`` instance that ``ui`` argument was based off of.
887
887
888 ui
888 ui
889 ``ui.ui`` instance for use by the repository.
889 ``ui.ui`` instance for use by the repository.
890
890
891 origroot
891 origroot
892 ``bytes`` path to working directory root of this repository.
892 ``bytes`` path to working directory root of this repository.
893
893
894 wdirvfs
894 wdirvfs
895 ``vfs.vfs`` rooted at the working directory.
895 ``vfs.vfs`` rooted at the working directory.
896
896
897 hgvfs
897 hgvfs
898 ``vfs.vfs`` rooted at .hg/
898 ``vfs.vfs`` rooted at .hg/
899
899
900 requirements
900 requirements
901 ``set`` of bytestrings representing repository opening requirements.
901 ``set`` of bytestrings representing repository opening requirements.
902
902
903 supportedrequirements
903 supportedrequirements
904 ``set`` of bytestrings representing repository requirements that we
904 ``set`` of bytestrings representing repository requirements that we
905 know how to open. May be a supetset of ``requirements``.
905 know how to open. May be a supetset of ``requirements``.
906
906
907 sharedpath
907 sharedpath
908 ``bytes`` Defining path to storage base directory. Points to a
908 ``bytes`` Defining path to storage base directory. Points to a
909 ``.hg/`` directory somewhere.
909 ``.hg/`` directory somewhere.
910
910
911 store
911 store
912 ``store.basicstore`` (or derived) instance providing access to
912 ``store.basicstore`` (or derived) instance providing access to
913 versioned storage.
913 versioned storage.
914
914
915 cachevfs
915 cachevfs
916 ``vfs.vfs`` used for cache files.
916 ``vfs.vfs`` used for cache files.
917
917
918 features
918 features
919 ``set`` of bytestrings defining features/capabilities of this
919 ``set`` of bytestrings defining features/capabilities of this
920 instance.
920 instance.
921
921
922 intents
922 intents
923 ``set`` of system strings indicating what this repo will be used
923 ``set`` of system strings indicating what this repo will be used
924 for.
924 for.
925 """
925 """
926 self.baseui = baseui
926 self.baseui = baseui
927 self.ui = ui
927 self.ui = ui
928 self.origroot = origroot
928 self.origroot = origroot
929 # vfs rooted at working directory.
929 # vfs rooted at working directory.
930 self.wvfs = wdirvfs
930 self.wvfs = wdirvfs
931 self.root = wdirvfs.base
931 self.root = wdirvfs.base
932 # vfs rooted at .hg/. Used to access most non-store paths.
932 # vfs rooted at .hg/. Used to access most non-store paths.
933 self.vfs = hgvfs
933 self.vfs = hgvfs
934 self.path = hgvfs.base
934 self.path = hgvfs.base
935 self.requirements = requirements
935 self.requirements = requirements
936 self.supported = supportedrequirements
936 self.supported = supportedrequirements
937 self.sharedpath = sharedpath
937 self.sharedpath = sharedpath
938 self.store = store
938 self.store = store
939 self.cachevfs = cachevfs
939 self.cachevfs = cachevfs
940 self.features = features
940 self.features = features
941
941
942 self.filtername = None
942 self.filtername = None
943
943
944 if (self.ui.configbool('devel', 'all-warnings') or
944 if (self.ui.configbool('devel', 'all-warnings') or
945 self.ui.configbool('devel', 'check-locks')):
945 self.ui.configbool('devel', 'check-locks')):
946 self.vfs.audit = self._getvfsward(self.vfs.audit)
946 self.vfs.audit = self._getvfsward(self.vfs.audit)
947 # A list of callback to shape the phase if no data were found.
947 # A list of callback to shape the phase if no data were found.
948 # Callback are in the form: func(repo, roots) --> processed root.
948 # Callback are in the form: func(repo, roots) --> processed root.
949 # This list it to be filled by extension during repo setup
949 # This list it to be filled by extension during repo setup
950 self._phasedefaults = []
950 self._phasedefaults = []
951
951
952 color.setup(self.ui)
952 color.setup(self.ui)
953
953
954 self.spath = self.store.path
954 self.spath = self.store.path
955 self.svfs = self.store.vfs
955 self.svfs = self.store.vfs
956 self.sjoin = self.store.join
956 self.sjoin = self.store.join
957 if (self.ui.configbool('devel', 'all-warnings') or
957 if (self.ui.configbool('devel', 'all-warnings') or
958 self.ui.configbool('devel', 'check-locks')):
958 self.ui.configbool('devel', 'check-locks')):
959 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
959 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
960 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
960 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
961 else: # standard vfs
961 else: # standard vfs
962 self.svfs.audit = self._getsvfsward(self.svfs.audit)
962 self.svfs.audit = self._getsvfsward(self.svfs.audit)
963
963
964 self._dirstatevalidatewarned = False
964 self._dirstatevalidatewarned = False
965
965
966 self._branchcaches = {}
966 self._branchcaches = {}
967 self._revbranchcache = None
967 self._revbranchcache = None
968 self._filterpats = {}
968 self._filterpats = {}
969 self._datafilters = {}
969 self._datafilters = {}
970 self._transref = self._lockref = self._wlockref = None
970 self._transref = self._lockref = self._wlockref = None
971
971
972 # A cache for various files under .hg/ that tracks file changes,
972 # A cache for various files under .hg/ that tracks file changes,
973 # (used by the filecache decorator)
973 # (used by the filecache decorator)
974 #
974 #
975 # Maps a property name to its util.filecacheentry
975 # Maps a property name to its util.filecacheentry
976 self._filecache = {}
976 self._filecache = {}
977
977
978 # hold sets of revision to be filtered
978 # hold sets of revision to be filtered
979 # should be cleared when something might have changed the filter value:
979 # should be cleared when something might have changed the filter value:
980 # - new changesets,
980 # - new changesets,
981 # - phase change,
981 # - phase change,
982 # - new obsolescence marker,
982 # - new obsolescence marker,
983 # - working directory parent change,
983 # - working directory parent change,
984 # - bookmark changes
984 # - bookmark changes
985 self.filteredrevcache = {}
985 self.filteredrevcache = {}
986
986
987 # post-dirstate-status hooks
987 # post-dirstate-status hooks
988 self._postdsstatus = []
988 self._postdsstatus = []
989
989
990 # generic mapping between names and nodes
990 # generic mapping between names and nodes
991 self.names = namespaces.namespaces()
991 self.names = namespaces.namespaces()
992
992
993 # Key to signature value.
993 # Key to signature value.
994 self._sparsesignaturecache = {}
994 self._sparsesignaturecache = {}
995 # Signature to cached matcher instance.
995 # Signature to cached matcher instance.
996 self._sparsematchercache = {}
996 self._sparsematchercache = {}
997
997
998 def _getvfsward(self, origfunc):
998 def _getvfsward(self, origfunc):
999 """build a ward for self.vfs"""
999 """build a ward for self.vfs"""
1000 rref = weakref.ref(self)
1000 rref = weakref.ref(self)
1001 def checkvfs(path, mode=None):
1001 def checkvfs(path, mode=None):
1002 ret = origfunc(path, mode=mode)
1002 ret = origfunc(path, mode=mode)
1003 repo = rref()
1003 repo = rref()
1004 if (repo is None
1004 if (repo is None
1005 or not util.safehasattr(repo, '_wlockref')
1005 or not util.safehasattr(repo, '_wlockref')
1006 or not util.safehasattr(repo, '_lockref')):
1006 or not util.safehasattr(repo, '_lockref')):
1007 return
1007 return
1008 if mode in (None, 'r', 'rb'):
1008 if mode in (None, 'r', 'rb'):
1009 return
1009 return
1010 if path.startswith(repo.path):
1010 if path.startswith(repo.path):
1011 # truncate name relative to the repository (.hg)
1011 # truncate name relative to the repository (.hg)
1012 path = path[len(repo.path) + 1:]
1012 path = path[len(repo.path) + 1:]
1013 if path.startswith('cache/'):
1013 if path.startswith('cache/'):
1014 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1014 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1015 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1015 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
1016 if path.startswith('journal.'):
1016 if path.startswith('journal.'):
1017 # journal is covered by 'lock'
1017 # journal is covered by 'lock'
1018 if repo._currentlock(repo._lockref) is None:
1018 if repo._currentlock(repo._lockref) is None:
1019 repo.ui.develwarn('write with no lock: "%s"' % path,
1019 repo.ui.develwarn('write with no lock: "%s"' % path,
1020 stacklevel=2, config='check-locks')
1020 stacklevel=2, config='check-locks')
1021 elif repo._currentlock(repo._wlockref) is None:
1021 elif repo._currentlock(repo._wlockref) is None:
1022 # rest of vfs files are covered by 'wlock'
1022 # rest of vfs files are covered by 'wlock'
1023 #
1023 #
1024 # exclude special files
1024 # exclude special files
1025 for prefix in self._wlockfreeprefix:
1025 for prefix in self._wlockfreeprefix:
1026 if path.startswith(prefix):
1026 if path.startswith(prefix):
1027 return
1027 return
1028 repo.ui.develwarn('write with no wlock: "%s"' % path,
1028 repo.ui.develwarn('write with no wlock: "%s"' % path,
1029 stacklevel=2, config='check-locks')
1029 stacklevel=2, config='check-locks')
1030 return ret
1030 return ret
1031 return checkvfs
1031 return checkvfs
1032
1032
1033 def _getsvfsward(self, origfunc):
1033 def _getsvfsward(self, origfunc):
1034 """build a ward for self.svfs"""
1034 """build a ward for self.svfs"""
1035 rref = weakref.ref(self)
1035 rref = weakref.ref(self)
1036 def checksvfs(path, mode=None):
1036 def checksvfs(path, mode=None):
1037 ret = origfunc(path, mode=mode)
1037 ret = origfunc(path, mode=mode)
1038 repo = rref()
1038 repo = rref()
1039 if repo is None or not util.safehasattr(repo, '_lockref'):
1039 if repo is None or not util.safehasattr(repo, '_lockref'):
1040 return
1040 return
1041 if mode in (None, 'r', 'rb'):
1041 if mode in (None, 'r', 'rb'):
1042 return
1042 return
1043 if path.startswith(repo.sharedpath):
1043 if path.startswith(repo.sharedpath):
1044 # truncate name relative to the repository (.hg)
1044 # truncate name relative to the repository (.hg)
1045 path = path[len(repo.sharedpath) + 1:]
1045 path = path[len(repo.sharedpath) + 1:]
1046 if repo._currentlock(repo._lockref) is None:
1046 if repo._currentlock(repo._lockref) is None:
1047 repo.ui.develwarn('write with no lock: "%s"' % path,
1047 repo.ui.develwarn('write with no lock: "%s"' % path,
1048 stacklevel=3)
1048 stacklevel=3)
1049 return ret
1049 return ret
1050 return checksvfs
1050 return checksvfs
1051
1051
1052 def close(self):
1052 def close(self):
1053 self._writecaches()
1053 self._writecaches()
1054
1054
1055 def _writecaches(self):
1055 def _writecaches(self):
1056 if self._revbranchcache:
1056 if self._revbranchcache:
1057 self._revbranchcache.write()
1057 self._revbranchcache.write()
1058
1058
1059 def _restrictcapabilities(self, caps):
1059 def _restrictcapabilities(self, caps):
1060 if self.ui.configbool('experimental', 'bundle2-advertise'):
1060 if self.ui.configbool('experimental', 'bundle2-advertise'):
1061 caps = set(caps)
1061 caps = set(caps)
1062 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1062 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1063 role='client'))
1063 role='client'))
1064 caps.add('bundle2=' + urlreq.quote(capsblob))
1064 caps.add('bundle2=' + urlreq.quote(capsblob))
1065 return caps
1065 return caps
1066
1066
1067 def _writerequirements(self):
1067 def _writerequirements(self):
1068 scmutil.writerequires(self.vfs, self.requirements)
1068 scmutil.writerequires(self.vfs, self.requirements)
1069
1069
1070 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1070 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1071 # self -> auditor -> self._checknested -> self
1071 # self -> auditor -> self._checknested -> self
1072
1072
1073 @property
1073 @property
1074 def auditor(self):
1074 def auditor(self):
1075 # This is only used by context.workingctx.match in order to
1075 # This is only used by context.workingctx.match in order to
1076 # detect files in subrepos.
1076 # detect files in subrepos.
1077 return pathutil.pathauditor(self.root, callback=self._checknested)
1077 return pathutil.pathauditor(self.root, callback=self._checknested)
1078
1078
1079 @property
1079 @property
1080 def nofsauditor(self):
1080 def nofsauditor(self):
1081 # This is only used by context.basectx.match in order to detect
1081 # This is only used by context.basectx.match in order to detect
1082 # files in subrepos.
1082 # files in subrepos.
1083 return pathutil.pathauditor(self.root, callback=self._checknested,
1083 return pathutil.pathauditor(self.root, callback=self._checknested,
1084 realfs=False, cached=True)
1084 realfs=False, cached=True)
1085
1085
1086 def _checknested(self, path):
1086 def _checknested(self, path):
1087 """Determine if path is a legal nested repository."""
1087 """Determine if path is a legal nested repository."""
1088 if not path.startswith(self.root):
1088 if not path.startswith(self.root):
1089 return False
1089 return False
1090 subpath = path[len(self.root) + 1:]
1090 subpath = path[len(self.root) + 1:]
1091 normsubpath = util.pconvert(subpath)
1091 normsubpath = util.pconvert(subpath)
1092
1092
1093 # XXX: Checking against the current working copy is wrong in
1093 # XXX: Checking against the current working copy is wrong in
1094 # the sense that it can reject things like
1094 # the sense that it can reject things like
1095 #
1095 #
1096 # $ hg cat -r 10 sub/x.txt
1096 # $ hg cat -r 10 sub/x.txt
1097 #
1097 #
1098 # if sub/ is no longer a subrepository in the working copy
1098 # if sub/ is no longer a subrepository in the working copy
1099 # parent revision.
1099 # parent revision.
1100 #
1100 #
1101 # However, it can of course also allow things that would have
1101 # However, it can of course also allow things that would have
1102 # been rejected before, such as the above cat command if sub/
1102 # been rejected before, such as the above cat command if sub/
1103 # is a subrepository now, but was a normal directory before.
1103 # is a subrepository now, but was a normal directory before.
1104 # The old path auditor would have rejected by mistake since it
1104 # The old path auditor would have rejected by mistake since it
1105 # panics when it sees sub/.hg/.
1105 # panics when it sees sub/.hg/.
1106 #
1106 #
1107 # All in all, checking against the working copy seems sensible
1107 # All in all, checking against the working copy seems sensible
1108 # since we want to prevent access to nested repositories on
1108 # since we want to prevent access to nested repositories on
1109 # the filesystem *now*.
1109 # the filesystem *now*.
1110 ctx = self[None]
1110 ctx = self[None]
1111 parts = util.splitpath(subpath)
1111 parts = util.splitpath(subpath)
1112 while parts:
1112 while parts:
1113 prefix = '/'.join(parts)
1113 prefix = '/'.join(parts)
1114 if prefix in ctx.substate:
1114 if prefix in ctx.substate:
1115 if prefix == normsubpath:
1115 if prefix == normsubpath:
1116 return True
1116 return True
1117 else:
1117 else:
1118 sub = ctx.sub(prefix)
1118 sub = ctx.sub(prefix)
1119 return sub.checknested(subpath[len(prefix) + 1:])
1119 return sub.checknested(subpath[len(prefix) + 1:])
1120 else:
1120 else:
1121 parts.pop()
1121 parts.pop()
1122 return False
1122 return False
1123
1123
1124 def peer(self):
1124 def peer(self):
1125 return localpeer(self) # not cached to avoid reference cycle
1125 return localpeer(self) # not cached to avoid reference cycle
1126
1126
1127 def unfiltered(self):
1127 def unfiltered(self):
1128 """Return unfiltered version of the repository
1128 """Return unfiltered version of the repository
1129
1129
1130 Intended to be overwritten by filtered repo."""
1130 Intended to be overwritten by filtered repo."""
1131 return self
1131 return self
1132
1132
1133 def filtered(self, name, visibilityexceptions=None):
1133 def filtered(self, name, visibilityexceptions=None):
1134 """Return a filtered version of a repository"""
1134 """Return a filtered version of a repository"""
1135 cls = repoview.newtype(self.unfiltered().__class__)
1135 cls = repoview.newtype(self.unfiltered().__class__)
1136 return cls(self, name, visibilityexceptions)
1136 return cls(self, name, visibilityexceptions)
1137
1137
1138 @repofilecache('bookmarks', 'bookmarks.current')
1138 @repofilecache('bookmarks', 'bookmarks.current')
1139 def _bookmarks(self):
1139 def _bookmarks(self):
1140 return bookmarks.bmstore(self)
1140 return bookmarks.bmstore(self)
1141
1141
1142 @property
1142 @property
1143 def _activebookmark(self):
1143 def _activebookmark(self):
1144 return self._bookmarks.active
1144 return self._bookmarks.active
1145
1145
1146 # _phasesets depend on changelog. what we need is to call
1146 # _phasesets depend on changelog. what we need is to call
1147 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1147 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1148 # can't be easily expressed in filecache mechanism.
1148 # can't be easily expressed in filecache mechanism.
1149 @storecache('phaseroots', '00changelog.i')
1149 @storecache('phaseroots', '00changelog.i')
1150 def _phasecache(self):
1150 def _phasecache(self):
1151 return phases.phasecache(self, self._phasedefaults)
1151 return phases.phasecache(self, self._phasedefaults)
1152
1152
1153 @storecache('obsstore')
1153 @storecache('obsstore')
1154 def obsstore(self):
1154 def obsstore(self):
1155 return obsolete.makestore(self.ui, self)
1155 return obsolete.makestore(self.ui, self)
1156
1156
1157 @storecache('00changelog.i')
1157 @storecache('00changelog.i')
1158 def changelog(self):
1158 def changelog(self):
1159 return changelog.changelog(self.svfs,
1159 return changelog.changelog(self.svfs,
1160 trypending=txnutil.mayhavepending(self.root))
1160 trypending=txnutil.mayhavepending(self.root))
1161
1161
1162 @storecache('00manifest.i')
1162 @storecache('00manifest.i')
1163 def manifestlog(self):
1163 def manifestlog(self):
1164 rootstore = manifest.manifestrevlog(self.svfs)
1164 rootstore = manifest.manifestrevlog(self.svfs)
1165 return manifest.manifestlog(self.svfs, self, rootstore)
1165 return manifest.manifestlog(self.svfs, self, rootstore)
1166
1166
1167 @repofilecache('dirstate')
1167 @repofilecache('dirstate')
1168 def dirstate(self):
1168 def dirstate(self):
1169 return self._makedirstate()
1169 return self._makedirstate()
1170
1170
1171 def _makedirstate(self):
1171 def _makedirstate(self):
1172 """Extension point for wrapping the dirstate per-repo."""
1172 """Extension point for wrapping the dirstate per-repo."""
1173 sparsematchfn = lambda: sparse.matcher(self)
1173 sparsematchfn = lambda: sparse.matcher(self)
1174
1174
1175 return dirstate.dirstate(self.vfs, self.ui, self.root,
1175 return dirstate.dirstate(self.vfs, self.ui, self.root,
1176 self._dirstatevalidate, sparsematchfn)
1176 self._dirstatevalidate, sparsematchfn)
1177
1177
1178 def _dirstatevalidate(self, node):
1178 def _dirstatevalidate(self, node):
1179 try:
1179 try:
1180 self.changelog.rev(node)
1180 self.changelog.rev(node)
1181 return node
1181 return node
1182 except error.LookupError:
1182 except error.LookupError:
1183 if not self._dirstatevalidatewarned:
1183 if not self._dirstatevalidatewarned:
1184 self._dirstatevalidatewarned = True
1184 self._dirstatevalidatewarned = True
1185 self.ui.warn(_("warning: ignoring unknown"
1185 self.ui.warn(_("warning: ignoring unknown"
1186 " working parent %s!\n") % short(node))
1186 " working parent %s!\n") % short(node))
1187 return nullid
1187 return nullid
1188
1188
1189 @storecache(narrowspec.FILENAME)
1189 @storecache(narrowspec.FILENAME)
1190 def narrowpats(self):
1190 def narrowpats(self):
1191 """matcher patterns for this repository's narrowspec
1191 """matcher patterns for this repository's narrowspec
1192
1192
1193 A tuple of (includes, excludes).
1193 A tuple of (includes, excludes).
1194 """
1194 """
1195 return narrowspec.load(self)
1195 return narrowspec.load(self)
1196
1196
1197 @storecache(narrowspec.FILENAME)
1197 @storecache(narrowspec.FILENAME)
1198 def _narrowmatch(self):
1198 def _narrowmatch(self):
1199 if repository.NARROW_REQUIREMENT not in self.requirements:
1199 if repository.NARROW_REQUIREMENT not in self.requirements:
1200 return matchmod.always(self.root, '')
1200 return matchmod.always(self.root, '')
1201 include, exclude = self.narrowpats
1201 include, exclude = self.narrowpats
1202 return narrowspec.match(self.root, include=include, exclude=exclude)
1202 return narrowspec.match(self.root, include=include, exclude=exclude)
1203
1203
    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding the the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                # NOTE(review): reaches into match's private _root/_cwd;
                # assumes all matchers expose them - confirm before changing
                em = matchmod.exact(match._root, match._cwd, match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
1222
1222
    def setnarrowpats(self, newincludes, newexcludes):
        """persist a new narrowspec and drop stale cached state"""
        narrowspec.save(self, newincludes, newexcludes)
        # the narrowspec file changed on disk; clearing file caches forces
        # the @storecache'd narrowpats/_narrowmatch to be recomputed
        self.invalidate(clearfilecache=True)
1226
1226
    def __getitem__(self, changeid):
        """Resolve ``changeid`` to a context object.

        ``changeid`` may be None (working directory context), an existing
        context (returned unchanged), a slice (list of changectx for the
        unfiltered revisions in range), an integer revision, a 20-byte
        binary node, a 40-byte hex node, or one of the symbolic names
        'null', 'tip' or '.'.

        Raises RepoLookupError (or FilteredRepoLookupError for revisions
        hidden by the current view) when the changeid cannot be resolved.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                # binary node
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                # hex node
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
1289
1289
1290 def __contains__(self, changeid):
1290 def __contains__(self, changeid):
1291 """True if the given changeid exists
1291 """True if the given changeid exists
1292
1292
1293 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1293 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1294 specified.
1294 specified.
1295 """
1295 """
1296 try:
1296 try:
1297 self[changeid]
1297 self[changeid]
1298 return True
1298 return True
1299 except error.RepoLookupError:
1299 except error.RepoLookupError:
1300 return False
1300 return False
1301
1301
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__
1306
1306
1307 def __len__(self):
1307 def __len__(self):
1308 # no need to pay the cost of repoview.changelog
1308 # no need to pay the cost of repoview.changelog
1309 unfi = self.unfiltered()
1309 unfi = self.unfiltered()
1310 return len(unfi.changelog)
1310 return len(unfi.changelog)
1311
1311
1312 def __iter__(self):
1312 def __iter__(self):
1313 return iter(self.changelog)
1313 return iter(self.changelog)
1314
1314
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        # formatspec escapes/quotes *args into the expression before parsing
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
1331
1331
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        # generator function on purpose: nothing runs until iteration starts
        for r in self.revs(expr, *args):
            yield self[r]
1343
1343
    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            # pass the ui so that configured revset aliases are honoured
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
1359
1359
1360 def url(self):
1360 def url(self):
1361 return 'file:' + self.root
1361 return 'file:' + self.root
1362
1362
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        # delegates entirely to the hook module; ``throw`` controls whether
        # a failing hook raises or merely reports
        return hook.hook(self.ui, self, name, throw, **args)
1371
1371
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily filled in by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
1394
1394
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # a filtered view may hide nodes; recompute rather than trust
            # the (filter-agnostic) property cache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
1410
1410
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        # local tags are merged in afterwards and may override globals
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is synthesized here, never stored in .hgtags
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
1445
1445
1446 def tagtype(self, tagname):
1446 def tagtype(self, tagname):
1447 '''
1447 '''
1448 return the type of the given tag. result can be:
1448 return the type of the given tag. result can be:
1449
1449
1450 'local' : a local tag
1450 'local' : a local tag
1451 'global' : a global tag
1451 'global' : a global tag
1452 None : tag does not exist
1452 None : tag does not exist
1453 '''
1453 '''
1454
1454
1455 return self._tagscache.tagtypes.get(tagname)
1455 return self._tagscache.tagtypes.get(tagname)
1456
1456
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            # sort by revision, then drop the revision from the result
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
1466
1466
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily build and cache the node -> sorted [tags] reverse map
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
1477
1477
1478 def nodebookmarks(self, node):
1478 def nodebookmarks(self, node):
1479 """return the list of bookmarks pointing to the specified node"""
1479 """return the list of bookmarks pointing to the specified node"""
1480 return self._bookmarks.names(node)
1480 return self._bookmarks.names(node)
1481
1481
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter branch cache before handing it out
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
1487
1487
    @unfilteredmethod
    def revbranchcache(self):
        """Lazily create and return the shared rev->branch cache."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
1493
1493
1494 def branchtip(self, branch, ignoremissing=False):
1494 def branchtip(self, branch, ignoremissing=False):
1495 '''return the tip node for a given branch
1495 '''return the tip node for a given branch
1496
1496
1497 If ignoremissing is True, then this method will not raise an error.
1497 If ignoremissing is True, then this method will not raise an error.
1498 This is helpful for callers that only expect None for a missing branch
1498 This is helpful for callers that only expect None for a missing branch
1499 (e.g. namespace).
1499 (e.g. namespace).
1500
1500
1501 '''
1501 '''
1502 try:
1502 try:
1503 return self.branchmap().branchtip(branch)
1503 return self.branchmap().branchtip(branch)
1504 except KeyError:
1504 except KeyError:
1505 if not ignoremissing:
1505 if not ignoremissing:
1506 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1506 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1507 else:
1507 else:
1508 pass
1508 pass
1509
1509
    def lookup(self, key):
        """Resolve a revision symbol ``key`` to its binary node."""
        return scmutil.revsymbol(self, key).node()
1512
1512
    def lookupbranch(self, key):
        """Return the branch name ``key`` designates.

        An existing branch name wins outright; otherwise ``key`` is
        resolved as a revision symbol and that revision's branch is used.
        """
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()
1518
1518
1519 def known(self, nodes):
1519 def known(self, nodes):
1520 cl = self.changelog
1520 cl = self.changelog
1521 nm = cl.nodemap
1521 nm = cl.nodemap
1522 filtered = cl.filteredrevs
1522 filtered = cl.filteredrevs
1523 result = []
1523 result = []
1524 for n in nodes:
1524 for n in nodes:
1525 r = nm.get(n)
1525 r = nm.get(n)
1526 resp = not (r is None or r in filtered)
1526 resp = not (r is None or r in filtered)
1527 result.append(resp)
1527 result.append(resp)
1528 return result
1528 return result
1529
1529
    def local(self):
        # returning self lets callers use this both as an "is local?"
        # test and to obtain the local repo object in one call
        return self
1532
1532
    def publishing(self):
        """True when this repository publishes changesets (phases.publish)."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)
1537
1537
    def cancopy(self):
        """True when this repository may be cloned by direct file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
1546
1546
1547 def shared(self):
1547 def shared(self):
1548 '''the type of shared repository (None if not shared)'''
1548 '''the type of shared repository (None if not shared)'''
1549 if self.sharedpath != self.path:
1549 if self.sharedpath != self.path:
1550 return 'store'
1550 return 'store'
1551 return None
1551 return None
1552
1552
    def wjoin(self, f, *insidef):
        """Join ``f`` (and ``insidef``) onto the working directory root."""
        return self.vfs.reljoin(self.root, f, *insidef)
1555
1555
    def setparents(self, p1, p2=nullid):
        """Set working directory parents and reconcile dirstate copy records."""
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                # no second parent: drop copy records whose source and
                # destination are both absent from the first parent
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
1571
1571
    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)
1577
1577
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
1580
1580
    def pathto(self, f, cwd=None):
        """Return repo path ``f`` made relative to ``cwd`` (via dirstate)."""
        return self.dirstate.pathto(f, cwd)
1583
1583
    def _loadfilter(self, filter):
        """Parse and cache the filter patterns for config section ``filter``
        (e.g. 'encode' or 'decode').

        Returns a list of (matcher, filterfn, params) triples; filterfn is
        always callable as fn(data, cmd, **kwargs).
        """
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered data filter; remainder of cmd is params
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # no registered filter matched: treat cmd as an external
                    # command and pipe the data through it
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]
1607
1607
1608 def _filter(self, filterpats, filename, data):
1608 def _filter(self, filterpats, filename, data):
1609 for mf, fn, cmd in filterpats:
1609 for mf, fn, cmd in filterpats:
1610 if mf(filename):
1610 if mf(filename):
1611 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1611 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1612 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1612 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1613 break
1613 break
1614
1614
1615 return data
1615 return data
1616
1616
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading file content out of the working copy
        return self._loadfilter('encode')
1620
1620
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing file content into the working copy
        return self._loadfilter('decode')
1624
1624
    def adddatafilter(self, name, filter):
        """Register callable ``filter`` under ``name`` for encode/decode use."""
        self._datafilters[name] = filter
1627
1627
1628 def wread(self, filename):
1628 def wread(self, filename):
1629 if self.wvfs.islink(filename):
1629 if self.wvfs.islink(filename):
1630 data = self.wvfs.readlink(filename)
1630 data = self.wvfs.readlink(filename)
1631 else:
1631 else:
1632 data = self.wvfs.read(filename)
1632 data = self.wvfs.read(filename)
1633 return self._filter(self._encodefilterpats, filename, data)
1633 return self._filter(self._encodefilterpats, filename, data)
1634
1634
    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            # 'l' flag: materialize as a symlink pointing at ``data``
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                # 'x' flag: set the executable bit; otherwise clear it
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
1651
1651
    def wwritedata(self, filename, data):
        """Return ``data`` as it would be written to the working directory
        (decode filters applied, nothing actually written)."""
        return self._filter(self._decodefilterpats, filename, data)
1654
1654
1655 def currenttransaction(self):
1655 def currenttransaction(self):
1656 """return the current transaction or None if non exists"""
1656 """return the current transaction or None if non exists"""
1657 if self._transref:
1657 if self._transref:
1658 tr = self._transref()
1658 tr = self._transref()
1659 else:
1659 else:
1660 tr = None
1660 tr = None
1661
1661
1662 if tr and tr.running():
1662 if tr and tr.running():
1663 return tr
1663 return tr
1664 return None
1664 return None
1665
1665
1666 def transaction(self, desc, report=None):
1666 def transaction(self, desc, report=None):
1667 if (self.ui.configbool('devel', 'all-warnings')
1667 if (self.ui.configbool('devel', 'all-warnings')
1668 or self.ui.configbool('devel', 'check-locks')):
1668 or self.ui.configbool('devel', 'check-locks')):
1669 if self._currentlock(self._lockref) is None:
1669 if self._currentlock(self._lockref) is None:
1670 raise error.ProgrammingError('transaction requires locking')
1670 raise error.ProgrammingError('transaction requires locking')
1671 tr = self.currenttransaction()
1671 tr = self.currenttransaction()
1672 if tr is not None:
1672 if tr is not None:
1673 return tr.nest(name=desc)
1673 return tr.nest(name=desc)
1674
1674
1675 # abort here if the journal already exists
1675 # abort here if the journal already exists
1676 if self.svfs.exists("journal"):
1676 if self.svfs.exists("journal"):
1677 raise error.RepoError(
1677 raise error.RepoError(
1678 _("abandoned transaction found"),
1678 _("abandoned transaction found"),
1679 hint=_("run 'hg recover' to clean up transaction"))
1679 hint=_("run 'hg recover' to clean up transaction"))
1680
1680
1681 idbase = "%.40f#%f" % (random.random(), time.time())
1681 idbase = "%.40f#%f" % (random.random(), time.time())
1682 ha = hex(hashlib.sha1(idbase).digest())
1682 ha = hex(hashlib.sha1(idbase).digest())
1683 txnid = 'TXN:' + ha
1683 txnid = 'TXN:' + ha
1684 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1684 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1685
1685
1686 self._writejournal(desc)
1686 self._writejournal(desc)
1687 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1687 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1688 if report:
1688 if report:
1689 rp = report
1689 rp = report
1690 else:
1690 else:
1691 rp = self.ui.warn
1691 rp = self.ui.warn
1692 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1692 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1693 # we must avoid cyclic reference between repo and transaction.
1693 # we must avoid cyclic reference between repo and transaction.
1694 reporef = weakref.ref(self)
1694 reporef = weakref.ref(self)
1695 # Code to track tag movement
1695 # Code to track tag movement
1696 #
1696 #
1697 # Since tags are all handled as file content, it is actually quite hard
1697 # Since tags are all handled as file content, it is actually quite hard
1698 # to track these movement from a code perspective. So we fallback to a
1698 # to track these movement from a code perspective. So we fallback to a
1699 # tracking at the repository level. One could envision to track changes
1699 # tracking at the repository level. One could envision to track changes
1700 # to the '.hgtags' file through changegroup apply but that fails to
1700 # to the '.hgtags' file through changegroup apply but that fails to
1701 # cope with case where transaction expose new heads without changegroup
1701 # cope with case where transaction expose new heads without changegroup
1702 # being involved (eg: phase movement).
1702 # being involved (eg: phase movement).
1703 #
1703 #
1704 # For now, We gate the feature behind a flag since this likely comes
1704 # For now, We gate the feature behind a flag since this likely comes
1705 # with performance impacts. The current code run more often than needed
1705 # with performance impacts. The current code run more often than needed
1706 # and do not use caches as much as it could. The current focus is on
1706 # and do not use caches as much as it could. The current focus is on
1707 # the behavior of the feature so we disable it by default. The flag
1707 # the behavior of the feature so we disable it by default. The flag
1708 # will be removed when we are happy with the performance impact.
1708 # will be removed when we are happy with the performance impact.
1709 #
1709 #
1710 # Once this feature is no longer experimental move the following
1710 # Once this feature is no longer experimental move the following
1711 # documentation to the appropriate help section:
1711 # documentation to the appropriate help section:
1712 #
1712 #
1713 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1713 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1714 # tags (new or changed or deleted tags). In addition the details of
1714 # tags (new or changed or deleted tags). In addition the details of
1715 # these changes are made available in a file at:
1715 # these changes are made available in a file at:
1716 # ``REPOROOT/.hg/changes/tags.changes``.
1716 # ``REPOROOT/.hg/changes/tags.changes``.
1717 # Make sure you check for HG_TAG_MOVED before reading that file as it
1717 # Make sure you check for HG_TAG_MOVED before reading that file as it
1718 # might exist from a previous transaction even if no tag were touched
1718 # might exist from a previous transaction even if no tag were touched
1719 # in this one. Changes are recorded in a line base format::
1719 # in this one. Changes are recorded in a line base format::
1720 #
1720 #
1721 # <action> <hex-node> <tag-name>\n
1721 # <action> <hex-node> <tag-name>\n
1722 #
1722 #
1723 # Actions are defined as follow:
1723 # Actions are defined as follow:
1724 # "-R": tag is removed,
1724 # "-R": tag is removed,
1725 # "+A": tag is added,
1725 # "+A": tag is added,
1726 # "-M": tag is moved (old value),
1726 # "-M": tag is moved (old value),
1727 # "+M": tag is moved (new value),
1727 # "+M": tag is moved (new value),
1728 tracktags = lambda x: None
1728 tracktags = lambda x: None
1729 # experimental config: experimental.hook-track-tags
1729 # experimental config: experimental.hook-track-tags
1730 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1730 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1731 if desc != 'strip' and shouldtracktags:
1731 if desc != 'strip' and shouldtracktags:
1732 oldheads = self.changelog.headrevs()
1732 oldheads = self.changelog.headrevs()
1733 def tracktags(tr2):
1733 def tracktags(tr2):
1734 repo = reporef()
1734 repo = reporef()
1735 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1735 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1736 newheads = repo.changelog.headrevs()
1736 newheads = repo.changelog.headrevs()
1737 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1737 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1738 # notes: we compare lists here.
1738 # notes: we compare lists here.
1739 # As we do it only once buiding set would not be cheaper
1739 # As we do it only once buiding set would not be cheaper
1740 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1740 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1741 if changes:
1741 if changes:
1742 tr2.hookargs['tag_moved'] = '1'
1742 tr2.hookargs['tag_moved'] = '1'
1743 with repo.vfs('changes/tags.changes', 'w',
1743 with repo.vfs('changes/tags.changes', 'w',
1744 atomictemp=True) as changesfile:
1744 atomictemp=True) as changesfile:
1745 # note: we do not register the file to the transaction
1745 # note: we do not register the file to the transaction
1746 # because we needs it to still exist on the transaction
1746 # because we needs it to still exist on the transaction
1747 # is close (for txnclose hooks)
1747 # is close (for txnclose hooks)
1748 tagsmod.writediff(changesfile, changes)
1748 tagsmod.writediff(changesfile, changes)
1749 def validate(tr2):
1749 def validate(tr2):
1750 """will run pre-closing hooks"""
1750 """will run pre-closing hooks"""
1751 # XXX the transaction API is a bit lacking here so we take a hacky
1751 # XXX the transaction API is a bit lacking here so we take a hacky
1752 # path for now
1752 # path for now
1753 #
1753 #
1754 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1754 # We cannot add this as a "pending" hooks since the 'tr.hookargs'
1755 # dict is copied before these run. In addition we needs the data
1755 # dict is copied before these run. In addition we needs the data
1756 # available to in memory hooks too.
1756 # available to in memory hooks too.
1757 #
1757 #
1758 # Moreover, we also need to make sure this runs before txnclose
1758 # Moreover, we also need to make sure this runs before txnclose
1759 # hooks and there is no "pending" mechanism that would execute
1759 # hooks and there is no "pending" mechanism that would execute
1760 # logic only if hooks are about to run.
1760 # logic only if hooks are about to run.
1761 #
1761 #
1762 # Fixing this limitation of the transaction is also needed to track
1762 # Fixing this limitation of the transaction is also needed to track
1763 # other families of changes (bookmarks, phases, obsolescence).
1763 # other families of changes (bookmarks, phases, obsolescence).
1764 #
1764 #
1765 # This will have to be fixed before we remove the experimental
1765 # This will have to be fixed before we remove the experimental
1766 # gating.
1766 # gating.
1767 tracktags(tr2)
1767 tracktags(tr2)
1768 repo = reporef()
1768 repo = reporef()
1769 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1769 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1770 scmutil.enforcesinglehead(repo, tr2, desc)
1770 scmutil.enforcesinglehead(repo, tr2, desc)
1771 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1771 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1772 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1772 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1773 args = tr.hookargs.copy()
1773 args = tr.hookargs.copy()
1774 args.update(bookmarks.preparehookargs(name, old, new))
1774 args.update(bookmarks.preparehookargs(name, old, new))
1775 repo.hook('pretxnclose-bookmark', throw=True,
1775 repo.hook('pretxnclose-bookmark', throw=True,
1776 txnname=desc,
1776 txnname=desc,
1777 **pycompat.strkwargs(args))
1777 **pycompat.strkwargs(args))
1778 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1778 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1779 cl = repo.unfiltered().changelog
1779 cl = repo.unfiltered().changelog
1780 for rev, (old, new) in tr.changes['phases'].items():
1780 for rev, (old, new) in tr.changes['phases'].items():
1781 args = tr.hookargs.copy()
1781 args = tr.hookargs.copy()
1782 node = hex(cl.node(rev))
1782 node = hex(cl.node(rev))
1783 args.update(phases.preparehookargs(node, old, new))
1783 args.update(phases.preparehookargs(node, old, new))
1784 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1784 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1785 **pycompat.strkwargs(args))
1785 **pycompat.strkwargs(args))
1786
1786
1787 repo.hook('pretxnclose', throw=True,
1787 repo.hook('pretxnclose', throw=True,
1788 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1788 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1789 def releasefn(tr, success):
1789 def releasefn(tr, success):
1790 repo = reporef()
1790 repo = reporef()
1791 if success:
1791 if success:
1792 # this should be explicitly invoked here, because
1792 # this should be explicitly invoked here, because
1793 # in-memory changes aren't written out at closing
1793 # in-memory changes aren't written out at closing
1794 # transaction, if tr.addfilegenerator (via
1794 # transaction, if tr.addfilegenerator (via
1795 # dirstate.write or so) isn't invoked while
1795 # dirstate.write or so) isn't invoked while
1796 # transaction running
1796 # transaction running
1797 repo.dirstate.write(None)
1797 repo.dirstate.write(None)
1798 else:
1798 else:
1799 # discard all changes (including ones already written
1799 # discard all changes (including ones already written
1800 # out) in this transaction
1800 # out) in this transaction
1801 narrowspec.restorebackup(self, 'journal.narrowspec')
1801 narrowspec.restorebackup(self, 'journal.narrowspec')
1802 repo.dirstate.restorebackup(None, 'journal.dirstate')
1802 repo.dirstate.restorebackup(None, 'journal.dirstate')
1803
1803
1804 repo.invalidate(clearfilecache=True)
1804 repo.invalidate(clearfilecache=True)
1805
1805
1806 tr = transaction.transaction(rp, self.svfs, vfsmap,
1806 tr = transaction.transaction(rp, self.svfs, vfsmap,
1807 "journal",
1807 "journal",
1808 "undo",
1808 "undo",
1809 aftertrans(renames),
1809 aftertrans(renames),
1810 self.store.createmode,
1810 self.store.createmode,
1811 validator=validate,
1811 validator=validate,
1812 releasefn=releasefn,
1812 releasefn=releasefn,
1813 checkambigfiles=_cachedfiles,
1813 checkambigfiles=_cachedfiles,
1814 name=desc)
1814 name=desc)
1815 tr.changes['origrepolen'] = len(self)
1815 tr.changes['origrepolen'] = len(self)
1816 tr.changes['obsmarkers'] = set()
1816 tr.changes['obsmarkers'] = set()
1817 tr.changes['phases'] = {}
1817 tr.changes['phases'] = {}
1818 tr.changes['bookmarks'] = {}
1818 tr.changes['bookmarks'] = {}
1819
1819
1820 tr.hookargs['txnid'] = txnid
1820 tr.hookargs['txnid'] = txnid
1821 # note: writing the fncache only during finalize mean that the file is
1821 # note: writing the fncache only during finalize mean that the file is
1822 # outdated when running hooks. As fncache is used for streaming clone,
1822 # outdated when running hooks. As fncache is used for streaming clone,
1823 # this is not expected to break anything that happen during the hooks.
1823 # this is not expected to break anything that happen during the hooks.
1824 tr.addfinalize('flush-fncache', self.store.write)
1824 tr.addfinalize('flush-fncache', self.store.write)
1825 def txnclosehook(tr2):
1825 def txnclosehook(tr2):
1826 """To be run if transaction is successful, will schedule a hook run
1826 """To be run if transaction is successful, will schedule a hook run
1827 """
1827 """
1828 # Don't reference tr2 in hook() so we don't hold a reference.
1828 # Don't reference tr2 in hook() so we don't hold a reference.
1829 # This reduces memory consumption when there are multiple
1829 # This reduces memory consumption when there are multiple
1830 # transactions per lock. This can likely go away if issue5045
1830 # transactions per lock. This can likely go away if issue5045
1831 # fixes the function accumulation.
1831 # fixes the function accumulation.
1832 hookargs = tr2.hookargs
1832 hookargs = tr2.hookargs
1833
1833
1834 def hookfunc():
1834 def hookfunc():
1835 repo = reporef()
1835 repo = reporef()
1836 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1836 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1837 bmchanges = sorted(tr.changes['bookmarks'].items())
1837 bmchanges = sorted(tr.changes['bookmarks'].items())
1838 for name, (old, new) in bmchanges:
1838 for name, (old, new) in bmchanges:
1839 args = tr.hookargs.copy()
1839 args = tr.hookargs.copy()
1840 args.update(bookmarks.preparehookargs(name, old, new))
1840 args.update(bookmarks.preparehookargs(name, old, new))
1841 repo.hook('txnclose-bookmark', throw=False,
1841 repo.hook('txnclose-bookmark', throw=False,
1842 txnname=desc, **pycompat.strkwargs(args))
1842 txnname=desc, **pycompat.strkwargs(args))
1843
1843
1844 if hook.hashook(repo.ui, 'txnclose-phase'):
1844 if hook.hashook(repo.ui, 'txnclose-phase'):
1845 cl = repo.unfiltered().changelog
1845 cl = repo.unfiltered().changelog
1846 phasemv = sorted(tr.changes['phases'].items())
1846 phasemv = sorted(tr.changes['phases'].items())
1847 for rev, (old, new) in phasemv:
1847 for rev, (old, new) in phasemv:
1848 args = tr.hookargs.copy()
1848 args = tr.hookargs.copy()
1849 node = hex(cl.node(rev))
1849 node = hex(cl.node(rev))
1850 args.update(phases.preparehookargs(node, old, new))
1850 args.update(phases.preparehookargs(node, old, new))
1851 repo.hook('txnclose-phase', throw=False, txnname=desc,
1851 repo.hook('txnclose-phase', throw=False, txnname=desc,
1852 **pycompat.strkwargs(args))
1852 **pycompat.strkwargs(args))
1853
1853
1854 repo.hook('txnclose', throw=False, txnname=desc,
1854 repo.hook('txnclose', throw=False, txnname=desc,
1855 **pycompat.strkwargs(hookargs))
1855 **pycompat.strkwargs(hookargs))
1856 reporef()._afterlock(hookfunc)
1856 reporef()._afterlock(hookfunc)
1857 tr.addfinalize('txnclose-hook', txnclosehook)
1857 tr.addfinalize('txnclose-hook', txnclosehook)
1858 # Include a leading "-" to make it happen before the transaction summary
1858 # Include a leading "-" to make it happen before the transaction summary
1859 # reports registered via scmutil.registersummarycallback() whose names
1859 # reports registered via scmutil.registersummarycallback() whose names
1860 # are 00-txnreport etc. That way, the caches will be warm when the
1860 # are 00-txnreport etc. That way, the caches will be warm when the
1861 # callbacks run.
1861 # callbacks run.
1862 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1862 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1863 def txnaborthook(tr2):
1863 def txnaborthook(tr2):
1864 """To be run if transaction is aborted
1864 """To be run if transaction is aborted
1865 """
1865 """
1866 reporef().hook('txnabort', throw=False, txnname=desc,
1866 reporef().hook('txnabort', throw=False, txnname=desc,
1867 **pycompat.strkwargs(tr2.hookargs))
1867 **pycompat.strkwargs(tr2.hookargs))
1868 tr.addabort('txnabort-hook', txnaborthook)
1868 tr.addabort('txnabort-hook', txnaborthook)
1869 # avoid eager cache invalidation. in-memory data should be identical
1869 # avoid eager cache invalidation. in-memory data should be identical
1870 # to stored data if transaction has no error.
1870 # to stored data if transaction has no error.
1871 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1871 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1872 self._transref = weakref.ref(tr)
1872 self._transref = weakref.ref(tr)
1873 scmutil.registersummarycallback(self, tr, desc)
1873 scmutil.registersummarycallback(self, tr, desc)
1874 return tr
1874 return tr
1875
1875
1876 def _journalfiles(self):
1876 def _journalfiles(self):
1877 return ((self.svfs, 'journal'),
1877 return ((self.svfs, 'journal'),
1878 (self.vfs, 'journal.dirstate'),
1878 (self.vfs, 'journal.dirstate'),
1879 (self.vfs, 'journal.branch'),
1879 (self.vfs, 'journal.branch'),
1880 (self.vfs, 'journal.desc'),
1880 (self.vfs, 'journal.desc'),
1881 (self.vfs, 'journal.bookmarks'),
1881 (self.vfs, 'journal.bookmarks'),
1882 (self.svfs, 'journal.phaseroots'))
1882 (self.svfs, 'journal.phaseroots'))
1883
1883
1884 def undofiles(self):
1884 def undofiles(self):
1885 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1885 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1886
1886
1887 @unfilteredmethod
1887 @unfilteredmethod
1888 def _writejournal(self, desc):
1888 def _writejournal(self, desc):
1889 self.dirstate.savebackup(None, 'journal.dirstate')
1889 self.dirstate.savebackup(None, 'journal.dirstate')
1890 narrowspec.savebackup(self, 'journal.narrowspec')
1890 narrowspec.savebackup(self, 'journal.narrowspec')
1891 self.vfs.write("journal.branch",
1891 self.vfs.write("journal.branch",
1892 encoding.fromlocal(self.dirstate.branch()))
1892 encoding.fromlocal(self.dirstate.branch()))
1893 self.vfs.write("journal.desc",
1893 self.vfs.write("journal.desc",
1894 "%d\n%s\n" % (len(self), desc))
1894 "%d\n%s\n" % (len(self), desc))
1895 self.vfs.write("journal.bookmarks",
1895 self.vfs.write("journal.bookmarks",
1896 self.vfs.tryread("bookmarks"))
1896 self.vfs.tryread("bookmarks"))
1897 self.svfs.write("journal.phaseroots",
1897 self.svfs.write("journal.phaseroots",
1898 self.svfs.tryread("phaseroots"))
1898 self.svfs.tryread("phaseroots"))
1899
1899
1900 def recover(self):
1900 def recover(self):
1901 with self.lock():
1901 with self.lock():
1902 if self.svfs.exists("journal"):
1902 if self.svfs.exists("journal"):
1903 self.ui.status(_("rolling back interrupted transaction\n"))
1903 self.ui.status(_("rolling back interrupted transaction\n"))
1904 vfsmap = {'': self.svfs,
1904 vfsmap = {'': self.svfs,
1905 'plain': self.vfs,}
1905 'plain': self.vfs,}
1906 transaction.rollback(self.svfs, vfsmap, "journal",
1906 transaction.rollback(self.svfs, vfsmap, "journal",
1907 self.ui.warn,
1907 self.ui.warn,
1908 checkambigfiles=_cachedfiles)
1908 checkambigfiles=_cachedfiles)
1909 self.invalidate()
1909 self.invalidate()
1910 return True
1910 return True
1911 else:
1911 else:
1912 self.ui.warn(_("no interrupted transaction available\n"))
1912 self.ui.warn(_("no interrupted transaction available\n"))
1913 return False
1913 return False
1914
1914
1915 def rollback(self, dryrun=False, force=False):
1915 def rollback(self, dryrun=False, force=False):
1916 wlock = lock = dsguard = None
1916 wlock = lock = dsguard = None
1917 try:
1917 try:
1918 wlock = self.wlock()
1918 wlock = self.wlock()
1919 lock = self.lock()
1919 lock = self.lock()
1920 if self.svfs.exists("undo"):
1920 if self.svfs.exists("undo"):
1921 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1921 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1922
1922
1923 return self._rollback(dryrun, force, dsguard)
1923 return self._rollback(dryrun, force, dsguard)
1924 else:
1924 else:
1925 self.ui.warn(_("no rollback information available\n"))
1925 self.ui.warn(_("no rollback information available\n"))
1926 return 1
1926 return 1
1927 finally:
1927 finally:
1928 release(dsguard, lock, wlock)
1928 release(dsguard, lock, wlock)
1929
1929
1930 @unfilteredmethod # Until we get smarter cache management
1930 @unfilteredmethod # Until we get smarter cache management
1931 def _rollback(self, dryrun, force, dsguard):
1931 def _rollback(self, dryrun, force, dsguard):
1932 ui = self.ui
1932 ui = self.ui
1933 try:
1933 try:
1934 args = self.vfs.read('undo.desc').splitlines()
1934 args = self.vfs.read('undo.desc').splitlines()
1935 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1935 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1936 if len(args) >= 3:
1936 if len(args) >= 3:
1937 detail = args[2]
1937 detail = args[2]
1938 oldtip = oldlen - 1
1938 oldtip = oldlen - 1
1939
1939
1940 if detail and ui.verbose:
1940 if detail and ui.verbose:
1941 msg = (_('repository tip rolled back to revision %d'
1941 msg = (_('repository tip rolled back to revision %d'
1942 ' (undo %s: %s)\n')
1942 ' (undo %s: %s)\n')
1943 % (oldtip, desc, detail))
1943 % (oldtip, desc, detail))
1944 else:
1944 else:
1945 msg = (_('repository tip rolled back to revision %d'
1945 msg = (_('repository tip rolled back to revision %d'
1946 ' (undo %s)\n')
1946 ' (undo %s)\n')
1947 % (oldtip, desc))
1947 % (oldtip, desc))
1948 except IOError:
1948 except IOError:
1949 msg = _('rolling back unknown transaction\n')
1949 msg = _('rolling back unknown transaction\n')
1950 desc = None
1950 desc = None
1951
1951
1952 if not force and self['.'] != self['tip'] and desc == 'commit':
1952 if not force and self['.'] != self['tip'] and desc == 'commit':
1953 raise error.Abort(
1953 raise error.Abort(
1954 _('rollback of last commit while not checked out '
1954 _('rollback of last commit while not checked out '
1955 'may lose data'), hint=_('use -f to force'))
1955 'may lose data'), hint=_('use -f to force'))
1956
1956
1957 ui.status(msg)
1957 ui.status(msg)
1958 if dryrun:
1958 if dryrun:
1959 return 0
1959 return 0
1960
1960
1961 parents = self.dirstate.parents()
1961 parents = self.dirstate.parents()
1962 self.destroying()
1962 self.destroying()
1963 vfsmap = {'plain': self.vfs, '': self.svfs}
1963 vfsmap = {'plain': self.vfs, '': self.svfs}
1964 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1964 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1965 checkambigfiles=_cachedfiles)
1965 checkambigfiles=_cachedfiles)
1966 if self.vfs.exists('undo.bookmarks'):
1966 if self.vfs.exists('undo.bookmarks'):
1967 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1967 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1968 if self.svfs.exists('undo.phaseroots'):
1968 if self.svfs.exists('undo.phaseroots'):
1969 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1969 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1970 self.invalidate()
1970 self.invalidate()
1971
1971
1972 parentgone = (parents[0] not in self.changelog.nodemap or
1972 parentgone = (parents[0] not in self.changelog.nodemap or
1973 parents[1] not in self.changelog.nodemap)
1973 parents[1] not in self.changelog.nodemap)
1974 if parentgone:
1974 if parentgone:
1975 # prevent dirstateguard from overwriting already restored one
1975 # prevent dirstateguard from overwriting already restored one
1976 dsguard.close()
1976 dsguard.close()
1977
1977
1978 narrowspec.restorebackup(self, 'undo.narrowspec')
1978 narrowspec.restorebackup(self, 'undo.narrowspec')
1979 self.dirstate.restorebackup(None, 'undo.dirstate')
1979 self.dirstate.restorebackup(None, 'undo.dirstate')
1980 try:
1980 try:
1981 branch = self.vfs.read('undo.branch')
1981 branch = self.vfs.read('undo.branch')
1982 self.dirstate.setbranch(encoding.tolocal(branch))
1982 self.dirstate.setbranch(encoding.tolocal(branch))
1983 except IOError:
1983 except IOError:
1984 ui.warn(_('named branch could not be reset: '
1984 ui.warn(_('named branch could not be reset: '
1985 'current branch is still \'%s\'\n')
1985 'current branch is still \'%s\'\n')
1986 % self.dirstate.branch())
1986 % self.dirstate.branch())
1987
1987
1988 parents = tuple([p.rev() for p in self[None].parents()])
1988 parents = tuple([p.rev() for p in self[None].parents()])
1989 if len(parents) > 1:
1989 if len(parents) > 1:
1990 ui.status(_('working directory now based on '
1990 ui.status(_('working directory now based on '
1991 'revisions %d and %d\n') % parents)
1991 'revisions %d and %d\n') % parents)
1992 else:
1992 else:
1993 ui.status(_('working directory now based on '
1993 ui.status(_('working directory now based on '
1994 'revision %d\n') % parents)
1994 'revision %d\n') % parents)
1995 mergemod.mergestate.clean(self, self['.'].node())
1995 mergemod.mergestate.clean(self, self['.'].node())
1996
1996
1997 # TODO: if we know which new heads may result from this rollback, pass
1997 # TODO: if we know which new heads may result from this rollback, pass
1998 # them to destroy(), which will prevent the branchhead cache from being
1998 # them to destroy(), which will prevent the branchhead cache from being
1999 # invalidated.
1999 # invalidated.
2000 self.destroyed()
2000 self.destroyed()
2001 return 0
2001 return 0
2002
2002
2003 def _buildcacheupdater(self, newtransaction):
2003 def _buildcacheupdater(self, newtransaction):
2004 """called during transaction to build the callback updating cache
2004 """called during transaction to build the callback updating cache
2005
2005
2006 Lives on the repository to help extension who might want to augment
2006 Lives on the repository to help extension who might want to augment
2007 this logic. For this purpose, the created transaction is passed to the
2007 this logic. For this purpose, the created transaction is passed to the
2008 method.
2008 method.
2009 """
2009 """
2010 # we must avoid cyclic reference between repo and transaction.
2010 # we must avoid cyclic reference between repo and transaction.
2011 reporef = weakref.ref(self)
2011 reporef = weakref.ref(self)
2012 def updater(tr):
2012 def updater(tr):
2013 repo = reporef()
2013 repo = reporef()
2014 repo.updatecaches(tr)
2014 repo.updatecaches(tr)
2015 return updater
2015 return updater
2016
2016
2017 @unfilteredmethod
2017 @unfilteredmethod
2018 def updatecaches(self, tr=None, full=False):
2018 def updatecaches(self, tr=None, full=False):
2019 """warm appropriate caches
2019 """warm appropriate caches
2020
2020
2021 If this function is called after a transaction closed. The transaction
2021 If this function is called after a transaction closed. The transaction
2022 will be available in the 'tr' argument. This can be used to selectively
2022 will be available in the 'tr' argument. This can be used to selectively
2023 update caches relevant to the changes in that transaction.
2023 update caches relevant to the changes in that transaction.
2024
2024
2025 If 'full' is set, make sure all caches the function knows about have
2025 If 'full' is set, make sure all caches the function knows about have
2026 up-to-date data. Even the ones usually loaded more lazily.
2026 up-to-date data. Even the ones usually loaded more lazily.
2027 """
2027 """
2028 if tr is not None and tr.hookargs.get('source') == 'strip':
2028 if tr is not None and tr.hookargs.get('source') == 'strip':
2029 # During strip, many caches are invalid but
2029 # During strip, many caches are invalid but
2030 # later call to `destroyed` will refresh them.
2030 # later call to `destroyed` will refresh them.
2031 return
2031 return
2032
2032
2033 if tr is None or tr.changes['origrepolen'] < len(self):
2033 if tr is None or tr.changes['origrepolen'] < len(self):
2034 # updating the unfiltered branchmap should refresh all the others,
2034 # updating the unfiltered branchmap should refresh all the others,
2035 self.ui.debug('updating the branch cache\n')
2035 self.ui.debug('updating the branch cache\n')
2036 branchmap.updatecache(self.filtered('served'))
2036 branchmap.updatecache(self.filtered('served'))
2037
2037
2038 if full:
2038 if full:
2039 rbc = self.revbranchcache()
2039 rbc = self.revbranchcache()
2040 for r in self.changelog:
2040 for r in self.changelog:
2041 rbc.branchinfo(r)
2041 rbc.branchinfo(r)
2042 rbc.write()
2042 rbc.write()
2043
2043
2044 # ensure the working copy parents are in the manifestfulltextcache
2044 # ensure the working copy parents are in the manifestfulltextcache
2045 for ctx in self['.'].parents():
2045 for ctx in self['.'].parents():
2046 ctx.manifest() # accessing the manifest is enough
2046 ctx.manifest() # accessing the manifest is enough
2047
2047
2048 def invalidatecaches(self):
2048 def invalidatecaches(self):
2049
2049
2050 if '_tagscache' in vars(self):
2050 if r'_tagscache' in vars(self):
2051 # can't use delattr on proxy
2051 # can't use delattr on proxy
2052 del self.__dict__['_tagscache']
2052 del self.__dict__[r'_tagscache']
2053
2053
2054 self.unfiltered()._branchcaches.clear()
2054 self.unfiltered()._branchcaches.clear()
2055 self.invalidatevolatilesets()
2055 self.invalidatevolatilesets()
2056 self._sparsesignaturecache.clear()
2056 self._sparsesignaturecache.clear()
2057
2057
2058 def invalidatevolatilesets(self):
2058 def invalidatevolatilesets(self):
2059 self.filteredrevcache.clear()
2059 self.filteredrevcache.clear()
2060 obsolete.clearobscaches(self)
2060 obsolete.clearobscaches(self)
2061
2061
2062 def invalidatedirstate(self):
2062 def invalidatedirstate(self):
2063 '''Invalidates the dirstate, causing the next call to dirstate
2063 '''Invalidates the dirstate, causing the next call to dirstate
2064 to check if it was modified since the last time it was read,
2064 to check if it was modified since the last time it was read,
2065 rereading it if it has.
2065 rereading it if it has.
2066
2066
2067 This is different to dirstate.invalidate() that it doesn't always
2067 This is different to dirstate.invalidate() that it doesn't always
2068 rereads the dirstate. Use dirstate.invalidate() if you want to
2068 rereads the dirstate. Use dirstate.invalidate() if you want to
2069 explicitly read the dirstate again (i.e. restoring it to a previous
2069 explicitly read the dirstate again (i.e. restoring it to a previous
2070 known good state).'''
2070 known good state).'''
2071 if hasunfilteredcache(self, r'dirstate'):
2071 if hasunfilteredcache(self, r'dirstate'):
2072 for k in self.dirstate._filecache:
2072 for k in self.dirstate._filecache:
2073 try:
2073 try:
2074 delattr(self.dirstate, k)
2074 delattr(self.dirstate, k)
2075 except AttributeError:
2075 except AttributeError:
2076 pass
2076 pass
2077 delattr(self.unfiltered(), r'dirstate')
2077 delattr(self.unfiltered(), r'dirstate')
2078
2078
2079 def invalidate(self, clearfilecache=False):
2079 def invalidate(self, clearfilecache=False):
2080 '''Invalidates both store and non-store parts other than dirstate
2080 '''Invalidates both store and non-store parts other than dirstate
2081
2081
2082 If a transaction is running, invalidation of store is omitted,
2082 If a transaction is running, invalidation of store is omitted,
2083 because discarding in-memory changes might cause inconsistency
2083 because discarding in-memory changes might cause inconsistency
2084 (e.g. incomplete fncache causes unintentional failure, but
2084 (e.g. incomplete fncache causes unintentional failure, but
2085 redundant one doesn't).
2085 redundant one doesn't).
2086 '''
2086 '''
2087 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2087 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2088 for k in list(self._filecache.keys()):
2088 for k in list(self._filecache.keys()):
2089 # dirstate is invalidated separately in invalidatedirstate()
2089 # dirstate is invalidated separately in invalidatedirstate()
2090 if k == 'dirstate':
2090 if k == 'dirstate':
2091 continue
2091 continue
2092 if (k == 'changelog' and
2092 if (k == 'changelog' and
2093 self.currenttransaction() and
2093 self.currenttransaction() and
2094 self.changelog._delayed):
2094 self.changelog._delayed):
2095 # The changelog object may store unwritten revisions. We don't
2095 # The changelog object may store unwritten revisions. We don't
2096 # want to lose them.
2096 # want to lose them.
2097 # TODO: Solve the problem instead of working around it.
2097 # TODO: Solve the problem instead of working around it.
2098 continue
2098 continue
2099
2099
2100 if clearfilecache:
2100 if clearfilecache:
2101 del self._filecache[k]
2101 del self._filecache[k]
2102 try:
2102 try:
2103 delattr(unfiltered, k)
2103 delattr(unfiltered, k)
2104 except AttributeError:
2104 except AttributeError:
2105 pass
2105 pass
2106 self.invalidatecaches()
2106 self.invalidatecaches()
2107 if not self.currenttransaction():
2107 if not self.currenttransaction():
2108 # TODO: Changing contents of store outside transaction
2108 # TODO: Changing contents of store outside transaction
2109 # causes inconsistency. We should make in-memory store
2109 # causes inconsistency. We should make in-memory store
2110 # changes detectable, and abort if changed.
2110 # changes detectable, and abort if changed.
2111 self.store.invalidatecaches()
2111 self.store.invalidatecaches()
2112
2112
2113 def invalidateall(self):
2113 def invalidateall(self):
2114 '''Fully invalidates both store and non-store parts, causing the
2114 '''Fully invalidates both store and non-store parts, causing the
2115 subsequent operation to reread any outside changes.'''
2115 subsequent operation to reread any outside changes.'''
2116 # extension should hook this to invalidate its caches
2116 # extension should hook this to invalidate its caches
2117 self.invalidate()
2117 self.invalidate()
2118 self.invalidatedirstate()
2118 self.invalidatedirstate()
2119
2119
2120 @unfilteredmethod
2120 @unfilteredmethod
2121 def _refreshfilecachestats(self, tr):
2121 def _refreshfilecachestats(self, tr):
2122 """Reload stats of cached files so that they are flagged as valid"""
2122 """Reload stats of cached files so that they are flagged as valid"""
2123 for k, ce in self._filecache.items():
2123 for k, ce in self._filecache.items():
2124 k = pycompat.sysstr(k)
2124 k = pycompat.sysstr(k)
2125 if k == r'dirstate' or k not in self.__dict__:
2125 if k == r'dirstate' or k not in self.__dict__:
2126 continue
2126 continue
2127 ce.refresh()
2127 ce.refresh()
2128
2128
2129 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2129 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2130 inheritchecker=None, parentenvvar=None):
2130 inheritchecker=None, parentenvvar=None):
2131 parentlock = None
2131 parentlock = None
2132 # the contents of parentenvvar are used by the underlying lock to
2132 # the contents of parentenvvar are used by the underlying lock to
2133 # determine whether it can be inherited
2133 # determine whether it can be inherited
2134 if parentenvvar is not None:
2134 if parentenvvar is not None:
2135 parentlock = encoding.environ.get(parentenvvar)
2135 parentlock = encoding.environ.get(parentenvvar)
2136
2136
2137 timeout = 0
2137 timeout = 0
2138 warntimeout = 0
2138 warntimeout = 0
2139 if wait:
2139 if wait:
2140 timeout = self.ui.configint("ui", "timeout")
2140 timeout = self.ui.configint("ui", "timeout")
2141 warntimeout = self.ui.configint("ui", "timeout.warn")
2141 warntimeout = self.ui.configint("ui", "timeout.warn")
2142 # internal config: ui.signal-safe-lock
2142 # internal config: ui.signal-safe-lock
2143 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2143 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2144
2144
2145 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2145 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2146 releasefn=releasefn,
2146 releasefn=releasefn,
2147 acquirefn=acquirefn, desc=desc,
2147 acquirefn=acquirefn, desc=desc,
2148 inheritchecker=inheritchecker,
2148 inheritchecker=inheritchecker,
2149 parentlock=parentlock,
2149 parentlock=parentlock,
2150 signalsafe=signalsafe)
2150 signalsafe=signalsafe)
2151 return l
2151 return l
2152
2152
2153 def _afterlock(self, callback):
2153 def _afterlock(self, callback):
2154 """add a callback to be run when the repository is fully unlocked
2154 """add a callback to be run when the repository is fully unlocked
2155
2155
2156 The callback will be executed when the outermost lock is released
2156 The callback will be executed when the outermost lock is released
2157 (with wlock being higher level than 'lock')."""
2157 (with wlock being higher level than 'lock')."""
2158 for ref in (self._wlockref, self._lockref):
2158 for ref in (self._wlockref, self._lockref):
2159 l = ref and ref()
2159 l = ref and ref()
2160 if l and l.held:
2160 if l and l.held:
2161 l.postrelease.append(callback)
2161 l.postrelease.append(callback)
2162 break
2162 break
2163 else: # no lock have been found.
2163 else: # no lock have been found.
2164 callback()
2164 callback()
2165
2165
2166 def lock(self, wait=True):
2166 def lock(self, wait=True):
2167 '''Lock the repository store (.hg/store) and return a weak reference
2167 '''Lock the repository store (.hg/store) and return a weak reference
2168 to the lock. Use this before modifying the store (e.g. committing or
2168 to the lock. Use this before modifying the store (e.g. committing or
2169 stripping). If you are opening a transaction, get a lock as well.)
2169 stripping). If you are opening a transaction, get a lock as well.)
2170
2170
2171 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2171 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
2172 'wlock' first to avoid a dead-lock hazard.'''
2172 'wlock' first to avoid a dead-lock hazard.'''
2173 l = self._currentlock(self._lockref)
2173 l = self._currentlock(self._lockref)
2174 if l is not None:
2174 if l is not None:
2175 l.lock()
2175 l.lock()
2176 return l
2176 return l
2177
2177
2178 l = self._lock(self.svfs, "lock", wait, None,
2178 l = self._lock(self.svfs, "lock", wait, None,
2179 self.invalidate, _('repository %s') % self.origroot)
2179 self.invalidate, _('repository %s') % self.origroot)
2180 self._lockref = weakref.ref(l)
2180 self._lockref = weakref.ref(l)
2181 return l
2181 return l
2182
2182
2183 def _wlockchecktransaction(self):
2183 def _wlockchecktransaction(self):
2184 if self.currenttransaction() is not None:
2184 if self.currenttransaction() is not None:
2185 raise error.LockInheritanceContractViolation(
2185 raise error.LockInheritanceContractViolation(
2186 'wlock cannot be inherited in the middle of a transaction')
2186 'wlock cannot be inherited in the middle of a transaction')
2187
2187
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        # reuse (and re-enter) a wlock this process already holds
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                # taking wlock after lock inverts the documented ordering
                # and can dead-lock against other processes
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush (or discard) dirstate changes when the lock is released
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            # mark the just-written dirstate file stat as valid so the
            # cache is not needlessly discarded on next access
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
2223
2223
2224 def _currentlock(self, lockref):
2224 def _currentlock(self, lockref):
2225 """Returns the lock if it's held, or None if it's not."""
2225 """Returns the lock if it's held, or None if it's not."""
2226 if lockref is None:
2226 if lockref is None:
2227 return None
2227 return None
2228 l = lockref()
2228 l = lockref()
2229 if l is None or not l.held:
2229 if l is None or not l.held:
2230 return None
2230 return None
2231 return l
2231 return l
2232
2232
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not.

        Unlike wlock(), this never acquires the lock; it only reports one
        already held by this process.
        """
        return self._currentlock(self._wlockref)
2236
2236
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit (may not be a plain filectx —
          see the isinstance check below)
        manifest1, manifest2: manifests of the commit's two parents
        linkrev: revision number the new filelog entry will link to
        tr: the running transaction (a weakref proxy)
        changelist: mutated in place — the file name is appended when the
          file is recorded as changed

        Returns the filelog node to store in the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        # fast path: an existing filectx whose node matches a parent can
        # reuse that filelog entry outright (only flags may have changed)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            # file is new on this side; promote the other parent to p1
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
2324
2324
2325 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2325 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2326 """check for commit arguments that aren't committable"""
2326 """check for commit arguments that aren't committable"""
2327 if match.isexact() or match.prefix():
2327 if match.isexact() or match.prefix():
2328 matched = set(status.modified + status.added + status.removed)
2328 matched = set(status.modified + status.added + status.removed)
2329
2329
2330 for f in match.files():
2330 for f in match.files():
2331 f = self.dirstate.normalize(f)
2331 f = self.dirstate.normalize(f)
2332 if f == '.' or f in matched or f in wctx.substate:
2332 if f == '.' or f in matched or f in wctx.substate:
2333 continue
2333 continue
2334 if f in status.deleted:
2334 if f in status.deleted:
2335 fail(f, _('file not found!'))
2335 fail(f, _('file not found!'))
2336 if f in vdirs: # visited directory
2336 if f in vdirs: # visited directory
2337 d = f + '/'
2337 d = f + '/'
2338 for mf in matched:
2338 for mf in matched:
2339 if mf.startswith(d):
2339 if mf.startswith(d):
2340 break
2340 break
2341 else:
2341 else:
2342 fail(f, _("no match under directory!"))
2342 fail(f, _("no match under directory!"))
2343 elif f not in self.dirstate:
2343 elif f not in self.dirstate:
2344 fail(f, _("file not tracked!"))
2344 fail(f, _("file not tracked!"))
2345
2345
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the node of the new changeset, or None when there is
        nothing to commit (and empty commits are not allowed).
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # abort on the first uncommittable file/pattern
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit directory patterns
            # can be validated in checkcommitpatterns() below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            # refuse to commit over unresolved merge conflicts
            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                # point the user at the saved message before propagating,
                # so an edited message survives the failed transaction
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        # run the "commit" hook only once all locks are released
        self._afterlock(commithook)
        return ret
2457
2457
2458 @unfilteredmethod
2458 @unfilteredmethod
2459 def commitctx(self, ctx, error=False):
2459 def commitctx(self, ctx, error=False):
2460 """Add a new revision to current repository.
2460 """Add a new revision to current repository.
2461 Revision information is passed via the context argument.
2461 Revision information is passed via the context argument.
2462
2462
2463 ctx.files() should list all files involved in this commit, i.e.
2463 ctx.files() should list all files involved in this commit, i.e.
2464 modified/added/removed files. On merge, it may be wider than the
2464 modified/added/removed files. On merge, it may be wider than the
2465 ctx.files() to be committed, since any file nodes derived directly
2465 ctx.files() to be committed, since any file nodes derived directly
2466 from p1 or p2 are excluded from the committed ctx.files().
2466 from p1 or p2 are excluded from the committed ctx.files().
2467 """
2467 """
2468
2468
2469 tr = None
2469 tr = None
2470 p1, p2 = ctx.p1(), ctx.p2()
2470 p1, p2 = ctx.p1(), ctx.p2()
2471 user = ctx.user()
2471 user = ctx.user()
2472
2472
2473 lock = self.lock()
2473 lock = self.lock()
2474 try:
2474 try:
2475 tr = self.transaction("commit")
2475 tr = self.transaction("commit")
2476 trp = weakref.proxy(tr)
2476 trp = weakref.proxy(tr)
2477
2477
2478 if ctx.manifestnode():
2478 if ctx.manifestnode():
2479 # reuse an existing manifest revision
2479 # reuse an existing manifest revision
2480 self.ui.debug('reusing known manifest\n')
2480 self.ui.debug('reusing known manifest\n')
2481 mn = ctx.manifestnode()
2481 mn = ctx.manifestnode()
2482 files = ctx.files()
2482 files = ctx.files()
2483 elif ctx.files():
2483 elif ctx.files():
2484 m1ctx = p1.manifestctx()
2484 m1ctx = p1.manifestctx()
2485 m2ctx = p2.manifestctx()
2485 m2ctx = p2.manifestctx()
2486 mctx = m1ctx.copy()
2486 mctx = m1ctx.copy()
2487
2487
2488 m = mctx.read()
2488 m = mctx.read()
2489 m1 = m1ctx.read()
2489 m1 = m1ctx.read()
2490 m2 = m2ctx.read()
2490 m2 = m2ctx.read()
2491
2491
2492 # check in files
2492 # check in files
2493 added = []
2493 added = []
2494 changed = []
2494 changed = []
2495 removed = list(ctx.removed())
2495 removed = list(ctx.removed())
2496 linkrev = len(self)
2496 linkrev = len(self)
2497 self.ui.note(_("committing files:\n"))
2497 self.ui.note(_("committing files:\n"))
2498 for f in sorted(ctx.modified() + ctx.added()):
2498 for f in sorted(ctx.modified() + ctx.added()):
2499 self.ui.note(f + "\n")
2499 self.ui.note(f + "\n")
2500 try:
2500 try:
2501 fctx = ctx[f]
2501 fctx = ctx[f]
2502 if fctx is None:
2502 if fctx is None:
2503 removed.append(f)
2503 removed.append(f)
2504 else:
2504 else:
2505 added.append(f)
2505 added.append(f)
2506 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2506 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2507 trp, changed)
2507 trp, changed)
2508 m.setflag(f, fctx.flags())
2508 m.setflag(f, fctx.flags())
2509 except OSError as inst:
2509 except OSError as inst:
2510 self.ui.warn(_("trouble committing %s!\n") % f)
2510 self.ui.warn(_("trouble committing %s!\n") % f)
2511 raise
2511 raise
2512 except IOError as inst:
2512 except IOError as inst:
2513 errcode = getattr(inst, 'errno', errno.ENOENT)
2513 errcode = getattr(inst, 'errno', errno.ENOENT)
2514 if error or errcode and errcode != errno.ENOENT:
2514 if error or errcode and errcode != errno.ENOENT:
2515 self.ui.warn(_("trouble committing %s!\n") % f)
2515 self.ui.warn(_("trouble committing %s!\n") % f)
2516 raise
2516 raise
2517
2517
2518 # update manifest
2518 # update manifest
2519 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2519 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2520 drop = [f for f in removed if f in m]
2520 drop = [f for f in removed if f in m]
2521 for f in drop:
2521 for f in drop:
2522 del m[f]
2522 del m[f]
2523 files = changed + removed
2523 files = changed + removed
2524 md = None
2524 md = None
2525 if not files:
2525 if not files:
2526 # if no "files" actually changed in terms of the changelog,
2526 # if no "files" actually changed in terms of the changelog,
2527 # try hard to detect unmodified manifest entry so that the
2527 # try hard to detect unmodified manifest entry so that the
2528 # exact same commit can be reproduced later on convert.
2528 # exact same commit can be reproduced later on convert.
2529 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2529 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2530 if not files and md:
2530 if not files and md:
2531 self.ui.debug('not reusing manifest (no file change in '
2531 self.ui.debug('not reusing manifest (no file change in '
2532 'changelog, but manifest differs)\n')
2532 'changelog, but manifest differs)\n')
2533 if files or md:
2533 if files or md:
2534 self.ui.note(_("committing manifest\n"))
2534 self.ui.note(_("committing manifest\n"))
2535 # we're using narrowmatch here since it's already applied at
2535 # we're using narrowmatch here since it's already applied at
2536 # other stages (such as dirstate.walk), so we're already
2536 # other stages (such as dirstate.walk), so we're already
2537 # ignoring things outside of narrowspec in most cases. The
2537 # ignoring things outside of narrowspec in most cases. The
2538 # one case where we might have files outside the narrowspec
2538 # one case where we might have files outside the narrowspec
2539 # at this point is merges, and we already error out in the
2539 # at this point is merges, and we already error out in the
2540 # case where the merge has files outside of the narrowspec,
2540 # case where the merge has files outside of the narrowspec,
2541 # so this is safe.
2541 # so this is safe.
2542 mn = mctx.write(trp, linkrev,
2542 mn = mctx.write(trp, linkrev,
2543 p1.manifestnode(), p2.manifestnode(),
2543 p1.manifestnode(), p2.manifestnode(),
2544 added, drop, match=self.narrowmatch())
2544 added, drop, match=self.narrowmatch())
2545 else:
2545 else:
2546 self.ui.debug('reusing manifest form p1 (listed files '
2546 self.ui.debug('reusing manifest form p1 (listed files '
2547 'actually unchanged)\n')
2547 'actually unchanged)\n')
2548 mn = p1.manifestnode()
2548 mn = p1.manifestnode()
2549 else:
2549 else:
2550 self.ui.debug('reusing manifest from p1 (no file change)\n')
2550 self.ui.debug('reusing manifest from p1 (no file change)\n')
2551 mn = p1.manifestnode()
2551 mn = p1.manifestnode()
2552 files = []
2552 files = []
2553
2553
2554 # update changelog
2554 # update changelog
2555 self.ui.note(_("committing changelog\n"))
2555 self.ui.note(_("committing changelog\n"))
2556 self.changelog.delayupdate(tr)
2556 self.changelog.delayupdate(tr)
2557 n = self.changelog.add(mn, files, ctx.description(),
2557 n = self.changelog.add(mn, files, ctx.description(),
2558 trp, p1.node(), p2.node(),
2558 trp, p1.node(), p2.node(),
2559 user, ctx.date(), ctx.extra().copy())
2559 user, ctx.date(), ctx.extra().copy())
2560 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2560 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2561 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2561 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2562 parent2=xp2)
2562 parent2=xp2)
2563 # set the new commit is proper phase
2563 # set the new commit is proper phase
2564 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2564 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2565 if targetphase:
2565 if targetphase:
2566 # retract boundary do not alter parent changeset.
2566 # retract boundary do not alter parent changeset.
2567 # if a parent have higher the resulting phase will
2567 # if a parent have higher the resulting phase will
2568 # be compliant anyway
2568 # be compliant anyway
2569 #
2569 #
2570 # if minimal phase was 0 we don't need to retract anything
2570 # if minimal phase was 0 we don't need to retract anything
2571 phases.registernew(self, tr, targetphase, [n])
2571 phases.registernew(self, tr, targetphase, [n])
2572 tr.close()
2572 tr.close()
2573 return n
2573 return n
2574 finally:
2574 finally:
2575 if tr:
2575 if tr:
2576 tr.release()
2576 tr.release()
2577 lock.release()
2577 lock.release()
2578
2578
2579 @unfilteredmethod
2579 @unfilteredmethod
2580 def destroying(self):
2580 def destroying(self):
2581 '''Inform the repository that nodes are about to be destroyed.
2581 '''Inform the repository that nodes are about to be destroyed.
2582 Intended for use by strip and rollback, so there's a common
2582 Intended for use by strip and rollback, so there's a common
2583 place for anything that has to be done before destroying history.
2583 place for anything that has to be done before destroying history.
2584
2584
2585 This is mostly useful for saving state that is in memory and waiting
2585 This is mostly useful for saving state that is in memory and waiting
2586 to be flushed when the current lock is released. Because a call to
2586 to be flushed when the current lock is released. Because a call to
2587 destroyed is imminent, the repo will be invalidated causing those
2587 destroyed is imminent, the repo will be invalidated causing those
2588 changes to stay in memory (waiting for the next unlock), or vanish
2588 changes to stay in memory (waiting for the next unlock), or vanish
2589 completely.
2589 completely.
2590 '''
2590 '''
2591 # When using the same lock to commit and strip, the phasecache is left
2591 # When using the same lock to commit and strip, the phasecache is left
2592 # dirty after committing. Then when we strip, the repo is invalidated,
2592 # dirty after committing. Then when we strip, the repo is invalidated,
2593 # causing those changes to disappear.
2593 # causing those changes to disappear.
2594 if '_phasecache' in vars(self):
2594 if '_phasecache' in vars(self):
2595 self._phasecache.write()
2595 self._phasecache.write()
2596
2596
2597 @unfilteredmethod
2597 @unfilteredmethod
2598 def destroyed(self):
2598 def destroyed(self):
2599 '''Inform the repository that nodes have been destroyed.
2599 '''Inform the repository that nodes have been destroyed.
2600 Intended for use by strip and rollback, so there's a common
2600 Intended for use by strip and rollback, so there's a common
2601 place for anything that has to be done after destroying history.
2601 place for anything that has to be done after destroying history.
2602 '''
2602 '''
2603 # When one tries to:
2603 # When one tries to:
2604 # 1) destroy nodes thus calling this method (e.g. strip)
2604 # 1) destroy nodes thus calling this method (e.g. strip)
2605 # 2) use phasecache somewhere (e.g. commit)
2605 # 2) use phasecache somewhere (e.g. commit)
2606 #
2606 #
2607 # then 2) will fail because the phasecache contains nodes that were
2607 # then 2) will fail because the phasecache contains nodes that were
2608 # removed. We can either remove phasecache from the filecache,
2608 # removed. We can either remove phasecache from the filecache,
2609 # causing it to reload next time it is accessed, or simply filter
2609 # causing it to reload next time it is accessed, or simply filter
2610 # the removed nodes now and write the updated cache.
2610 # the removed nodes now and write the updated cache.
2611 self._phasecache.filterunknown(self)
2611 self._phasecache.filterunknown(self)
2612 self._phasecache.write()
2612 self._phasecache.write()
2613
2613
2614 # refresh all repository caches
2614 # refresh all repository caches
2615 self.updatecaches()
2615 self.updatecaches()
2616
2616
2617 # Ensure the persistent tag cache is updated. Doing it now
2617 # Ensure the persistent tag cache is updated. Doing it now
2618 # means that the tag cache only has to worry about destroyed
2618 # means that the tag cache only has to worry about destroyed
2619 # heads immediately after a strip/rollback. That in turn
2619 # heads immediately after a strip/rollback. That in turn
2620 # guarantees that "cachetip == currenttip" (comparing both rev
2620 # guarantees that "cachetip == currenttip" (comparing both rev
2621 # and node) always means no nodes have been added or destroyed.
2621 # and node) always means no nodes have been added or destroyed.
2622
2622
2623 # XXX this is suboptimal when qrefresh'ing: we strip the current
2623 # XXX this is suboptimal when qrefresh'ing: we strip the current
2624 # head, refresh the tag cache, then immediately add a new head.
2624 # head, refresh the tag cache, then immediately add a new head.
2625 # But I think doing it this way is necessary for the "instant
2625 # But I think doing it this way is necessary for the "instant
2626 # tag cache retrieval" case to work.
2626 # tag cache retrieval" case to work.
2627 self.invalidate()
2627 self.invalidate()
2628
2628
2629 def status(self, node1='.', node2=None, match=None,
2629 def status(self, node1='.', node2=None, match=None,
2630 ignored=False, clean=False, unknown=False,
2630 ignored=False, clean=False, unknown=False,
2631 listsubrepos=False):
2631 listsubrepos=False):
2632 '''a convenience method that calls node1.status(node2)'''
2632 '''a convenience method that calls node1.status(node2)'''
2633 return self[node1].status(node2, match, ignored, clean, unknown,
2633 return self[node1].status(node2, match, ignored, clean, unknown,
2634 listsubrepos)
2634 listsubrepos)
2635
2635
2636 def addpostdsstatus(self, ps):
2636 def addpostdsstatus(self, ps):
2637 """Add a callback to run within the wlock, at the point at which status
2637 """Add a callback to run within the wlock, at the point at which status
2638 fixups happen.
2638 fixups happen.
2639
2639
2640 On status completion, callback(wctx, status) will be called with the
2640 On status completion, callback(wctx, status) will be called with the
2641 wlock held, unless the dirstate has changed from underneath or the wlock
2641 wlock held, unless the dirstate has changed from underneath or the wlock
2642 couldn't be grabbed.
2642 couldn't be grabbed.
2643
2643
2644 Callbacks should not capture and use a cached copy of the dirstate --
2644 Callbacks should not capture and use a cached copy of the dirstate --
2645 it might change in the meanwhile. Instead, they should access the
2645 it might change in the meanwhile. Instead, they should access the
2646 dirstate via wctx.repo().dirstate.
2646 dirstate via wctx.repo().dirstate.
2647
2647
2648 This list is emptied out after each status run -- extensions should
2648 This list is emptied out after each status run -- extensions should
2649 make sure it adds to this list each time dirstate.status is called.
2649 make sure it adds to this list each time dirstate.status is called.
2650 Extensions should also make sure they don't call this for statuses
2650 Extensions should also make sure they don't call this for statuses
2651 that don't involve the dirstate.
2651 that don't involve the dirstate.
2652 """
2652 """
2653
2653
2654 # The list is located here for uniqueness reasons -- it is actually
2654 # The list is located here for uniqueness reasons -- it is actually
2655 # managed by the workingctx, but that isn't unique per-repo.
2655 # managed by the workingctx, but that isn't unique per-repo.
2656 self._postdsstatus.append(ps)
2656 self._postdsstatus.append(ps)
2657
2657
2658 def postdsstatus(self):
2658 def postdsstatus(self):
2659 """Used by workingctx to get the list of post-dirstate-status hooks."""
2659 """Used by workingctx to get the list of post-dirstate-status hooks."""
2660 return self._postdsstatus
2660 return self._postdsstatus
2661
2661
2662 def clearpostdsstatus(self):
2662 def clearpostdsstatus(self):
2663 """Used by workingctx to clear post-dirstate-status hooks."""
2663 """Used by workingctx to clear post-dirstate-status hooks."""
2664 del self._postdsstatus[:]
2664 del self._postdsstatus[:]
2665
2665
2666 def heads(self, start=None):
2666 def heads(self, start=None):
2667 if start is None:
2667 if start is None:
2668 cl = self.changelog
2668 cl = self.changelog
2669 headrevs = reversed(cl.headrevs())
2669 headrevs = reversed(cl.headrevs())
2670 return [cl.node(rev) for rev in headrevs]
2670 return [cl.node(rev) for rev in headrevs]
2671
2671
2672 heads = self.changelog.heads(start)
2672 heads = self.changelog.heads(start)
2673 # sort the output in rev descending order
2673 # sort the output in rev descending order
2674 return sorted(heads, key=self.changelog.rev, reverse=True)
2674 return sorted(heads, key=self.changelog.rev, reverse=True)
2675
2675
2676 def branchheads(self, branch=None, start=None, closed=False):
2676 def branchheads(self, branch=None, start=None, closed=False):
2677 '''return a (possibly filtered) list of heads for the given branch
2677 '''return a (possibly filtered) list of heads for the given branch
2678
2678
2679 Heads are returned in topological order, from newest to oldest.
2679 Heads are returned in topological order, from newest to oldest.
2680 If branch is None, use the dirstate branch.
2680 If branch is None, use the dirstate branch.
2681 If start is not None, return only heads reachable from start.
2681 If start is not None, return only heads reachable from start.
2682 If closed is True, return heads that are marked as closed as well.
2682 If closed is True, return heads that are marked as closed as well.
2683 '''
2683 '''
2684 if branch is None:
2684 if branch is None:
2685 branch = self[None].branch()
2685 branch = self[None].branch()
2686 branches = self.branchmap()
2686 branches = self.branchmap()
2687 if branch not in branches:
2687 if branch not in branches:
2688 return []
2688 return []
2689 # the cache returns heads ordered lowest to highest
2689 # the cache returns heads ordered lowest to highest
2690 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2690 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2691 if start is not None:
2691 if start is not None:
2692 # filter out the heads that cannot be reached from startrev
2692 # filter out the heads that cannot be reached from startrev
2693 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2693 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2694 bheads = [h for h in bheads if h in fbheads]
2694 bheads = [h for h in bheads if h in fbheads]
2695 return bheads
2695 return bheads
2696
2696
2697 def branches(self, nodes):
2697 def branches(self, nodes):
2698 if not nodes:
2698 if not nodes:
2699 nodes = [self.changelog.tip()]
2699 nodes = [self.changelog.tip()]
2700 b = []
2700 b = []
2701 for n in nodes:
2701 for n in nodes:
2702 t = n
2702 t = n
2703 while True:
2703 while True:
2704 p = self.changelog.parents(n)
2704 p = self.changelog.parents(n)
2705 if p[1] != nullid or p[0] == nullid:
2705 if p[1] != nullid or p[0] == nullid:
2706 b.append((t, n, p[0], p[1]))
2706 b.append((t, n, p[0], p[1]))
2707 break
2707 break
2708 n = p[0]
2708 n = p[0]
2709 return b
2709 return b
2710
2710
2711 def between(self, pairs):
2711 def between(self, pairs):
2712 r = []
2712 r = []
2713
2713
2714 for top, bottom in pairs:
2714 for top, bottom in pairs:
2715 n, l, i = top, [], 0
2715 n, l, i = top, [], 0
2716 f = 1
2716 f = 1
2717
2717
2718 while n != bottom and n != nullid:
2718 while n != bottom and n != nullid:
2719 p = self.changelog.parents(n)[0]
2719 p = self.changelog.parents(n)[0]
2720 if i == f:
2720 if i == f:
2721 l.append(n)
2721 l.append(n)
2722 f = f * 2
2722 f = f * 2
2723 n = p
2723 n = p
2724 i += 1
2724 i += 1
2725
2725
2726 r.append(l)
2726 r.append(l)
2727
2727
2728 return r
2728 return r
2729
2729
2730 def checkpush(self, pushop):
2730 def checkpush(self, pushop):
2731 """Extensions can override this function if additional checks have
2731 """Extensions can override this function if additional checks have
2732 to be performed before pushing, or call it if they override push
2732 to be performed before pushing, or call it if they override push
2733 command.
2733 command.
2734 """
2734 """
2735
2735
2736 @unfilteredpropertycache
2736 @unfilteredpropertycache
2737 def prepushoutgoinghooks(self):
2737 def prepushoutgoinghooks(self):
2738 """Return util.hooks consists of a pushop with repo, remote, outgoing
2738 """Return util.hooks consists of a pushop with repo, remote, outgoing
2739 methods, which are called before pushing changesets.
2739 methods, which are called before pushing changesets.
2740 """
2740 """
2741 return util.hooks()
2741 return util.hooks()
2742
2742
2743 def pushkey(self, namespace, key, old, new):
2743 def pushkey(self, namespace, key, old, new):
2744 try:
2744 try:
2745 tr = self.currenttransaction()
2745 tr = self.currenttransaction()
2746 hookargs = {}
2746 hookargs = {}
2747 if tr is not None:
2747 if tr is not None:
2748 hookargs.update(tr.hookargs)
2748 hookargs.update(tr.hookargs)
2749 hookargs = pycompat.strkwargs(hookargs)
2749 hookargs = pycompat.strkwargs(hookargs)
2750 hookargs[r'namespace'] = namespace
2750 hookargs[r'namespace'] = namespace
2751 hookargs[r'key'] = key
2751 hookargs[r'key'] = key
2752 hookargs[r'old'] = old
2752 hookargs[r'old'] = old
2753 hookargs[r'new'] = new
2753 hookargs[r'new'] = new
2754 self.hook('prepushkey', throw=True, **hookargs)
2754 self.hook('prepushkey', throw=True, **hookargs)
2755 except error.HookAbort as exc:
2755 except error.HookAbort as exc:
2756 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2756 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2757 if exc.hint:
2757 if exc.hint:
2758 self.ui.write_err(_("(%s)\n") % exc.hint)
2758 self.ui.write_err(_("(%s)\n") % exc.hint)
2759 return False
2759 return False
2760 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2760 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2761 ret = pushkey.push(self, namespace, key, old, new)
2761 ret = pushkey.push(self, namespace, key, old, new)
2762 def runhook():
2762 def runhook():
2763 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2763 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2764 ret=ret)
2764 ret=ret)
2765 self._afterlock(runhook)
2765 self._afterlock(runhook)
2766 return ret
2766 return ret
2767
2767
2768 def listkeys(self, namespace):
2768 def listkeys(self, namespace):
2769 self.hook('prelistkeys', throw=True, namespace=namespace)
2769 self.hook('prelistkeys', throw=True, namespace=namespace)
2770 self.ui.debug('listing keys for "%s"\n' % namespace)
2770 self.ui.debug('listing keys for "%s"\n' % namespace)
2771 values = pushkey.list(self, namespace)
2771 values = pushkey.list(self, namespace)
2772 self.hook('listkeys', namespace=namespace, values=values)
2772 self.hook('listkeys', namespace=namespace, values=values)
2773 return values
2773 return values
2774
2774
2775 def debugwireargs(self, one, two, three=None, four=None, five=None):
2775 def debugwireargs(self, one, two, three=None, four=None, five=None):
2776 '''used to test argument passing over the wire'''
2776 '''used to test argument passing over the wire'''
2777 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2777 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2778 pycompat.bytestr(four),
2778 pycompat.bytestr(four),
2779 pycompat.bytestr(five))
2779 pycompat.bytestr(five))
2780
2780
2781 def savecommitmessage(self, text):
2781 def savecommitmessage(self, text):
2782 fp = self.vfs('last-message.txt', 'wb')
2782 fp = self.vfs('last-message.txt', 'wb')
2783 try:
2783 try:
2784 fp.write(text)
2784 fp.write(text)
2785 finally:
2785 finally:
2786 fp.close()
2786 fp.close()
2787 return self.pathto(fp.name[len(self.root) + 1:])
2787 return self.pathto(fp.name[len(self.root) + 1:])
2788
2788
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames journal files to their undo names.

    ``files`` is an iterable of (vfs, src, dest) triples.
    """
    # snapshot as plain tuples so the closure holds no mutable references
    pending = [tuple(spec) for spec in files]
    def renamer():
        for vfs, src, dest in pending:
            # if src and dest name the same file, vfs.rename is a no-op
            # and would leave both on disk; unlink dest first so the
            # rename cannot be such a no-op
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return renamer
2803
2803
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    dirname, basename = os.path.split(fn)
    # only ever called on journal files; the leading 'journal' component
    # of the basename is swapped for 'undo'
    assert basename.startswith('journal')
    return os.path.join(dirname, basename.replace('journal', 'undo', 1))
2808
2808
def instance(ui, path, create, intents=None, createopts=None):
    """Return a local repository object for *path*, creating it on demand."""
    # strip any file:// style prefix; local repos operate on plain paths
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)
2815
2815
def islocal(path):
    # this module only ever serves local repositories
    return True
2818
2818
def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    # copy so the caller's dict is never mutated
    opts = dict(createopts or {})

    if 'backend' not in opts:
        # experimental config: storage.new-repo-backend
        opts['backend'] = ui.config('storage', 'new-repo-backend')

    return opts
2832
2832
def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # A repo created from a shared repository inherits its requirements
    # wholesale, plus the appropriate share marker.
    if 'sharedrepo' in createopts:
        reqs = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            reqs.add('relshared')
        else:
            reqs.add('shared')

        return reqs

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    reqs = {'revlogv1'}
    # fncache requires store, and dotencode requires fncache
    if ui.configbool('format', 'usestore'):
        reqs.add('store')
        if ui.configbool('format', 'usefncache'):
            reqs.add('fncache')
            if ui.configbool('format', 'dotencode'):
                reqs.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        reqs.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        reqs.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        reqs.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        reqs.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        reqs.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        reqs.discard('generaldelta')
        reqs.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        reqs.add('internal-phase')

    if createopts.get('narrowfiles'):
        reqs.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        reqs.add('lfs')

    return reqs
2903
2903
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    # the set of options core Mercurial understands
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    # everything not in the known set is left for the caller to reject
    return {k: v for k, v in createopts.items() if k not in known}
2928
2928
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    # extensions wrap filterknowncreateopts(); guard against bad wrappers
    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3016
3016
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Release whatever resources the instance holds before disabling it.
    repo.close()

    # Strategy: swap the object's type for one whose attribute lookups
    # all fail loudly.  close() alone stays callable (as a no-op),
    # because some repo constructors call close() on repo references
    # they were handed.
    class poisonedrepository(object):
        def close(self):
            pass

        def __getattribute__(self, item):
            if item != r'close':
                raise error.ProgrammingError('repo instances should not be used '
                                             'after unshare')
            return object.__getattribute__(self, item)

    # A repoview intercepts __setattr__, so perform the type swap at the
    # lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
General Comments 0
You need to be logged in to leave comments. Login now