coding style: fix gratuitous whitespace after Python keywords
Thomas Arendsen Hein
r13075:d73c3034 default
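This changeset is a whitespace-only style cleanup: both sides of the hunk below stay at 3228 lines. As a rough, hypothetical sketch (not code from this changeset or from mq.py), the rule being enforced, collapsing doubled blanks after a Python keyword into a single space, could be expressed like this:

import re

# Illustrative sketch only, not part of mq.py or this changeset: collapse
# runs of blanks after a Python keyword into a single space, which is the
# style fix the hunks below apply by hand.
_KEYWORDS = r'(?:if|elif|while|for|return|del|not|in|and|or|assert|raise|yield)'
_ws_after_kw = re.compile(r'\b(' + _KEYWORDS + r')[ \t]{2,}')

def fix_gratuitous_whitespace(line):
    """Reduce extra blanks after a keyword to one space."""
    return _ws_after_kw.sub(r'\1 ', line)

assert fix_gratuitous_whitespace('if  not message:') == 'if not message:'
assert fix_gratuitous_whitespace('del  lines[-1]') == 'del lines[-1]'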
@@ -1,3228 +1,3228 @@
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help command` for more details)::
17 Common tasks (use :hg:`help command` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40
40
41 You will by default be managing a patch queue named "patches". You can
41 You will by default be managing a patch queue named "patches". You can
42 create other, independent patch queues with the :hg:`qqueue` command.
42 create other, independent patch queues with the :hg:`qqueue` command.
43 '''
43 '''
44
44
45 from mercurial.i18n import _
45 from mercurial.i18n import _
46 from mercurial.node import bin, hex, short, nullid, nullrev
46 from mercurial.node import bin, hex, short, nullid, nullrev
47 from mercurial.lock import release
47 from mercurial.lock import release
48 from mercurial import commands, cmdutil, hg, patch, util
48 from mercurial import commands, cmdutil, hg, patch, util
49 from mercurial import repair, extensions, url, error
49 from mercurial import repair, extensions, url, error
50 import os, sys, re, errno, shutil
50 import os, sys, re, errno, shutil
51
51
52 commands.norepo += " qclone"
52 commands.norepo += " qclone"
53
53
54 # Patch names looks like unix-file names.
54 # Patch names looks like unix-file names.
55 # They must be joinable with queue directory and result in the patch path.
55 # They must be joinable with queue directory and result in the patch path.
56 normname = util.normpath
56 normname = util.normpath
57
57
58 class statusentry(object):
58 class statusentry(object):
59 def __init__(self, node, name):
59 def __init__(self, node, name):
60 self.node, self.name = node, name
60 self.node, self.name = node, name
61 def __repr__(self):
61 def __repr__(self):
62 return hex(self.node) + ':' + self.name
62 return hex(self.node) + ':' + self.name
63
63
64 class patchheader(object):
64 class patchheader(object):
65 def __init__(self, pf, plainmode=False):
65 def __init__(self, pf, plainmode=False):
66 def eatdiff(lines):
66 def eatdiff(lines):
67 while lines:
67 while lines:
68 l = lines[-1]
68 l = lines[-1]
69 if (l.startswith("diff -") or
69 if (l.startswith("diff -") or
70 l.startswith("Index:") or
70 l.startswith("Index:") or
71 l.startswith("===========")):
71 l.startswith("===========")):
72 del lines[-1]
72 del lines[-1]
73 else:
73 else:
74 break
74 break
75 def eatempty(lines):
75 def eatempty(lines):
76 while lines:
76 while lines:
77 if not lines[-1].strip():
77 if not lines[-1].strip():
78 del lines[-1]
78 del lines[-1]
79 else:
79 else:
80 break
80 break
81
81
82 message = []
82 message = []
83 comments = []
83 comments = []
84 user = None
84 user = None
85 date = None
85 date = None
86 parent = None
86 parent = None
87 format = None
87 format = None
88 subject = None
88 subject = None
89 diffstart = 0
89 diffstart = 0
90
90
91 for line in file(pf):
91 for line in file(pf):
92 line = line.rstrip()
92 line = line.rstrip()
93 if (line.startswith('diff --git')
93 if (line.startswith('diff --git')
94 or (diffstart and line.startswith('+++ '))):
94 or (diffstart and line.startswith('+++ '))):
95 diffstart = 2
95 diffstart = 2
96 break
96 break
97 diffstart = 0 # reset
97 diffstart = 0 # reset
98 if line.startswith("--- "):
98 if line.startswith("--- "):
99 diffstart = 1
99 diffstart = 1
100 continue
100 continue
101 elif format == "hgpatch":
101 elif format == "hgpatch":
102 # parse values when importing the result of an hg export
102 # parse values when importing the result of an hg export
103 if line.startswith("# User "):
103 if line.startswith("# User "):
104 user = line[7:]
104 user = line[7:]
105 elif line.startswith("# Date "):
105 elif line.startswith("# Date "):
106 date = line[7:]
106 date = line[7:]
107 elif line.startswith("# Parent "):
107 elif line.startswith("# Parent "):
108 parent = line[9:]
108 parent = line[9:]
109 elif not line.startswith("# ") and line:
109 elif not line.startswith("# ") and line:
110 message.append(line)
110 message.append(line)
111 format = None
111 format = None
112 elif line == '# HG changeset patch':
112 elif line == '# HG changeset patch':
113 message = []
113 message = []
114 format = "hgpatch"
114 format = "hgpatch"
115 elif (format != "tagdone" and (line.startswith("Subject: ") or
115 elif (format != "tagdone" and (line.startswith("Subject: ") or
116 line.startswith("subject: "))):
116 line.startswith("subject: "))):
117 subject = line[9:]
117 subject = line[9:]
118 format = "tag"
118 format = "tag"
119 elif (format != "tagdone" and (line.startswith("From: ") or
119 elif (format != "tagdone" and (line.startswith("From: ") or
120 line.startswith("from: "))):
120 line.startswith("from: "))):
121 user = line[6:]
121 user = line[6:]
122 format = "tag"
122 format = "tag"
123 elif (format != "tagdone" and (line.startswith("Date: ") or
123 elif (format != "tagdone" and (line.startswith("Date: ") or
124 line.startswith("date: "))):
124 line.startswith("date: "))):
125 date = line[6:]
125 date = line[6:]
126 format = "tag"
126 format = "tag"
127 elif format == "tag" and line == "":
127 elif format == "tag" and line == "":
128 # when looking for tags (subject: from: etc) they
128 # when looking for tags (subject: from: etc) they
129 # end once you find a blank line in the source
129 # end once you find a blank line in the source
130 format = "tagdone"
130 format = "tagdone"
131 elif message or line:
131 elif message or line:
132 message.append(line)
132 message.append(line)
133 comments.append(line)
133 comments.append(line)
134
134
135 eatdiff(message)
135 eatdiff(message)
136 eatdiff(comments)
136 eatdiff(comments)
137 eatempty(message)
137 eatempty(message)
138 eatempty(comments)
138 eatempty(comments)
139
139
140 # make sure message isn't empty
140 # make sure message isn't empty
141 if format and format.startswith("tag") and subject:
141 if format and format.startswith("tag") and subject:
142 message.insert(0, "")
142 message.insert(0, "")
143 message.insert(0, subject)
143 message.insert(0, subject)
144
144
145 self.message = message
145 self.message = message
146 self.comments = comments
146 self.comments = comments
147 self.user = user
147 self.user = user
148 self.date = date
148 self.date = date
149 self.parent = parent
149 self.parent = parent
150 self.haspatch = diffstart > 1
150 self.haspatch = diffstart > 1
151 self.plainmode = plainmode
151 self.plainmode = plainmode
152
152
153 def setuser(self, user):
153 def setuser(self, user):
154 if not self.updateheader(['From: ', '# User '], user):
154 if not self.updateheader(['From: ', '# User '], user):
155 try:
155 try:
156 patchheaderat = self.comments.index('# HG changeset patch')
156 patchheaderat = self.comments.index('# HG changeset patch')
157 self.comments.insert(patchheaderat + 1, '# User ' + user)
157 self.comments.insert(patchheaderat + 1, '# User ' + user)
158 except ValueError:
158 except ValueError:
159 if self.plainmode or self._hasheader(['Date: ']):
159 if self.plainmode or self._hasheader(['Date: ']):
160 self.comments = ['From: ' + user] + self.comments
160 self.comments = ['From: ' + user] + self.comments
161 else:
161 else:
162 tmp = ['# HG changeset patch', '# User ' + user, '']
162 tmp = ['# HG changeset patch', '# User ' + user, '']
163 self.comments = tmp + self.comments
163 self.comments = tmp + self.comments
164 self.user = user
164 self.user = user
165
165
166 def setdate(self, date):
166 def setdate(self, date):
167 if not self.updateheader(['Date: ', '# Date '], date):
167 if not self.updateheader(['Date: ', '# Date '], date):
168 try:
168 try:
169 patchheaderat = self.comments.index('# HG changeset patch')
169 patchheaderat = self.comments.index('# HG changeset patch')
170 self.comments.insert(patchheaderat + 1, '# Date ' + date)
170 self.comments.insert(patchheaderat + 1, '# Date ' + date)
171 except ValueError:
171 except ValueError:
172 if self.plainmode or self._hasheader(['From: ']):
172 if self.plainmode or self._hasheader(['From: ']):
173 self.comments = ['Date: ' + date] + self.comments
173 self.comments = ['Date: ' + date] + self.comments
174 else:
174 else:
175 tmp = ['# HG changeset patch', '# Date ' + date, '']
175 tmp = ['# HG changeset patch', '# Date ' + date, '']
176 self.comments = tmp + self.comments
176 self.comments = tmp + self.comments
177 self.date = date
177 self.date = date
178
178
179 def setparent(self, parent):
179 def setparent(self, parent):
180 if not self.updateheader(['# Parent '], parent):
180 if not self.updateheader(['# Parent '], parent):
181 try:
181 try:
182 patchheaderat = self.comments.index('# HG changeset patch')
182 patchheaderat = self.comments.index('# HG changeset patch')
183 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
183 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
184 except ValueError:
184 except ValueError:
185 pass
185 pass
186 self.parent = parent
186 self.parent = parent
187
187
188 def setmessage(self, message):
188 def setmessage(self, message):
189 if self.comments:
189 if self.comments:
190 self._delmsg()
190 self._delmsg()
191 self.message = [message]
191 self.message = [message]
192 self.comments += self.message
192 self.comments += self.message
193
193
194 def updateheader(self, prefixes, new):
194 def updateheader(self, prefixes, new):
195 '''Update all references to a field in the patch header.
195 '''Update all references to a field in the patch header.
196 Return whether the field is present.'''
196 Return whether the field is present.'''
197 res = False
197 res = False
198 for prefix in prefixes:
198 for prefix in prefixes:
199 for i in xrange(len(self.comments)):
199 for i in xrange(len(self.comments)):
200 if self.comments[i].startswith(prefix):
200 if self.comments[i].startswith(prefix):
201 self.comments[i] = prefix + new
201 self.comments[i] = prefix + new
202 res = True
202 res = True
203 break
203 break
204 return res
204 return res
205
205
206 def _hasheader(self, prefixes):
206 def _hasheader(self, prefixes):
207 '''Check if a header starts with any of the given prefixes.'''
207 '''Check if a header starts with any of the given prefixes.'''
208 for prefix in prefixes:
208 for prefix in prefixes:
209 for comment in self.comments:
209 for comment in self.comments:
210 if comment.startswith(prefix):
210 if comment.startswith(prefix):
211 return True
211 return True
212 return False
212 return False
213
213
214 def __str__(self):
214 def __str__(self):
215 if not self.comments:
215 if not self.comments:
216 return ''
216 return ''
217 return '\n'.join(self.comments) + '\n\n'
217 return '\n'.join(self.comments) + '\n\n'
218
218
219 def _delmsg(self):
219 def _delmsg(self):
220 '''Remove existing message, keeping the rest of the comments fields.
220 '''Remove existing message, keeping the rest of the comments fields.
221 If comments contains 'subject: ', message will prepend
221 If comments contains 'subject: ', message will prepend
222 the field and a blank line.'''
222 the field and a blank line.'''
223 if self.message:
223 if self.message:
224 subj = 'subject: ' + self.message[0].lower()
224 subj = 'subject: ' + self.message[0].lower()
225 for i in xrange(len(self.comments)):
225 for i in xrange(len(self.comments)):
226 if subj == self.comments[i].lower():
226 if subj == self.comments[i].lower():
227 del self.comments[i]
227 del self.comments[i]
228 self.message = self.message[2:]
228 self.message = self.message[2:]
229 break
229 break
230 ci = 0
230 ci = 0
231 for mi in self.message:
231 for mi in self.message:
232 while mi != self.comments[ci]:
232 while mi != self.comments[ci]:
233 ci += 1
233 ci += 1
234 del self.comments[ci]
234 del self.comments[ci]
235
235
236 class queue(object):
236 class queue(object):
237 def __init__(self, ui, path, patchdir=None):
237 def __init__(self, ui, path, patchdir=None):
238 self.basepath = path
238 self.basepath = path
239 try:
239 try:
240 fh = open(os.path.join(path, 'patches.queue'))
240 fh = open(os.path.join(path, 'patches.queue'))
241 cur = fh.read().rstrip()
241 cur = fh.read().rstrip()
242 if not cur:
242 if not cur:
243 curpath = os.path.join(path, 'patches')
243 curpath = os.path.join(path, 'patches')
244 else:
244 else:
245 curpath = os.path.join(path, 'patches-' + cur)
245 curpath = os.path.join(path, 'patches-' + cur)
246 except IOError:
246 except IOError:
247 curpath = os.path.join(path, 'patches')
247 curpath = os.path.join(path, 'patches')
248 self.path = patchdir or curpath
248 self.path = patchdir or curpath
249 self.opener = util.opener(self.path)
249 self.opener = util.opener(self.path)
250 self.ui = ui
250 self.ui = ui
251 self.applied_dirty = 0
251 self.applied_dirty = 0
252 self.series_dirty = 0
252 self.series_dirty = 0
253 self.added = []
253 self.added = []
254 self.series_path = "series"
254 self.series_path = "series"
255 self.status_path = "status"
255 self.status_path = "status"
256 self.guards_path = "guards"
256 self.guards_path = "guards"
257 self.active_guards = None
257 self.active_guards = None
258 self.guards_dirty = False
258 self.guards_dirty = False
259 # Handle mq.git as a bool with extended values
259 # Handle mq.git as a bool with extended values
260 try:
260 try:
261 gitmode = ui.configbool('mq', 'git', None)
261 gitmode = ui.configbool('mq', 'git', None)
262 if gitmode is None:
262 if gitmode is None:
263 raise error.ConfigError()
263 raise error.ConfigError()
264 self.gitmode = gitmode and 'yes' or 'no'
264 self.gitmode = gitmode and 'yes' or 'no'
265 except error.ConfigError:
265 except error.ConfigError:
266 self.gitmode = ui.config('mq', 'git', 'auto').lower()
266 self.gitmode = ui.config('mq', 'git', 'auto').lower()
267 self.plainmode = ui.configbool('mq', 'plain', False)
267 self.plainmode = ui.configbool('mq', 'plain', False)
268
268
269 @util.propertycache
269 @util.propertycache
270 def applied(self):
270 def applied(self):
271 if os.path.exists(self.join(self.status_path)):
271 if os.path.exists(self.join(self.status_path)):
272 def parse(l):
272 def parse(l):
273 n, name = l.split(':', 1)
273 n, name = l.split(':', 1)
274 return statusentry(bin(n), name)
274 return statusentry(bin(n), name)
275 lines = self.opener(self.status_path).read().splitlines()
275 lines = self.opener(self.status_path).read().splitlines()
276 return [parse(l) for l in lines]
276 return [parse(l) for l in lines]
277 return []
277 return []
278
278
279 @util.propertycache
279 @util.propertycache
280 def full_series(self):
280 def full_series(self):
281 if os.path.exists(self.join(self.series_path)):
281 if os.path.exists(self.join(self.series_path)):
282 return self.opener(self.series_path).read().splitlines()
282 return self.opener(self.series_path).read().splitlines()
283 return []
283 return []
284
284
285 @util.propertycache
285 @util.propertycache
286 def series(self):
286 def series(self):
287 self.parse_series()
287 self.parse_series()
288 return self.series
288 return self.series
289
289
290 @util.propertycache
290 @util.propertycache
291 def series_guards(self):
291 def series_guards(self):
292 self.parse_series()
292 self.parse_series()
293 return self.series_guards
293 return self.series_guards
294
294
295 def invalidate(self):
295 def invalidate(self):
296 for a in 'applied full_series series series_guards'.split():
296 for a in 'applied full_series series series_guards'.split():
297 if a in self.__dict__:
297 if a in self.__dict__:
298 delattr(self, a)
298 delattr(self, a)
299 self.applied_dirty = 0
299 self.applied_dirty = 0
300 self.series_dirty = 0
300 self.series_dirty = 0
301 self.guards_dirty = False
301 self.guards_dirty = False
302 self.active_guards = None
302 self.active_guards = None
303
303
304 def diffopts(self, opts={}, patchfn=None):
304 def diffopts(self, opts={}, patchfn=None):
305 diffopts = patch.diffopts(self.ui, opts)
305 diffopts = patch.diffopts(self.ui, opts)
306 if self.gitmode == 'auto':
306 if self.gitmode == 'auto':
307 diffopts.upgrade = True
307 diffopts.upgrade = True
308 elif self.gitmode == 'keep':
308 elif self.gitmode == 'keep':
309 pass
309 pass
310 elif self.gitmode in ('yes', 'no'):
310 elif self.gitmode in ('yes', 'no'):
311 diffopts.git = self.gitmode == 'yes'
311 diffopts.git = self.gitmode == 'yes'
312 else:
312 else:
313 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
313 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
314 ' got %s') % self.gitmode)
314 ' got %s') % self.gitmode)
315 if patchfn:
315 if patchfn:
316 diffopts = self.patchopts(diffopts, patchfn)
316 diffopts = self.patchopts(diffopts, patchfn)
317 return diffopts
317 return diffopts
318
318
319 def patchopts(self, diffopts, *patches):
319 def patchopts(self, diffopts, *patches):
320 """Return a copy of input diff options with git set to true if
320 """Return a copy of input diff options with git set to true if
321 referenced patch is a git patch and should be preserved as such.
321 referenced patch is a git patch and should be preserved as such.
322 """
322 """
323 diffopts = diffopts.copy()
323 diffopts = diffopts.copy()
324 if not diffopts.git and self.gitmode == 'keep':
324 if not diffopts.git and self.gitmode == 'keep':
325 for patchfn in patches:
325 for patchfn in patches:
326 patchf = self.opener(patchfn, 'r')
326 patchf = self.opener(patchfn, 'r')
327 # if the patch was a git patch, refresh it as a git patch
327 # if the patch was a git patch, refresh it as a git patch
328 for line in patchf:
328 for line in patchf:
329 if line.startswith('diff --git'):
329 if line.startswith('diff --git'):
330 diffopts.git = True
330 diffopts.git = True
331 break
331 break
332 patchf.close()
332 patchf.close()
333 return diffopts
333 return diffopts
334
334
335 def join(self, *p):
335 def join(self, *p):
336 return os.path.join(self.path, *p)
336 return os.path.join(self.path, *p)
337
337
338 def find_series(self, patch):
338 def find_series(self, patch):
339 def matchpatch(l):
339 def matchpatch(l):
340 l = l.split('#', 1)[0]
340 l = l.split('#', 1)[0]
341 return l.strip() == patch
341 return l.strip() == patch
342 for index, l in enumerate(self.full_series):
342 for index, l in enumerate(self.full_series):
343 if matchpatch(l):
343 if matchpatch(l):
344 return index
344 return index
345 return None
345 return None
346
346
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
348
348
349 def parse_series(self):
349 def parse_series(self):
350 self.series = []
350 self.series = []
351 self.series_guards = []
351 self.series_guards = []
352 for l in self.full_series:
352 for l in self.full_series:
353 h = l.find('#')
353 h = l.find('#')
354 if h == -1:
354 if h == -1:
355 patch = l
355 patch = l
356 comment = ''
356 comment = ''
357 elif h == 0:
357 elif h == 0:
358 continue
358 continue
359 else:
359 else:
360 patch = l[:h]
360 patch = l[:h]
361 comment = l[h:]
361 comment = l[h:]
362 patch = patch.strip()
362 patch = patch.strip()
363 if patch:
363 if patch:
364 if patch in self.series:
364 if patch in self.series:
365 raise util.Abort(_('%s appears more than once in %s') %
365 raise util.Abort(_('%s appears more than once in %s') %
366 (patch, self.join(self.series_path)))
366 (patch, self.join(self.series_path)))
367 self.series.append(patch)
367 self.series.append(patch)
368 self.series_guards.append(self.guard_re.findall(comment))
368 self.series_guards.append(self.guard_re.findall(comment))
369
369
370 def check_guard(self, guard):
370 def check_guard(self, guard):
371 if not guard:
371 if not guard:
372 return _('guard cannot be an empty string')
372 return _('guard cannot be an empty string')
373 bad_chars = '# \t\r\n\f'
373 bad_chars = '# \t\r\n\f'
374 first = guard[0]
374 first = guard[0]
375 if first in '-+':
375 if first in '-+':
376 return (_('guard %r starts with invalid character: %r') %
376 return (_('guard %r starts with invalid character: %r') %
377 (guard, first))
377 (guard, first))
378 for c in bad_chars:
378 for c in bad_chars:
379 if c in guard:
379 if c in guard:
380 return _('invalid character in guard %r: %r') % (guard, c)
380 return _('invalid character in guard %r: %r') % (guard, c)
381
381
382 def set_active(self, guards):
382 def set_active(self, guards):
383 for guard in guards:
383 for guard in guards:
384 bad = self.check_guard(guard)
384 bad = self.check_guard(guard)
385 if bad:
385 if bad:
386 raise util.Abort(bad)
386 raise util.Abort(bad)
387 guards = sorted(set(guards))
387 guards = sorted(set(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
389 self.active_guards = guards
389 self.active_guards = guards
390 self.guards_dirty = True
390 self.guards_dirty = True
391
391
392 def active(self):
392 def active(self):
393 if self.active_guards is None:
393 if self.active_guards is None:
394 self.active_guards = []
394 self.active_guards = []
395 try:
395 try:
396 guards = self.opener(self.guards_path).read().split()
396 guards = self.opener(self.guards_path).read().split()
397 except IOError, err:
397 except IOError, err:
398 if err.errno != errno.ENOENT:
398 if err.errno != errno.ENOENT:
399 raise
399 raise
400 guards = []
400 guards = []
401 for i, guard in enumerate(guards):
401 for i, guard in enumerate(guards):
402 bad = self.check_guard(guard)
402 bad = self.check_guard(guard)
403 if bad:
403 if bad:
404 self.ui.warn('%s:%d: %s\n' %
404 self.ui.warn('%s:%d: %s\n' %
405 (self.join(self.guards_path), i + 1, bad))
405 (self.join(self.guards_path), i + 1, bad))
406 else:
406 else:
407 self.active_guards.append(guard)
407 self.active_guards.append(guard)
408 return self.active_guards
408 return self.active_guards
409
409
410 def set_guards(self, idx, guards):
410 def set_guards(self, idx, guards):
411 for g in guards:
411 for g in guards:
412 if len(g) < 2:
412 if len(g) < 2:
413 raise util.Abort(_('guard %r too short') % g)
413 raise util.Abort(_('guard %r too short') % g)
414 if g[0] not in '-+':
414 if g[0] not in '-+':
415 raise util.Abort(_('guard %r starts with invalid char') % g)
415 raise util.Abort(_('guard %r starts with invalid char') % g)
416 bad = self.check_guard(g[1:])
416 bad = self.check_guard(g[1:])
417 if bad:
417 if bad:
418 raise util.Abort(bad)
418 raise util.Abort(bad)
419 drop = self.guard_re.sub('', self.full_series[idx])
419 drop = self.guard_re.sub('', self.full_series[idx])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
421 self.parse_series()
421 self.parse_series()
422 self.series_dirty = True
422 self.series_dirty = True
423
423
424 def pushable(self, idx):
424 def pushable(self, idx):
425 if isinstance(idx, str):
425 if isinstance(idx, str):
426 idx = self.series.index(idx)
426 idx = self.series.index(idx)
427 patchguards = self.series_guards[idx]
427 patchguards = self.series_guards[idx]
428 if not patchguards:
428 if not patchguards:
429 return True, None
429 return True, None
430 guards = self.active()
430 guards = self.active()
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
432 if exactneg:
432 if exactneg:
433 return False, exactneg[0]
433 return False, exactneg[0]
434 pos = [g for g in patchguards if g[0] == '+']
434 pos = [g for g in patchguards if g[0] == '+']
435 exactpos = [g for g in pos if g[1:] in guards]
435 exactpos = [g for g in pos if g[1:] in guards]
436 if pos:
436 if pos:
437 if exactpos:
437 if exactpos:
438 return True, exactpos[0]
438 return True, exactpos[0]
439 return False, pos
439 return False, pos
440 return True, ''
440 return True, ''
441
441
442 def explain_pushable(self, idx, all_patches=False):
442 def explain_pushable(self, idx, all_patches=False):
443 write = all_patches and self.ui.write or self.ui.warn
443 write = all_patches and self.ui.write or self.ui.warn
444 if all_patches or self.ui.verbose:
444 if all_patches or self.ui.verbose:
445 if isinstance(idx, str):
445 if isinstance(idx, str):
446 idx = self.series.index(idx)
446 idx = self.series.index(idx)
447 pushable, why = self.pushable(idx)
447 pushable, why = self.pushable(idx)
448 if all_patches and pushable:
448 if all_patches and pushable:
449 if why is None:
449 if why is None:
450 write(_('allowing %s - no guards in effect\n') %
450 write(_('allowing %s - no guards in effect\n') %
451 self.series[idx])
451 self.series[idx])
452 else:
452 else:
453 if not why:
453 if not why:
454 write(_('allowing %s - no matching negative guards\n') %
454 write(_('allowing %s - no matching negative guards\n') %
455 self.series[idx])
455 self.series[idx])
456 else:
456 else:
457 write(_('allowing %s - guarded by %r\n') %
457 write(_('allowing %s - guarded by %r\n') %
458 (self.series[idx], why))
458 (self.series[idx], why))
459 if not pushable:
459 if not pushable:
460 if why:
460 if why:
461 write(_('skipping %s - guarded by %r\n') %
461 write(_('skipping %s - guarded by %r\n') %
462 (self.series[idx], why))
462 (self.series[idx], why))
463 else:
463 else:
464 write(_('skipping %s - no matching guards\n') %
464 write(_('skipping %s - no matching guards\n') %
465 self.series[idx])
465 self.series[idx])
466
466
467 def save_dirty(self):
467 def save_dirty(self):
468 def write_list(items, path):
468 def write_list(items, path):
469 fp = self.opener(path, 'w')
469 fp = self.opener(path, 'w')
470 for i in items:
470 for i in items:
471 fp.write("%s\n" % i)
471 fp.write("%s\n" % i)
472 fp.close()
472 fp.close()
473 if self.applied_dirty:
473 if self.applied_dirty:
474 write_list(map(str, self.applied), self.status_path)
474 write_list(map(str, self.applied), self.status_path)
475 if self.series_dirty:
475 if self.series_dirty:
476 write_list(self.full_series, self.series_path)
476 write_list(self.full_series, self.series_path)
477 if self.guards_dirty:
477 if self.guards_dirty:
478 write_list(self.active_guards, self.guards_path)
478 write_list(self.active_guards, self.guards_path)
479 if self.added:
479 if self.added:
480 qrepo = self.qrepo()
480 qrepo = self.qrepo()
481 if qrepo:
481 if qrepo:
482 qrepo[None].add(f for f in self.added if f not in qrepo[None])
482 qrepo[None].add(f for f in self.added if f not in qrepo[None])
483 self.added = []
483 self.added = []
484
484
485 def removeundo(self, repo):
485 def removeundo(self, repo):
486 undo = repo.sjoin('undo')
486 undo = repo.sjoin('undo')
487 if not os.path.exists(undo):
487 if not os.path.exists(undo):
488 return
488 return
489 try:
489 try:
490 os.unlink(undo)
490 os.unlink(undo)
491 except OSError, inst:
491 except OSError, inst:
492 self.ui.warn(_('error removing undo: %s\n') % str(inst))
492 self.ui.warn(_('error removing undo: %s\n') % str(inst))
493
493
494 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
494 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
495 fp=None, changes=None, opts={}):
495 fp=None, changes=None, opts={}):
496 stat = opts.get('stat')
496 stat = opts.get('stat')
497 m = cmdutil.match(repo, files, opts)
497 m = cmdutil.match(repo, files, opts)
498 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
498 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
499 changes, stat, fp)
499 changes, stat, fp)
500
500
501 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
501 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
502 # first try just applying the patch
502 # first try just applying the patch
503 (err, n) = self.apply(repo, [patch], update_status=False,
503 (err, n) = self.apply(repo, [patch], update_status=False,
504 strict=True, merge=rev)
504 strict=True, merge=rev)
505
505
506 if err == 0:
506 if err == 0:
507 return (err, n)
507 return (err, n)
508
508
509 if n is None:
509 if n is None:
510 raise util.Abort(_("apply failed for patch %s") % patch)
510 raise util.Abort(_("apply failed for patch %s") % patch)
511
511
512 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
512 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
513
513
514 # apply failed, strip away that rev and merge.
514 # apply failed, strip away that rev and merge.
515 hg.clean(repo, head)
515 hg.clean(repo, head)
516 self.strip(repo, [n], update=False, backup='strip')
516 self.strip(repo, [n], update=False, backup='strip')
517
517
518 ctx = repo[rev]
518 ctx = repo[rev]
519 ret = hg.merge(repo, rev)
519 ret = hg.merge(repo, rev)
520 if ret:
520 if ret:
521 raise util.Abort(_("update returned %d") % ret)
521 raise util.Abort(_("update returned %d") % ret)
522 n = repo.commit(ctx.description(), ctx.user(), force=True)
522 n = repo.commit(ctx.description(), ctx.user(), force=True)
523 if n is None:
523 if n is None:
524 raise util.Abort(_("repo commit failed"))
524 raise util.Abort(_("repo commit failed"))
525 try:
525 try:
526 ph = patchheader(mergeq.join(patch), self.plainmode)
526 ph = patchheader(mergeq.join(patch), self.plainmode)
527 except:
527 except:
528 raise util.Abort(_("unable to read %s") % patch)
528 raise util.Abort(_("unable to read %s") % patch)
529
529
530 diffopts = self.patchopts(diffopts, patch)
530 diffopts = self.patchopts(diffopts, patch)
531 patchf = self.opener(patch, "w")
531 patchf = self.opener(patch, "w")
532 comments = str(ph)
532 comments = str(ph)
533 if comments:
533 if comments:
534 patchf.write(comments)
534 patchf.write(comments)
535 self.printdiff(repo, diffopts, head, n, fp=patchf)
535 self.printdiff(repo, diffopts, head, n, fp=patchf)
536 patchf.close()
536 patchf.close()
537 self.removeundo(repo)
537 self.removeundo(repo)
538 return (0, n)
538 return (0, n)
539
539
540 def qparents(self, repo, rev=None):
540 def qparents(self, repo, rev=None):
541 if rev is None:
541 if rev is None:
542 (p1, p2) = repo.dirstate.parents()
542 (p1, p2) = repo.dirstate.parents()
543 if p2 == nullid:
543 if p2 == nullid:
544 return p1
544 return p1
545 if not self.applied:
545 if not self.applied:
546 return None
546 return None
547 return self.applied[-1].node
547 return self.applied[-1].node
548 p1, p2 = repo.changelog.parents(rev)
548 p1, p2 = repo.changelog.parents(rev)
549 if p2 != nullid and p2 in [x.node for x in self.applied]:
549 if p2 != nullid and p2 in [x.node for x in self.applied]:
550 return p2
550 return p2
551 return p1
551 return p1
552
552
553 def mergepatch(self, repo, mergeq, series, diffopts):
553 def mergepatch(self, repo, mergeq, series, diffopts):
554 if not self.applied:
554 if not self.applied:
555 # each of the patches merged in will have two parents. This
555 # each of the patches merged in will have two parents. This
556 # can confuse the qrefresh, qdiff, and strip code because it
556 # can confuse the qrefresh, qdiff, and strip code because it
557 # needs to know which parent is actually in the patch queue.
557 # needs to know which parent is actually in the patch queue.
558 # so, we insert a merge marker with only one parent. This way
558 # so, we insert a merge marker with only one parent. This way
559 # the first patch in the queue is never a merge patch
559 # the first patch in the queue is never a merge patch
560 #
560 #
561 pname = ".hg.patches.merge.marker"
561 pname = ".hg.patches.merge.marker"
562 n = repo.commit('[mq]: merge marker', force=True)
562 n = repo.commit('[mq]: merge marker', force=True)
563 self.removeundo(repo)
563 self.removeundo(repo)
564 self.applied.append(statusentry(n, pname))
564 self.applied.append(statusentry(n, pname))
565 self.applied_dirty = 1
565 self.applied_dirty = 1
566
566
567 head = self.qparents(repo)
567 head = self.qparents(repo)
568
568
569 for patch in series:
569 for patch in series:
570 patch = mergeq.lookup(patch, strict=True)
570 patch = mergeq.lookup(patch, strict=True)
571 if not patch:
571 if not patch:
572 self.ui.warn(_("patch %s does not exist\n") % patch)
572 self.ui.warn(_("patch %s does not exist\n") % patch)
573 return (1, None)
573 return (1, None)
574 pushable, reason = self.pushable(patch)
574 pushable, reason = self.pushable(patch)
575 if not pushable:
575 if not pushable:
576 self.explain_pushable(patch, all_patches=True)
576 self.explain_pushable(patch, all_patches=True)
577 continue
577 continue
578 info = mergeq.isapplied(patch)
578 info = mergeq.isapplied(patch)
579 if not info:
579 if not info:
580 self.ui.warn(_("patch %s is not applied\n") % patch)
580 self.ui.warn(_("patch %s is not applied\n") % patch)
581 return (1, None)
581 return (1, None)
582 rev = info[1]
582 rev = info[1]
583 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
583 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
584 if head:
584 if head:
585 self.applied.append(statusentry(head, patch))
585 self.applied.append(statusentry(head, patch))
586 self.applied_dirty = 1
586 self.applied_dirty = 1
587 if err:
587 if err:
588 return (err, head)
588 return (err, head)
589 self.save_dirty()
589 self.save_dirty()
590 return (0, head)
590 return (0, head)
591
591
592 def patch(self, repo, patchfile):
592 def patch(self, repo, patchfile):
593 '''Apply patchfile to the working directory.
593 '''Apply patchfile to the working directory.
594 patchfile: name of patch file'''
594 patchfile: name of patch file'''
595 files = {}
595 files = {}
596 try:
596 try:
597 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
597 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
598 files=files, eolmode=None)
598 files=files, eolmode=None)
599 except Exception, inst:
599 except Exception, inst:
600 self.ui.note(str(inst) + '\n')
600 self.ui.note(str(inst) + '\n')
601 if not self.ui.verbose:
601 if not self.ui.verbose:
602 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
602 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
603 return (False, files, False)
603 return (False, files, False)
604
604
605 return (True, files, fuzz)
605 return (True, files, fuzz)
606
606
607 def apply(self, repo, series, list=False, update_status=True,
607 def apply(self, repo, series, list=False, update_status=True,
608 strict=False, patchdir=None, merge=None, all_files=None):
608 strict=False, patchdir=None, merge=None, all_files=None):
609 wlock = lock = tr = None
609 wlock = lock = tr = None
610 try:
610 try:
611 wlock = repo.wlock()
611 wlock = repo.wlock()
612 lock = repo.lock()
612 lock = repo.lock()
613 tr = repo.transaction("qpush")
613 tr = repo.transaction("qpush")
614 try:
614 try:
615 ret = self._apply(repo, series, list, update_status,
615 ret = self._apply(repo, series, list, update_status,
616 strict, patchdir, merge, all_files=all_files)
616 strict, patchdir, merge, all_files=all_files)
617 tr.close()
617 tr.close()
618 self.save_dirty()
618 self.save_dirty()
619 return ret
619 return ret
620 except:
620 except:
621 try:
621 try:
622 tr.abort()
622 tr.abort()
623 finally:
623 finally:
624 repo.invalidate()
624 repo.invalidate()
625 repo.dirstate.invalidate()
625 repo.dirstate.invalidate()
626 raise
626 raise
627 finally:
627 finally:
628 release(tr, lock, wlock)
628 release(tr, lock, wlock)
629 self.removeundo(repo)
629 self.removeundo(repo)
630
630
631 def _apply(self, repo, series, list=False, update_status=True,
631 def _apply(self, repo, series, list=False, update_status=True,
632 strict=False, patchdir=None, merge=None, all_files=None):
632 strict=False, patchdir=None, merge=None, all_files=None):
633 '''returns (error, hash)
633 '''returns (error, hash)
634 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
634 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
635 # TODO unify with commands.py
635 # TODO unify with commands.py
636 if not patchdir:
636 if not patchdir:
637 patchdir = self.path
637 patchdir = self.path
638 err = 0
638 err = 0
639 n = None
639 n = None
640 for patchname in series:
640 for patchname in series:
641 pushable, reason = self.pushable(patchname)
641 pushable, reason = self.pushable(patchname)
642 if not pushable:
642 if not pushable:
643 self.explain_pushable(patchname, all_patches=True)
643 self.explain_pushable(patchname, all_patches=True)
644 continue
644 continue
645 self.ui.status(_("applying %s\n") % patchname)
645 self.ui.status(_("applying %s\n") % patchname)
646 pf = os.path.join(patchdir, patchname)
646 pf = os.path.join(patchdir, patchname)
647
647
648 try:
648 try:
649 ph = patchheader(self.join(patchname), self.plainmode)
649 ph = patchheader(self.join(patchname), self.plainmode)
650 except:
650 except:
651 self.ui.warn(_("unable to read %s\n") % patchname)
651 self.ui.warn(_("unable to read %s\n") % patchname)
652 err = 1
652 err = 1
653 break
653 break
654
654
655 message = ph.message
655 message = ph.message
656 if not message:
656 if not message:
657 # The commit message should not be translated
657 # The commit message should not be translated
658 message = "imported patch %s\n" % patchname
658 message = "imported patch %s\n" % patchname
659 else:
659 else:
660 if list:
660 if list:
661 # The commit message should not be translated
661 # The commit message should not be translated
662 message.append("\nimported patch %s" % patchname)
662 message.append("\nimported patch %s" % patchname)
663 message = '\n'.join(message)
663 message = '\n'.join(message)
664
664
665 if ph.haspatch:
665 if ph.haspatch:
666 (patcherr, files, fuzz) = self.patch(repo, pf)
666 (patcherr, files, fuzz) = self.patch(repo, pf)
667 if all_files is not None:
667 if all_files is not None:
668 all_files.update(files)
668 all_files.update(files)
669 patcherr = not patcherr
669 patcherr = not patcherr
670 else:
670 else:
671 self.ui.warn(_("patch %s is empty\n") % patchname)
671 self.ui.warn(_("patch %s is empty\n") % patchname)
672 patcherr, files, fuzz = 0, [], 0
672 patcherr, files, fuzz = 0, [], 0
673
673
674 if merge and files:
674 if merge and files:
675 # Mark as removed/merged and update dirstate parent info
675 # Mark as removed/merged and update dirstate parent info
676 removed = []
676 removed = []
677 merged = []
677 merged = []
678 for f in files:
678 for f in files:
679 if os.path.lexists(repo.wjoin(f)):
679 if os.path.lexists(repo.wjoin(f)):
680 merged.append(f)
680 merged.append(f)
681 else:
681 else:
682 removed.append(f)
682 removed.append(f)
683 for f in removed:
683 for f in removed:
684 repo.dirstate.remove(f)
684 repo.dirstate.remove(f)
685 for f in merged:
685 for f in merged:
686 repo.dirstate.merge(f)
686 repo.dirstate.merge(f)
687 p1, p2 = repo.dirstate.parents()
687 p1, p2 = repo.dirstate.parents()
688 repo.dirstate.setparents(p1, merge)
688 repo.dirstate.setparents(p1, merge)
689
689
690 files = cmdutil.updatedir(self.ui, repo, files)
690 files = cmdutil.updatedir(self.ui, repo, files)
691 match = cmdutil.matchfiles(repo, files or [])
691 match = cmdutil.matchfiles(repo, files or [])
692 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
692 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
693
693
694 if n is None:
694 if n is None:
695 raise util.Abort(_("repository commit failed"))
695 raise util.Abort(_("repository commit failed"))
696
696
697 if update_status:
697 if update_status:
698 self.applied.append(statusentry(n, patchname))
698 self.applied.append(statusentry(n, patchname))
699
699
700 if patcherr:
700 if patcherr:
701 self.ui.warn(_("patch failed, rejects left in working dir\n"))
701 self.ui.warn(_("patch failed, rejects left in working dir\n"))
702 err = 2
702 err = 2
703 break
703 break
704
704
705 if fuzz and strict:
705 if fuzz and strict:
706 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
706 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
707 err = 3
707 err = 3
708 break
708 break
709 return (err, n)
709 return (err, n)
710
710
711 def _cleanup(self, patches, numrevs, keep=False):
711 def _cleanup(self, patches, numrevs, keep=False):
712 if not keep:
712 if not keep:
713 r = self.qrepo()
713 r = self.qrepo()
714 if r:
714 if r:
715 r[None].remove(patches, True)
715 r[None].remove(patches, True)
716 else:
716 else:
717 for p in patches:
717 for p in patches:
718 os.unlink(self.join(p))
718 os.unlink(self.join(p))
719
719
720 if numrevs:
720 if numrevs:
721 del self.applied[:numrevs]
721 del self.applied[:numrevs]
722 self.applied_dirty = 1
722 self.applied_dirty = 1
723
723
724 for i in sorted([self.find_series(p) for p in patches], reverse=True):
724 for i in sorted([self.find_series(p) for p in patches], reverse=True):
725 del self.full_series[i]
725 del self.full_series[i]
726 self.parse_series()
726 self.parse_series()
727 self.series_dirty = 1
727 self.series_dirty = 1
728
728
729 def _revpatches(self, repo, revs):
729 def _revpatches(self, repo, revs):
730 firstrev = repo[self.applied[0].node].rev()
730 firstrev = repo[self.applied[0].node].rev()
731 patches = []
731 patches = []
732 for i, rev in enumerate(revs):
732 for i, rev in enumerate(revs):
733
733
734 if rev < firstrev:
734 if rev < firstrev:
735 raise util.Abort(_('revision %d is not managed') % rev)
735 raise util.Abort(_('revision %d is not managed') % rev)
736
736
737 ctx = repo[rev]
737 ctx = repo[rev]
738 base = self.applied[i].node
738 base = self.applied[i].node
739 if ctx.node() != base:
739 if ctx.node() != base:
740 msg = _('cannot delete revision %d above applied patches')
740 msg = _('cannot delete revision %d above applied patches')
741 raise util.Abort(msg % rev)
741 raise util.Abort(msg % rev)
742
742
743 patch = self.applied[i].name
743 patch = self.applied[i].name
744 for fmt in ('[mq]: %s', 'imported patch %s'):
744 for fmt in ('[mq]: %s', 'imported patch %s'):
745 if ctx.description() == fmt % patch:
745 if ctx.description() == fmt % patch:
746 msg = _('patch %s finalized without changeset message\n')
746 msg = _('patch %s finalized without changeset message\n')
747 repo.ui.status(msg % patch)
747 repo.ui.status(msg % patch)
748 break
748 break
749
749
750 patches.append(patch)
750 patches.append(patch)
751 return patches
751 return patches
752
752
753 def finish(self, repo, revs):
753 def finish(self, repo, revs):
754 patches = self._revpatches(repo, sorted(revs))
754 patches = self._revpatches(repo, sorted(revs))
755 self._cleanup(patches, len(patches))
755 self._cleanup(patches, len(patches))
756
756
757 def delete(self, repo, patches, opts):
757 def delete(self, repo, patches, opts):
758 if not patches and not opts.get('rev'):
758 if not patches and not opts.get('rev'):
759 raise util.Abort(_('qdelete requires at least one revision or '
759 raise util.Abort(_('qdelete requires at least one revision or '
760 'patch name'))
760 'patch name'))
761
761
762 realpatches = []
762 realpatches = []
763 for patch in patches:
763 for patch in patches:
764 patch = self.lookup(patch, strict=True)
764 patch = self.lookup(patch, strict=True)
765 info = self.isapplied(patch)
765 info = self.isapplied(patch)
766 if info:
766 if info:
767 raise util.Abort(_("cannot delete applied patch %s") % patch)
767 raise util.Abort(_("cannot delete applied patch %s") % patch)
768 if patch not in self.series:
768 if patch not in self.series:
769 raise util.Abort(_("patch %s not in series file") % patch)
769 raise util.Abort(_("patch %s not in series file") % patch)
770 if patch not in realpatches:
770 if patch not in realpatches:
771 realpatches.append(patch)
771 realpatches.append(patch)
772
772
773 numrevs = 0
773 numrevs = 0
774 if opts.get('rev'):
774 if opts.get('rev'):
775 if not self.applied:
775 if not self.applied:
776 raise util.Abort(_('no patches applied'))
776 raise util.Abort(_('no patches applied'))
777 revs = cmdutil.revrange(repo, opts.get('rev'))
777 revs = cmdutil.revrange(repo, opts.get('rev'))
778 if len(revs) > 1 and revs[0] > revs[1]:
778 if len(revs) > 1 and revs[0] > revs[1]:
779 revs.reverse()
779 revs.reverse()
780 revpatches = self._revpatches(repo, revs)
780 revpatches = self._revpatches(repo, revs)
781 realpatches += revpatches
781 realpatches += revpatches
782 numrevs = len(revpatches)
782 numrevs = len(revpatches)
783
783
784 self._cleanup(realpatches, numrevs, opts.get('keep'))
784 self._cleanup(realpatches, numrevs, opts.get('keep'))
785
785
786 def check_toppatch(self, repo):
786 def check_toppatch(self, repo):
787 if self.applied:
787 if self.applied:
788 top = self.applied[-1].node
788 top = self.applied[-1].node
789 patch = self.applied[-1].name
789 patch = self.applied[-1].name
790 pp = repo.dirstate.parents()
790 pp = repo.dirstate.parents()
791 if top not in pp:
791 if top not in pp:
792 raise util.Abort(_("working directory revision is not qtip"))
792 raise util.Abort(_("working directory revision is not qtip"))
793 return top, patch
793 return top, patch
794 return None, None
794 return None, None
795
795
796 def check_localchanges(self, repo, force=False, refresh=True):
796 def check_localchanges(self, repo, force=False, refresh=True):
797 m, a, r, d = repo.status()[:4]
797 m, a, r, d = repo.status()[:4]
798 if (m or a or r or d) and not force:
798 if (m or a or r or d) and not force:
799 if refresh:
799 if refresh:
800 raise util.Abort(_("local changes found, refresh first"))
800 raise util.Abort(_("local changes found, refresh first"))
801 else:
801 else:
802 raise util.Abort(_("local changes found"))
802 raise util.Abort(_("local changes found"))
803 return m, a, r, d
803 return m, a, r, d
804
804
805 _reserved = ('series', 'status', 'guards')
805 _reserved = ('series', 'status', 'guards')
806 def check_reserved_name(self, name):
806 def check_reserved_name(self, name):
807 if (name in self._reserved or name.startswith('.hg')
807 if (name in self._reserved or name.startswith('.hg')
808 or name.startswith('.mq') or '#' in name or ':' in name):
808 or name.startswith('.mq') or '#' in name or ':' in name):
809 raise util.Abort(_('"%s" cannot be used as the name of a patch')
809 raise util.Abort(_('"%s" cannot be used as the name of a patch')
810 % name)
810 % name)
811
811
812 def new(self, repo, patchfn, *pats, **opts):
812 def new(self, repo, patchfn, *pats, **opts):
813 """options:
813 """options:
814 msg: a string or a no-argument function returning a string
814 msg: a string or a no-argument function returning a string
815 """
815 """
816 msg = opts.get('msg')
816 msg = opts.get('msg')
817 user = opts.get('user')
817 user = opts.get('user')
818 date = opts.get('date')
818 date = opts.get('date')
819 if date:
819 if date:
820 date = util.parsedate(date)
820 date = util.parsedate(date)
821 diffopts = self.diffopts({'git': opts.get('git')})
821 diffopts = self.diffopts({'git': opts.get('git')})
822 self.check_reserved_name(patchfn)
822 self.check_reserved_name(patchfn)
823 if os.path.exists(self.join(patchfn)):
823 if os.path.exists(self.join(patchfn)):
824 if os.path.isdir(self.join(patchfn)):
824 if os.path.isdir(self.join(patchfn)):
825 raise util.Abort(_('"%s" already exists as a directory')
825 raise util.Abort(_('"%s" already exists as a directory')
826 % patchfn)
826 % patchfn)
827 else:
827 else:
828 raise util.Abort(_('patch "%s" already exists') % patchfn)
828 raise util.Abort(_('patch "%s" already exists') % patchfn)
829 if opts.get('include') or opts.get('exclude') or pats:
829 if opts.get('include') or opts.get('exclude') or pats:
830 match = cmdutil.match(repo, pats, opts)
830 match = cmdutil.match(repo, pats, opts)
831 # detect missing files in pats
831 # detect missing files in pats
832 def badfn(f, msg):
832 def badfn(f, msg):
833 raise util.Abort('%s: %s' % (f, msg))
833 raise util.Abort('%s: %s' % (f, msg))
834 match.bad = badfn
834 match.bad = badfn
835 m, a, r, d = repo.status(match=match)[:4]
835 m, a, r, d = repo.status(match=match)[:4]
836 else:
836 else:
837 m, a, r, d = self.check_localchanges(repo, force=True)
837 m, a, r, d = self.check_localchanges(repo, force=True)
838 match = cmdutil.matchfiles(repo, m + a + r)
838 match = cmdutil.matchfiles(repo, m + a + r)
839 if len(repo[None].parents()) > 1:
839 if len(repo[None].parents()) > 1:
840 raise util.Abort(_('cannot manage merge changesets'))
840 raise util.Abort(_('cannot manage merge changesets'))
841 commitfiles = m + a + r
841 commitfiles = m + a + r
842 self.check_toppatch(repo)
842 self.check_toppatch(repo)
843 insert = self.full_series_end()
843 insert = self.full_series_end()
844 wlock = repo.wlock()
844 wlock = repo.wlock()
845 try:
845 try:
846 try:
846 try:
847 # if patch file write fails, abort early
847 # if patch file write fails, abort early
848 p = self.opener(patchfn, "w")
848 p = self.opener(patchfn, "w")
849 except IOError, e:
849 except IOError, e:
850 raise util.Abort(_('cannot write patch "%s": %s')
850 raise util.Abort(_('cannot write patch "%s": %s')
851 % (patchfn, e.strerror))
851 % (patchfn, e.strerror))
852 try:
852 try:
853 if self.plainmode:
853 if self.plainmode:
854 if user:
854 if user:
855 p.write("From: " + user + "\n")
855 p.write("From: " + user + "\n")
856 if not date:
856 if not date:
857 p.write("\n")
857 p.write("\n")
858 if date:
858 if date:
859 p.write("Date: %d %d\n\n" % date)
859 p.write("Date: %d %d\n\n" % date)
860 else:
860 else:
861 p.write("# HG changeset patch\n")
861 p.write("# HG changeset patch\n")
862 p.write("# Parent "
862 p.write("# Parent "
863 + hex(repo[None].parents()[0].node()) + "\n")
863 + hex(repo[None].parents()[0].node()) + "\n")
864 if user:
864 if user:
865 p.write("# User " + user + "\n")
865 p.write("# User " + user + "\n")
866 if date:
866 if date:
867 p.write("# Date %s %s\n\n" % date)
867 p.write("# Date %s %s\n\n" % date)
868 if hasattr(msg, '__call__'):
868 if hasattr(msg, '__call__'):
869 msg = msg()
869 msg = msg()
870 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
870 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
871 n = repo.commit(commitmsg, user, date, match=match, force=True)
871 n = repo.commit(commitmsg, user, date, match=match, force=True)
872 if n is None:
872 if n is None:
873 raise util.Abort(_("repo commit failed"))
873 raise util.Abort(_("repo commit failed"))
874 try:
874 try:
875 self.full_series[insert:insert] = [patchfn]
875 self.full_series[insert:insert] = [patchfn]
876 self.applied.append(statusentry(n, patchfn))
876 self.applied.append(statusentry(n, patchfn))
877 self.parse_series()
877 self.parse_series()
878 self.series_dirty = 1
878 self.series_dirty = 1
879 self.applied_dirty = 1
879 self.applied_dirty = 1
880 if msg:
880 if msg:
881 msg = msg + "\n\n"
881 msg = msg + "\n\n"
882 p.write(msg)
882 p.write(msg)
883 if commitfiles:
883 if commitfiles:
884 parent = self.qparents(repo, n)
884 parent = self.qparents(repo, n)
885 chunks = patch.diff(repo, node1=parent, node2=n,
885 chunks = patch.diff(repo, node1=parent, node2=n,
886 match=match, opts=diffopts)
886 match=match, opts=diffopts)
887 for chunk in chunks:
887 for chunk in chunks:
888 p.write(chunk)
888 p.write(chunk)
889 p.close()
889 p.close()
890 wlock.release()
890 wlock.release()
891 wlock = None
891 wlock = None
892 r = self.qrepo()
892 r = self.qrepo()
893 if r:
893 if r:
894 r[None].add([patchfn])
894 r[None].add([patchfn])
895 except:
895 except:
896 repo.rollback()
896 repo.rollback()
897 raise
897 raise
898 except Exception:
898 except Exception:
899 patchpath = self.join(patchfn)
899 patchpath = self.join(patchfn)
900 try:
900 try:
901 os.unlink(patchpath)
901 os.unlink(patchpath)
902 except:
902 except:
903 self.ui.warn(_('error unlinking %s\n') % patchpath)
903 self.ui.warn(_('error unlinking %s\n') % patchpath)
904 raise
904 raise
905 self.removeundo(repo)
905 self.removeundo(repo)
906 finally:
906 finally:
907 release(wlock)
907 release(wlock)
908
908
909 def strip(self, repo, revs, update=True, backup="all", force=None):
909 def strip(self, repo, revs, update=True, backup="all", force=None):
910 wlock = lock = None
910 wlock = lock = None
911 try:
911 try:
912 wlock = repo.wlock()
912 wlock = repo.wlock()
913 lock = repo.lock()
913 lock = repo.lock()
914
914
915 if update:
915 if update:
916 self.check_localchanges(repo, force=force, refresh=False)
916 self.check_localchanges(repo, force=force, refresh=False)
917 urev = self.qparents(repo, revs[0])
917 urev = self.qparents(repo, revs[0])
918 hg.clean(repo, urev)
918 hg.clean(repo, urev)
919 repo.dirstate.write()
919 repo.dirstate.write()
920
920
921 self.removeundo(repo)
921 self.removeundo(repo)
922 for rev in revs:
922 for rev in revs:
923 repair.strip(self.ui, repo, rev, backup)
923 repair.strip(self.ui, repo, rev, backup)
924 # strip may have unbundled a set of backed up revisions after
924 # strip may have unbundled a set of backed up revisions after
925 # the actual strip
925 # the actual strip
926 self.removeundo(repo)
926 self.removeundo(repo)
927 finally:
927 finally:
928 release(lock, wlock)
928 release(lock, wlock)
929
929
930 def isapplied(self, patch):
930 def isapplied(self, patch):
931 """returns (index, rev, patch)"""
931 """returns (index, rev, patch)"""
932 for i, a in enumerate(self.applied):
932 for i, a in enumerate(self.applied):
933 if a.name == patch:
933 if a.name == patch:
934 return (i, a.node, a.name)
934 return (i, a.node, a.name)
935 return None
935 return None
936
936
937 # if the exact patch name does not exist, we try a few
937 # if the exact patch name does not exist, we try a few
938 # variations. If strict is passed, we try only #1
938 # variations. If strict is passed, we try only #1
939 #
939 #
940 # 1) a number to indicate an offset in the series file
940 # 1) a number to indicate an offset in the series file
941 # 2) a unique substring of the patch name was given
941 # 2) a unique substring of the patch name was given
942 # 3) patchname[-+]num to indicate an offset in the series file
942 # 3) patchname[-+]num to indicate an offset in the series file
943 def lookup(self, patch, strict=False):
943 def lookup(self, patch, strict=False):
944 patch = patch and str(patch)
944 patch = patch and str(patch)
945
945
946 def partial_name(s):
946 def partial_name(s):
947 if s in self.series:
947 if s in self.series:
948 return s
948 return s
949 matches = [x for x in self.series if s in x]
949 matches = [x for x in self.series if s in x]
950 if len(matches) > 1:
950 if len(matches) > 1:
951 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
951 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
952 for m in matches:
952 for m in matches:
953 self.ui.warn(' %s\n' % m)
953 self.ui.warn(' %s\n' % m)
954 return None
954 return None
955 if matches:
955 if matches:
956 return matches[0]
956 return matches[0]
957 if self.series and self.applied:
957 if self.series and self.applied:
958 if s == 'qtip':
958 if s == 'qtip':
959 return self.series[self.series_end(True) - 1]
959 return self.series[self.series_end(True) - 1]
960 if s == 'qbase':
960 if s == 'qbase':
961 return self.series[0]
961 return self.series[0]
962 return None
962 return None
963
963
964 if patch is None:
964 if patch is None:
965 return None
965 return None
966 if patch in self.series:
966 if patch in self.series:
967 return patch
967 return patch
968
968
969 if not os.path.isfile(self.join(patch)):
969 if not os.path.isfile(self.join(patch)):
970 try:
970 try:
971 sno = int(patch)
971 sno = int(patch)
972 except (ValueError, OverflowError):
972 except (ValueError, OverflowError):
973 pass
973 pass
974 else:
974 else:
975 if -len(self.series) <= sno < len(self.series):
975 if -len(self.series) <= sno < len(self.series):
976 return self.series[sno]
976 return self.series[sno]
977
977
978 if not strict:
978 if not strict:
979 res = partial_name(patch)
979 res = partial_name(patch)
980 if res:
980 if res:
981 return res
981 return res
982 minus = patch.rfind('-')
982 minus = patch.rfind('-')
983 if minus >= 0:
983 if minus >= 0:
984 res = partial_name(patch[:minus])
984 res = partial_name(patch[:minus])
985 if res:
985 if res:
986 i = self.series.index(res)
986 i = self.series.index(res)
987 try:
987 try:
988 off = int(patch[minus + 1:] or 1)
988 off = int(patch[minus + 1:] or 1)
989 except (ValueError, OverflowError):
989 except (ValueError, OverflowError):
990 pass
990 pass
991 else:
991 else:
992 if i - off >= 0:
992 if i - off >= 0:
993 return self.series[i - off]
993 return self.series[i - off]
994 plus = patch.rfind('+')
994 plus = patch.rfind('+')
995 if plus >= 0:
995 if plus >= 0:
996 res = partial_name(patch[:plus])
996 res = partial_name(patch[:plus])
997 if res:
997 if res:
998 i = self.series.index(res)
998 i = self.series.index(res)
999 try:
999 try:
1000 off = int(patch[plus + 1:] or 1)
1000 off = int(patch[plus + 1:] or 1)
1001 except (ValueError, OverflowError):
1001 except (ValueError, OverflowError):
1002 pass
1002 pass
1003 else:
1003 else:
1004 if i + off < len(self.series):
1004 if i + off < len(self.series):
1005 return self.series[i + off]
1005 return self.series[i + off]
1006 raise util.Abort(_("patch %s not in series") % patch)
1006 raise util.Abort(_("patch %s not in series") % patch)
1007
1007
1008 def push(self, repo, patch=None, force=False, list=False,
1008 def push(self, repo, patch=None, force=False, list=False,
1009 mergeq=None, all=False, move=False, exact=False):
1009 mergeq=None, all=False, move=False, exact=False):
1010 diffopts = self.diffopts()
1010 diffopts = self.diffopts()
1011 wlock = repo.wlock()
1011 wlock = repo.wlock()
1012 try:
1012 try:
1013 heads = []
1013 heads = []
1014 for b, ls in repo.branchmap().iteritems():
1014 for b, ls in repo.branchmap().iteritems():
1015 heads += ls
1015 heads += ls
1016 if not heads:
1016 if not heads:
1017 heads = [nullid]
1017 heads = [nullid]
1018 if repo.dirstate.parents()[0] not in heads and not exact:
1018 if repo.dirstate.parents()[0] not in heads and not exact:
1019 self.ui.status(_("(working directory not at a head)\n"))
1019 self.ui.status(_("(working directory not at a head)\n"))
1020
1020
1021 if not self.series:
1021 if not self.series:
1022 self.ui.warn(_('no patches in series\n'))
1022 self.ui.warn(_('no patches in series\n'))
1023 return 0
1023 return 0
1024
1024
1025 patch = self.lookup(patch)
1025 patch = self.lookup(patch)
1026 # Suppose our series file is: A B C and the current 'top'
1026 # Suppose our series file is: A B C and the current 'top'
1027 # patch is B. qpush C should be performed (moving forward),
1027 # patch is B. qpush C should be performed (moving forward),
1028 # qpush B is a NOP (no change), and qpush A is an error (we
1028 # qpush B is a NOP (no change), and qpush A is an error (we
1029 # can't go backwards with qpush).
1029 # can't go backwards with qpush).
1030 if patch:
1030 if patch:
1031 info = self.isapplied(patch)
1031 info = self.isapplied(patch)
1032 if info:
1032 if info:
1033 if info[0] < len(self.applied) - 1:
1033 if info[0] < len(self.applied) - 1:
1034 raise util.Abort(
1034 raise util.Abort(
1035 _("cannot push to a previous patch: %s") % patch)
1035 _("cannot push to a previous patch: %s") % patch)
1036 self.ui.warn(
1036 self.ui.warn(
1037 _('qpush: %s is already at the top\n') % patch)
1037 _('qpush: %s is already at the top\n') % patch)
1038 return 0
1038 return 0
1039 pushable, reason = self.pushable(patch)
1039 pushable, reason = self.pushable(patch)
1040 if not pushable:
1040 if not pushable:
1041 if reason:
1041 if reason:
1042 reason = _('guarded by %r') % reason
1042 reason = _('guarded by %r') % reason
1043 else:
1043 else:
1044 reason = _('no matching guards')
1044 reason = _('no matching guards')
1045 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1045 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1046 return 1
1046 return 1
1047 elif all:
1047 elif all:
1048 patch = self.series[-1]
1048 patch = self.series[-1]
1049 if self.isapplied(patch):
1049 if self.isapplied(patch):
1050 self.ui.warn(_('all patches are currently applied\n'))
1050 self.ui.warn(_('all patches are currently applied\n'))
1051 return 0
1051 return 0
1052
1052
1053 # Following the above example, starting at 'top' of B:
1053 # Following the above example, starting at 'top' of B:
1054 # qpush should be performed (pushes C), but a subsequent
1054 # qpush should be performed (pushes C), but a subsequent
1055 # qpush without an argument is an error (nothing to
1055 # qpush without an argument is an error (nothing to
1056 # apply). This allows a loop of "...while hg qpush..." to
1056 # apply). This allows a loop of "...while hg qpush..." to
1057 # work as it detects an error when done
1057 # work as it detects an error when done
1058 start = self.series_end()
1058 start = self.series_end()
1059 if start == len(self.series):
1059 if start == len(self.series):
1060 self.ui.warn(_('patch series already fully applied\n'))
1060 self.ui.warn(_('patch series already fully applied\n'))
1061 return 1
1061 return 1
1062 if not force:
1062 if not force:
1063 self.check_localchanges(repo)
1063 self.check_localchanges(repo)
1064
1064
1065 if exact:
1065 if exact:
1066 if move:
1066 if move:
1067 raise util.Abort(_("cannot use --exact and --move together"))
1067 raise util.Abort(_("cannot use --exact and --move together"))
1068 if self.applied:
1068 if self.applied:
1069 raise util.Abort(_("cannot push --exact with applied patches"))
1069 raise util.Abort(_("cannot push --exact with applied patches"))
1070 root = self.series[start]
1070 root = self.series[start]
1071 target = patchheader(self.join(root), self.plainmode).parent
1071 target = patchheader(self.join(root), self.plainmode).parent
1072 if not target:
1072 if not target:
1073 raise util.Abort(_("%s does not have a parent recorded") % root)
1073 raise util.Abort(_("%s does not have a parent recorded") % root)
1074 if not repo[target] == repo['.']:
1074 if not repo[target] == repo['.']:
1075 hg.update(repo, target)
1075 hg.update(repo, target)
1076
1076
1077 if move:
1077 if move:
1078 if not patch:
1078 if not patch:
1079 raise util.Abort(_("please specify the patch to move"))
1079 raise util.Abort(_("please specify the patch to move"))
1080 for i, rpn in enumerate(self.full_series[start:]):
1080 for i, rpn in enumerate(self.full_series[start:]):
1081 # strip markers for patch guards
1081 # strip markers for patch guards
1082 if self.guard_re.split(rpn, 1)[0] == patch:
1082 if self.guard_re.split(rpn, 1)[0] == patch:
1083 break
1083 break
1084 index = start + i
1084 index = start + i
1085 assert index < len(self.full_series)
1085 assert index < len(self.full_series)
1086 fullpatch = self.full_series[index]
1086 fullpatch = self.full_series[index]
1087 del self.full_series[index]
1087 del self.full_series[index]
1088 self.full_series.insert(start, fullpatch)
1088 self.full_series.insert(start, fullpatch)
1089 self.parse_series()
1089 self.parse_series()
1090 self.series_dirty = 1
1090 self.series_dirty = 1
1091
1091
1092 self.applied_dirty = 1
1092 self.applied_dirty = 1
1093 if start > 0:
1093 if start > 0:
1094 self.check_toppatch(repo)
1094 self.check_toppatch(repo)
1095 if not patch:
1095 if not patch:
1096 patch = self.series[start]
1096 patch = self.series[start]
1097 end = start + 1
1097 end = start + 1
1098 else:
1098 else:
1099 end = self.series.index(patch, start) + 1
1099 end = self.series.index(patch, start) + 1
1100
1100
1101 s = self.series[start:end]
1101 s = self.series[start:end]
1102 all_files = set()
1102 all_files = set()
1103 try:
1103 try:
1104 if mergeq:
1104 if mergeq:
1105 ret = self.mergepatch(repo, mergeq, s, diffopts)
1105 ret = self.mergepatch(repo, mergeq, s, diffopts)
1106 else:
1106 else:
1107 ret = self.apply(repo, s, list, all_files=all_files)
1107 ret = self.apply(repo, s, list, all_files=all_files)
1108 except:
1108 except:
1109 self.ui.warn(_('cleaning up working directory...'))
1109 self.ui.warn(_('cleaning up working directory...'))
1110 node = repo.dirstate.parents()[0]
1110 node = repo.dirstate.parents()[0]
1111 hg.revert(repo, node, None)
1111 hg.revert(repo, node, None)
1112 # only remove unknown files that we know we touched or
1112 # only remove unknown files that we know we touched or
1113 # created while patching
1113 # created while patching
1114 for f in all_files:
1114 for f in all_files:
1115 if f not in repo.dirstate:
1115 if f not in repo.dirstate:
1116 try:
1116 try:
1117 util.unlink(repo.wjoin(f))
1117 util.unlink(repo.wjoin(f))
1118 except OSError, inst:
1118 except OSError, inst:
1119 if inst.errno != errno.ENOENT:
1119 if inst.errno != errno.ENOENT:
1120 raise
1120 raise
1121 self.ui.warn(_('done\n'))
1121 self.ui.warn(_('done\n'))
1122 raise
1122 raise
1123
1123
1124 if not self.applied:
1124 if not self.applied:
1125 return ret[0]
1125 return ret[0]
1126 top = self.applied[-1].name
1126 top = self.applied[-1].name
1127 if ret[0] and ret[0] > 1:
1127 if ret[0] and ret[0] > 1:
1128 msg = _("errors during apply, please fix and refresh %s\n")
1128 msg = _("errors during apply, please fix and refresh %s\n")
1129 self.ui.write(msg % top)
1129 self.ui.write(msg % top)
1130 else:
1130 else:
1131 self.ui.write(_("now at: %s\n") % top)
1131 self.ui.write(_("now at: %s\n") % top)
1132 return ret[0]
1132 return ret[0]
1133
1133
1134 finally:
1134 finally:
1135 wlock.release()
1135 wlock.release()
1136
1136
1137 def pop(self, repo, patch=None, force=False, update=True, all=False):
1137 def pop(self, repo, patch=None, force=False, update=True, all=False):
1138 wlock = repo.wlock()
1138 wlock = repo.wlock()
1139 try:
1139 try:
1140 if patch:
1140 if patch:
1141 # index, rev, patch
1141 # index, rev, patch
1142 info = self.isapplied(patch)
1142 info = self.isapplied(patch)
1143 if not info:
1143 if not info:
1144 patch = self.lookup(patch)
1144 patch = self.lookup(patch)
1145 info = self.isapplied(patch)
1145 info = self.isapplied(patch)
1146 if not info:
1146 if not info:
1147 raise util.Abort(_("patch %s is not applied") % patch)
1147 raise util.Abort(_("patch %s is not applied") % patch)
1148
1148
1149 if not self.applied:
1149 if not self.applied:
1150 # Allow qpop -a to work repeatedly,
1150 # Allow qpop -a to work repeatedly,
1151 # but not qpop without an argument
1151 # but not qpop without an argument
1152 self.ui.warn(_("no patches applied\n"))
1152 self.ui.warn(_("no patches applied\n"))
1153 return not all
1153 return not all
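# Editorial example: with nothing applied, 'hg qpop -a' returns 0
# (all is True, so 'not all' is False) and may be repeated safely,
# whereas a plain 'hg qpop' returns 1 to signal there was nothing to pop.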
1154
1154
1155 if all:
1155 if all:
1156 start = 0
1156 start = 0
1157 elif patch:
1157 elif patch:
1158 start = info[0] + 1
1158 start = info[0] + 1
1159 else:
1159 else:
1160 start = len(self.applied) - 1
1160 start = len(self.applied) - 1
1161
1161
1162 if start >= len(self.applied):
1162 if start >= len(self.applied):
1163 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1163 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1164 return
1164 return
1165
1165
1166 if not update:
1166 if not update:
1167 parents = repo.dirstate.parents()
1167 parents = repo.dirstate.parents()
1168 rr = [x.node for x in self.applied]
1168 rr = [x.node for x in self.applied]
1169 for p in parents:
1169 for p in parents:
1170 if p in rr:
1170 if p in rr:
1171 self.ui.warn(_("qpop: forcing dirstate update\n"))
1171 self.ui.warn(_("qpop: forcing dirstate update\n"))
1172 update = True
1172 update = True
1173 else:
1173 else:
1174 parents = [p.node() for p in repo[None].parents()]
1174 parents = [p.node() for p in repo[None].parents()]
1175 needupdate = False
1175 needupdate = False
1176 for entry in self.applied[start:]:
1176 for entry in self.applied[start:]:
1177 if entry.node in parents:
1177 if entry.node in parents:
1178 needupdate = True
1178 needupdate = True
1179 break
1179 break
1180 update = needupdate
1180 update = needupdate
1181
1181
1182 if not force and update:
1182 if not force and update:
1183 self.check_localchanges(repo)
1183 self.check_localchanges(repo)
1184
1184
1185 self.applied_dirty = 1
1185 self.applied_dirty = 1
1186 end = len(self.applied)
1186 end = len(self.applied)
1187 rev = self.applied[start].node
1187 rev = self.applied[start].node
1188 if update:
1188 if update:
1189 top = self.check_toppatch(repo)[0]
1189 top = self.check_toppatch(repo)[0]
1190
1190
1191 try:
1191 try:
1192 heads = repo.changelog.heads(rev)
1192 heads = repo.changelog.heads(rev)
1193 except error.LookupError:
1193 except error.LookupError:
1194 node = short(rev)
1194 node = short(rev)
1195 raise util.Abort(_('trying to pop unknown node %s') % node)
1195 raise util.Abort(_('trying to pop unknown node %s') % node)
1196
1196
1197 if heads != [self.applied[-1].node]:
1197 if heads != [self.applied[-1].node]:
1198 raise util.Abort(_("popping would remove a revision not "
1198 raise util.Abort(_("popping would remove a revision not "
1199 "managed by this patch queue"))
1199 "managed by this patch queue"))
1200
1200
1201 # we know there are no local changes, so we can make a simplified
1201 # we know there are no local changes, so we can make a simplified
1202 # form of hg.update.
1202 # form of hg.update.
1203 if update:
1203 if update:
1204 qp = self.qparents(repo, rev)
1204 qp = self.qparents(repo, rev)
1205 ctx = repo[qp]
1205 ctx = repo[qp]
1206 m, a, r, d = repo.status(qp, top)[:4]
1206 m, a, r, d = repo.status(qp, top)[:4]
1207 if d:
1207 if d:
1208 raise util.Abort(_("deletions found between repo revs"))
1208 raise util.Abort(_("deletions found between repo revs"))
1209 for f in a:
1209 for f in a:
1210 try:
1210 try:
1211 util.unlink(repo.wjoin(f))
1211 util.unlink(repo.wjoin(f))
1212 except OSError, e:
1212 except OSError, e:
1213 if e.errno != errno.ENOENT:
1213 if e.errno != errno.ENOENT:
1214 raise
1214 raise
1215 repo.dirstate.forget(f)
1215 repo.dirstate.forget(f)
1216 for f in m + r:
1216 for f in m + r:
1217 fctx = ctx[f]
1217 fctx = ctx[f]
1218 repo.wwrite(f, fctx.data(), fctx.flags())
1218 repo.wwrite(f, fctx.data(), fctx.flags())
1219 repo.dirstate.normal(f)
1219 repo.dirstate.normal(f)
1220 repo.dirstate.setparents(qp, nullid)
1220 repo.dirstate.setparents(qp, nullid)
1221 for patch in reversed(self.applied[start:end]):
1221 for patch in reversed(self.applied[start:end]):
1222 self.ui.status(_("popping %s\n") % patch.name)
1222 self.ui.status(_("popping %s\n") % patch.name)
1223 del self.applied[start:end]
1223 del self.applied[start:end]
1224 self.strip(repo, [rev], update=False, backup='strip')
1224 self.strip(repo, [rev], update=False, backup='strip')
1225 if self.applied:
1225 if self.applied:
1226 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1226 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1227 else:
1227 else:
1228 self.ui.write(_("patch queue now empty\n"))
1228 self.ui.write(_("patch queue now empty\n"))
1229 finally:
1229 finally:
1230 wlock.release()
1230 wlock.release()
1231
1231
1232 def diff(self, repo, pats, opts):
1232 def diff(self, repo, pats, opts):
1233 top, patch = self.check_toppatch(repo)
1233 top, patch = self.check_toppatch(repo)
1234 if not top:
1234 if not top:
1235 self.ui.write(_("no patches applied\n"))
1235 self.ui.write(_("no patches applied\n"))
1236 return
1236 return
1237 qp = self.qparents(repo, top)
1237 qp = self.qparents(repo, top)
1238 if opts.get('reverse'):
1238 if opts.get('reverse'):
1239 node1, node2 = None, qp
1239 node1, node2 = None, qp
1240 else:
1240 else:
1241 node1, node2 = qp, None
1241 node1, node2 = qp, None
1242 diffopts = self.diffopts(opts, patch)
1242 diffopts = self.diffopts(opts, patch)
1243 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1243 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1244
1244
1245 def refresh(self, repo, pats=None, **opts):
1245 def refresh(self, repo, pats=None, **opts):
1246 if not self.applied:
1246 if not self.applied:
1247 self.ui.write(_("no patches applied\n"))
1247 self.ui.write(_("no patches applied\n"))
1248 return 1
1248 return 1
1249 msg = opts.get('msg', '').rstrip()
1249 msg = opts.get('msg', '').rstrip()
1250 newuser = opts.get('user')
1250 newuser = opts.get('user')
1251 newdate = opts.get('date')
1251 newdate = opts.get('date')
1252 if newdate:
1252 if newdate:
1253 newdate = '%d %d' % util.parsedate(newdate)
1253 newdate = '%d %d' % util.parsedate(newdate)
1254 wlock = repo.wlock()
1254 wlock = repo.wlock()
1255
1255
1256 try:
1256 try:
1257 self.check_toppatch(repo)
1257 self.check_toppatch(repo)
1258 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1258 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1259 if repo.changelog.heads(top) != [top]:
1259 if repo.changelog.heads(top) != [top]:
1260 raise util.Abort(_("cannot refresh a revision with children"))
1260 raise util.Abort(_("cannot refresh a revision with children"))
1261
1261
1262 cparents = repo.changelog.parents(top)
1262 cparents = repo.changelog.parents(top)
1263 patchparent = self.qparents(repo, top)
1263 patchparent = self.qparents(repo, top)
1264 ph = patchheader(self.join(patchfn), self.plainmode)
1264 ph = patchheader(self.join(patchfn), self.plainmode)
1265 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1265 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1266 if msg:
1266 if msg:
1267 ph.setmessage(msg)
1267 ph.setmessage(msg)
1268 if newuser:
1268 if newuser:
1269 ph.setuser(newuser)
1269 ph.setuser(newuser)
1270 if newdate:
1270 if newdate:
1271 ph.setdate(newdate)
1271 ph.setdate(newdate)
1272 ph.setparent(hex(patchparent))
1272 ph.setparent(hex(patchparent))
1273
1273
1274 # only commit new patch when write is complete
1274 # only commit new patch when write is complete
1275 patchf = self.opener(patchfn, 'w', atomictemp=True)
1275 patchf = self.opener(patchfn, 'w', atomictemp=True)
1276
1276
1277 comments = str(ph)
1277 comments = str(ph)
1278 if comments:
1278 if comments:
1279 patchf.write(comments)
1279 patchf.write(comments)
1280
1280
1281 # update the dirstate in place, strip off the qtip commit
1281 # update the dirstate in place, strip off the qtip commit
1282 # and then commit.
1282 # and then commit.
1283 #
1283 #
1284 # this should really read:
1284 # this should really read:
1285 # mm, dd, aa = repo.status(top, patchparent)[:3]
1285 # mm, dd, aa = repo.status(top, patchparent)[:3]
1286 # but we do it backwards to take advantage of manifest/chlog
1286 # but we do it backwards to take advantage of manifest/chlog
1287 # caching against the next repo.status call
1287 # caching against the next repo.status call
1288 mm, aa, dd = repo.status(patchparent, top)[:3]
1288 mm, aa, dd = repo.status(patchparent, top)[:3]
1289 changes = repo.changelog.read(top)
1289 changes = repo.changelog.read(top)
1290 man = repo.manifest.read(changes[0])
1290 man = repo.manifest.read(changes[0])
1291 aaa = aa[:]
1291 aaa = aa[:]
1292 matchfn = cmdutil.match(repo, pats, opts)
1292 matchfn = cmdutil.match(repo, pats, opts)
1293 # in short mode, we only diff the files included in the
1293 # in short mode, we only diff the files included in the
1294 # patch already plus specified files
1294 # patch already plus specified files
1295 if opts.get('short'):
1295 if opts.get('short'):
1296 # if amending a patch, we start with existing
1296 # if amending a patch, we start with existing
1297 # files plus specified files - unfiltered
1297 # files plus specified files - unfiltered
1298 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1298 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1299 # filter with include/exclude options
1299 # filter with include/exclude options
1300 matchfn = cmdutil.match(repo, opts=opts)
1300 matchfn = cmdutil.match(repo, opts=opts)
1301 else:
1301 else:
1302 match = cmdutil.matchall(repo)
1302 match = cmdutil.matchall(repo)
1303 m, a, r, d = repo.status(match=match)[:4]
1303 m, a, r, d = repo.status(match=match)[:4]
1304 mm = set(mm)
1304 mm = set(mm)
1305 aa = set(aa)
1305 aa = set(aa)
1306 dd = set(dd)
1306 dd = set(dd)
1307
1307
1308 # we might end up with files that were added between
1308 # we might end up with files that were added between
1309 # qtip and the dirstate parent, but then changed in the
1309 # qtip and the dirstate parent, but then changed in the
1310 # local dirstate. in this case, we want them to only
1310 # local dirstate. in this case, we want them to only
1311 # show up in the added section
1311 # show up in the added section
1312 for x in m:
1312 for x in m:
1313 if x == '.hgsub' or x == '.hgsubstate':
1313 if x == '.hgsub' or x == '.hgsubstate':
1314 self.ui.warn(_('warning: not refreshing %s\n') % x)
1314 self.ui.warn(_('warning: not refreshing %s\n') % x)
1315 continue
1315 continue
1316 if x not in aa:
1316 if x not in aa:
1317 mm.add(x)
1317 mm.add(x)
1318 # we might end up with files added by the local dirstate that
1318 # we might end up with files added by the local dirstate that
1319 # were deleted by the patch. In this case, they should only
1319 # were deleted by the patch. In this case, they should only
1320 # show up in the changed section.
1320 # show up in the changed section.
1321 for x in a:
1321 for x in a:
1322 if x == '.hgsub' or x == '.hgsubstate':
1322 if x == '.hgsub' or x == '.hgsubstate':
1323 self.ui.warn(_('warning: not adding %s\n') % x)
1323 self.ui.warn(_('warning: not adding %s\n') % x)
1324 continue
1324 continue
1325 if x in dd:
1325 if x in dd:
1326 dd.remove(x)
1326 dd.remove(x)
1327 mm.add(x)
1327 mm.add(x)
1328 else:
1328 else:
1329 aa.add(x)
1329 aa.add(x)
1330 # make sure any files deleted in the local dirstate
1330 # make sure any files deleted in the local dirstate
1331 # are not in the add or change column of the patch
1331 # are not in the add or change column of the patch
1332 forget = []
1332 forget = []
1333 for x in d + r:
1333 for x in d + r:
1334 if x == '.hgsub' or x == '.hgsubstate':
1334 if x == '.hgsub' or x == '.hgsubstate':
1335 self.ui.warn(_('warning: not removing %s\n') % x)
1335 self.ui.warn(_('warning: not removing %s\n') % x)
1336 continue
1336 continue
1337 if x in aa:
1337 if x in aa:
1338 aa.remove(x)
1338 aa.remove(x)
1339 forget.append(x)
1339 forget.append(x)
1340 continue
1340 continue
1341 else:
1341 else:
1342 mm.discard(x)
1342 mm.discard(x)
1343 dd.add(x)
1343 dd.add(x)
1344
1344
1345 m = list(mm)
1345 m = list(mm)
1346 r = list(dd)
1346 r = list(dd)
1347 a = list(aa)
1347 a = list(aa)
1348 c = [filter(matchfn, l) for l in (m, a, r)]
1348 c = [filter(matchfn, l) for l in (m, a, r)]
1349 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1349 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1350 chunks = patch.diff(repo, patchparent, match=match,
1350 chunks = patch.diff(repo, patchparent, match=match,
1351 changes=c, opts=diffopts)
1351 changes=c, opts=diffopts)
1352 for chunk in chunks:
1352 for chunk in chunks:
1353 patchf.write(chunk)
1353 patchf.write(chunk)
1354
1354
1355 try:
1355 try:
1356 if diffopts.git or diffopts.upgrade:
1356 if diffopts.git or diffopts.upgrade:
1357 copies = {}
1357 copies = {}
1358 for dst in a:
1358 for dst in a:
1359 src = repo.dirstate.copied(dst)
1359 src = repo.dirstate.copied(dst)
1360 # during qfold, the source file for copies may
1360 # during qfold, the source file for copies may
1361 # be removed. Treat this as a simple add.
1361 # be removed. Treat this as a simple add.
1362 if src is not None and src in repo.dirstate:
1362 if src is not None and src in repo.dirstate:
1363 copies.setdefault(src, []).append(dst)
1363 copies.setdefault(src, []).append(dst)
1364 repo.dirstate.add(dst)
1364 repo.dirstate.add(dst)
1365 # remember the copies between patchparent and qtip
1365 # remember the copies between patchparent and qtip
1366 for dst in aaa:
1366 for dst in aaa:
1367 f = repo.file(dst)
1367 f = repo.file(dst)
1368 src = f.renamed(man[dst])
1368 src = f.renamed(man[dst])
1369 if src:
1369 if src:
1370 copies.setdefault(src[0], []).extend(
1370 copies.setdefault(src[0], []).extend(
1371 copies.get(dst, []))
1371 copies.get(dst, []))
1372 if dst in a:
1372 if dst in a:
1373 copies[src[0]].append(dst)
1373 copies[src[0]].append(dst)
1374 # we can't copy a file created by the patch itself
1374 # we can't copy a file created by the patch itself
1375 if dst in copies:
1375 if dst in copies:
1376 del copies[dst]
1376 del copies[dst]
1377 for src, dsts in copies.iteritems():
1377 for src, dsts in copies.iteritems():
1378 for dst in dsts:
1378 for dst in dsts:
1379 repo.dirstate.copy(src, dst)
1379 repo.dirstate.copy(src, dst)
1380 else:
1380 else:
1381 for dst in a:
1381 for dst in a:
1382 repo.dirstate.add(dst)
1382 repo.dirstate.add(dst)
1383 # Drop useless copy information
1383 # Drop useless copy information
1384 for f in list(repo.dirstate.copies()):
1384 for f in list(repo.dirstate.copies()):
1385 repo.dirstate.copy(None, f)
1385 repo.dirstate.copy(None, f)
1386 for f in r:
1386 for f in r:
1387 repo.dirstate.remove(f)
1387 repo.dirstate.remove(f)
1388 # if the patch excludes a modified file, mark that
1388 # if the patch excludes a modified file, mark that
1389 # file with mtime=0 so status can see it.
1389 # file with mtime=0 so status can see it.
1390 mm = []
1390 mm = []
1391 for i in xrange(len(m) - 1, -1, -1):
1391 for i in xrange(len(m) - 1, -1, -1):
1392 if not matchfn(m[i]):
1392 if not matchfn(m[i]):
1393 mm.append(m[i])
1393 mm.append(m[i])
1394 del m[i]
1394 del m[i]
1395 for f in m:
1395 for f in m:
1396 repo.dirstate.normal(f)
1396 repo.dirstate.normal(f)
1397 for f in mm:
1397 for f in mm:
1398 repo.dirstate.normallookup(f)
1398 repo.dirstate.normallookup(f)
1399 for f in forget:
1399 for f in forget:
1400 repo.dirstate.forget(f)
1400 repo.dirstate.forget(f)
1401
1401
1402 if not msg:
1402 if not msg:
1403 if not ph.message:
1403 if not ph.message:
1404 message = "[mq]: %s\n" % patchfn
1404 message = "[mq]: %s\n" % patchfn
1405 else:
1405 else:
1406 message = "\n".join(ph.message)
1406 message = "\n".join(ph.message)
1407 else:
1407 else:
1408 message = msg
1408 message = msg
1409
1409
1410 user = ph.user or changes[1]
1410 user = ph.user or changes[1]
1411
1411
1412 # assumes strip can roll itself back if interrupted
1412 # assumes strip can roll itself back if interrupted
1413 repo.dirstate.setparents(*cparents)
1413 repo.dirstate.setparents(*cparents)
1414 self.applied.pop()
1414 self.applied.pop()
1415 self.applied_dirty = 1
1415 self.applied_dirty = 1
1416 self.strip(repo, [top], update=False,
1416 self.strip(repo, [top], update=False,
1417 backup='strip')
1417 backup='strip')
1418 except:
1418 except:
1419 repo.dirstate.invalidate()
1419 repo.dirstate.invalidate()
1420 raise
1420 raise
1421
1421
1422 try:
1422 try:
1423 # might be nice to attempt to roll back strip after this
1423 # might be nice to attempt to roll back strip after this
1424 patchf.rename()
1424 patchf.rename()
1425 n = repo.commit(message, user, ph.date, match=match,
1425 n = repo.commit(message, user, ph.date, match=match,
1426 force=True)
1426 force=True)
1427 self.applied.append(statusentry(n, patchfn))
1427 self.applied.append(statusentry(n, patchfn))
1428 except:
1428 except:
1429 ctx = repo[cparents[0]]
1429 ctx = repo[cparents[0]]
1430 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1430 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1431 self.save_dirty()
1431 self.save_dirty()
1432 self.ui.warn(_('refresh interrupted while patch was popped! '
1432 self.ui.warn(_('refresh interrupted while patch was popped! '
1433 '(revert --all, qpush to recover)\n'))
1433 '(revert --all, qpush to recover)\n'))
1434 raise
1434 raise
1435 finally:
1435 finally:
1436 wlock.release()
1436 wlock.release()
1437 self.removeundo(repo)
1437 self.removeundo(repo)
1438
1438
1439 def init(self, repo, create=False):
1439 def init(self, repo, create=False):
1440 if not create and os.path.isdir(self.path):
1440 if not create and os.path.isdir(self.path):
1441 raise util.Abort(_("patch queue directory already exists"))
1441 raise util.Abort(_("patch queue directory already exists"))
1442 try:
1442 try:
1443 os.mkdir(self.path)
1443 os.mkdir(self.path)
1444 except OSError, inst:
1444 except OSError, inst:
1445 if inst.errno != errno.EEXIST or not create:
1445 if inst.errno != errno.EEXIST or not create:
1446 raise
1446 raise
1447 if create:
1447 if create:
1448 return self.qrepo(create=True)
1448 return self.qrepo(create=True)
1449
1449
1450 def unapplied(self, repo, patch=None):
1450 def unapplied(self, repo, patch=None):
1451 if patch and patch not in self.series:
1451 if patch and patch not in self.series:
1452 raise util.Abort(_("patch %s is not in series file") % patch)
1452 raise util.Abort(_("patch %s is not in series file") % patch)
1453 if not patch:
1453 if not patch:
1454 start = self.series_end()
1454 start = self.series_end()
1455 else:
1455 else:
1456 start = self.series.index(patch) + 1
1456 start = self.series.index(patch) + 1
1457 unapplied = []
1457 unapplied = []
1458 for i in xrange(start, len(self.series)):
1458 for i in xrange(start, len(self.series)):
1459 pushable, reason = self.pushable(i)
1459 pushable, reason = self.pushable(i)
1460 if pushable:
1460 if pushable:
1461 unapplied.append((i, self.series[i]))
1461 unapplied.append((i, self.series[i]))
1462 self.explain_pushable(i)
1462 self.explain_pushable(i)
1463 return unapplied
1463 return unapplied
1464
1464
1465 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1465 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1466 summary=False):
1466 summary=False):
1467 def displayname(pfx, patchname, state):
1467 def displayname(pfx, patchname, state):
1468 if pfx:
1468 if pfx:
1469 self.ui.write(pfx)
1469 self.ui.write(pfx)
1470 if summary:
1470 if summary:
1471 ph = patchheader(self.join(patchname), self.plainmode)
1471 ph = patchheader(self.join(patchname), self.plainmode)
1472 msg = ph.message and ph.message[0] or ''
1472 msg = ph.message and ph.message[0] or ''
1473 if self.ui.formatted():
1473 if self.ui.formatted():
1474 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1474 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1475 if width > 0:
1475 if width > 0:
1476 msg = util.ellipsis(msg, width)
1476 msg = util.ellipsis(msg, width)
1477 else:
1477 else:
1478 msg = ''
1478 msg = ''
1479 self.ui.write(patchname, label='qseries.' + state)
1479 self.ui.write(patchname, label='qseries.' + state)
1480 self.ui.write(': ')
1480 self.ui.write(': ')
1481 self.ui.write(msg, label='qseries.message.' + state)
1481 self.ui.write(msg, label='qseries.message.' + state)
1482 else:
1482 else:
1483 self.ui.write(patchname, label='qseries.' + state)
1483 self.ui.write(patchname, label='qseries.' + state)
1484 self.ui.write('\n')
1484 self.ui.write('\n')
1485
1485
1486 applied = set([p.name for p in self.applied])
1486 applied = set([p.name for p in self.applied])
1487 if length is None:
1487 if length is None:
1488 length = len(self.series) - start
1488 length = len(self.series) - start
1489 if not missing:
1489 if not missing:
1490 if self.ui.verbose:
1490 if self.ui.verbose:
1491 idxwidth = len(str(start + length - 1))
1491 idxwidth = len(str(start + length - 1))
1492 for i in xrange(start, start + length):
1492 for i in xrange(start, start + length):
1493 patch = self.series[i]
1493 patch = self.series[i]
1494 if patch in applied:
1494 if patch in applied:
1495 char, state = 'A', 'applied'
1495 char, state = 'A', 'applied'
1496 elif self.pushable(i)[0]:
1496 elif self.pushable(i)[0]:
1497 char, state = 'U', 'unapplied'
1497 char, state = 'U', 'unapplied'
1498 else:
1498 else:
1499 char, state = 'G', 'guarded'
1499 char, state = 'G', 'guarded'
1500 pfx = ''
1500 pfx = ''
1501 if self.ui.verbose:
1501 if self.ui.verbose:
1502 pfx = '%*d %s ' % (idxwidth, i, char)
1502 pfx = '%*d %s ' % (idxwidth, i, char)
1503 elif status and status != char:
1503 elif status and status != char:
1504 continue
1504 continue
1505 displayname(pfx, patch, state)
1505 displayname(pfx, patch, state)
1506 else:
1506 else:
1507 msng_list = []
1507 msng_list = []
1508 for root, dirs, files in os.walk(self.path):
1508 for root, dirs, files in os.walk(self.path):
1509 d = root[len(self.path) + 1:]
1509 d = root[len(self.path) + 1:]
1510 for f in files:
1510 for f in files:
1511 fl = os.path.join(d, f)
1511 fl = os.path.join(d, f)
1512 if (fl not in self.series and
1512 if (fl not in self.series and
1513 fl not in (self.status_path, self.series_path,
1513 fl not in (self.status_path, self.series_path,
1514 self.guards_path)
1514 self.guards_path)
1515 and not fl.startswith('.')):
1515 and not fl.startswith('.')):
1516 msng_list.append(fl)
1516 msng_list.append(fl)
1517 for x in sorted(msng_list):
1517 for x in sorted(msng_list):
1518 pfx = self.ui.verbose and ('D ') or ''
1518 pfx = self.ui.verbose and ('D ') or ''
1519 displayname(pfx, x, 'missing')
1519 displayname(pfx, x, 'missing')
1520
1520
1521 def issaveline(self, l):
1521 def issaveline(self, l):
1522 if l.name == '.hg.patches.save.line':
1522 if l.name == '.hg.patches.save.line':
1523 return True
1523 return True
1524
1524
1525 def qrepo(self, create=False):
1525 def qrepo(self, create=False):
1526 ui = self.ui.copy()
1526 ui = self.ui.copy()
1527 ui.setconfig('paths', 'default', '', overlay=False)
1527 ui.setconfig('paths', 'default', '', overlay=False)
1528 ui.setconfig('paths', 'default-push', '', overlay=False)
1528 ui.setconfig('paths', 'default-push', '', overlay=False)
1529 if create or os.path.isdir(self.join(".hg")):
1529 if create or os.path.isdir(self.join(".hg")):
1530 return hg.repository(ui, path=self.path, create=create)
1530 return hg.repository(ui, path=self.path, create=create)
1531
1531
1532 def restore(self, repo, rev, delete=None, qupdate=None):
1532 def restore(self, repo, rev, delete=None, qupdate=None):
1533 desc = repo[rev].description().strip()
1533 desc = repo[rev].description().strip()
1534 lines = desc.splitlines()
1534 lines = desc.splitlines()
1535 i = 0
1535 i = 0
1536 datastart = None
1536 datastart = None
1537 series = []
1537 series = []
1538 applied = []
1538 applied = []
1539 qpp = None
1539 qpp = None
1540 for i, line in enumerate(lines):
1540 for i, line in enumerate(lines):
1541 if line == 'Patch Data:':
1541 if line == 'Patch Data:':
1542 datastart = i + 1
1542 datastart = i + 1
1543 elif line.startswith('Dirstate:'):
1543 elif line.startswith('Dirstate:'):
1544 l = line.rstrip()
1544 l = line.rstrip()
1545 l = l[10:].split(' ')
1545 l = l[10:].split(' ')
1546 qpp = [bin(x) for x in l]
1546 qpp = [bin(x) for x in l]
1547 elif datastart is not None:
1547 elif datastart is not None:
1548 l = line.rstrip()
1548 l = line.rstrip()
1549 n, name = l.split(':', 1)
1549 n, name = l.split(':', 1)
1550 if n:
1550 if n:
1551 applied.append(statusentry(bin(n), name))
1551 applied.append(statusentry(bin(n), name))
1552 else:
1552 else:
1553 series.append(l)
1553 series.append(l)
1554 if datastart is None:
1554 if datastart is None:
1555 self.ui.warn(_("no saved patch data found\n"))
1555 self.ui.warn(_("no saved patch data found\n"))
1556 return 1
1556 return 1
1557 self.ui.warn(_("restoring status: %s\n") % lines[0])
1557 self.ui.warn(_("restoring status: %s\n") % lines[0])
1558 self.full_series = series
1558 self.full_series = series
1559 self.applied = applied
1559 self.applied = applied
1560 self.parse_series()
1560 self.parse_series()
1561 self.series_dirty = 1
1561 self.series_dirty = 1
1562 self.applied_dirty = 1
1562 self.applied_dirty = 1
1563 heads = repo.changelog.heads()
1563 heads = repo.changelog.heads()
1564 if delete:
1564 if delete:
1565 if rev not in heads:
1565 if rev not in heads:
1566 self.ui.warn(_("save entry has children, leaving it alone\n"))
1566 self.ui.warn(_("save entry has children, leaving it alone\n"))
1567 else:
1567 else:
1568 self.ui.warn(_("removing save entry %s\n") % short(rev))
1568 self.ui.warn(_("removing save entry %s\n") % short(rev))
1569 pp = repo.dirstate.parents()
1569 pp = repo.dirstate.parents()
1570 if rev in pp:
1570 if rev in pp:
1571 update = True
1571 update = True
1572 else:
1572 else:
1573 update = False
1573 update = False
1574 self.strip(repo, [rev], update=update, backup='strip')
1574 self.strip(repo, [rev], update=update, backup='strip')
1575 if qpp:
1575 if qpp:
1576 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1576 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1577 (short(qpp[0]), short(qpp[1])))
1577 (short(qpp[0]), short(qpp[1])))
1578 if qupdate:
1578 if qupdate:
1579 self.ui.status(_("updating queue directory\n"))
1579 self.ui.status(_("updating queue directory\n"))
1580 r = self.qrepo()
1580 r = self.qrepo()
1581 if not r:
1581 if not r:
1582 self.ui.warn(_("unable to load queue repository\n"))
1582 self.ui.warn(_("unable to load queue repository\n"))
1583 return 1
1583 return 1
1584 hg.clean(r, qpp[0])
1584 hg.clean(r, qpp[0])
1585
1585
1586 def save(self, repo, msg=None):
1586 def save(self, repo, msg=None):
1587 if not self.applied:
1587 if not self.applied:
1588 self.ui.warn(_("save: no patches applied, exiting\n"))
1588 self.ui.warn(_("save: no patches applied, exiting\n"))
1589 return 1
1589 return 1
1590 if self.issaveline(self.applied[-1]):
1590 if self.issaveline(self.applied[-1]):
1591 self.ui.warn(_("status is already saved\n"))
1591 self.ui.warn(_("status is already saved\n"))
1592 return 1
1592 return 1
1593
1593
1594 if not msg:
1594 if not msg:
1595 msg = _("hg patches saved state")
1595 msg = _("hg patches saved state")
1596 else:
1596 else:
1597 msg = "hg patches: " + msg.rstrip('\r\n')
1597 msg = "hg patches: " + msg.rstrip('\r\n')
1598 r = self.qrepo()
1598 r = self.qrepo()
1599 if r:
1599 if r:
1600 pp = r.dirstate.parents()
1600 pp = r.dirstate.parents()
1601 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1601 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1602 msg += "\n\nPatch Data:\n"
1602 msg += "\n\nPatch Data:\n"
1603 msg += ''.join('%s\n' % x for x in self.applied)
1603 msg += ''.join('%s\n' % x for x in self.applied)
1604 msg += ''.join(':%s\n' % x for x in self.full_series)
1604 msg += ''.join(':%s\n' % x for x in self.full_series)
1605 n = repo.commit(msg, force=True)
1605 n = repo.commit(msg, force=True)
1606 if not n:
1606 if not n:
1607 self.ui.warn(_("repo commit failed\n"))
1607 self.ui.warn(_("repo commit failed\n"))
1608 return 1
1608 return 1
1609 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1609 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1610 self.applied_dirty = 1
1610 self.applied_dirty = 1
1611 self.removeundo(repo)
1611 self.removeundo(repo)
1612
1612
1613 def full_series_end(self):
1613 def full_series_end(self):
1614 if self.applied:
1614 if self.applied:
1615 p = self.applied[-1].name
1615 p = self.applied[-1].name
1616 end = self.find_series(p)
1616 end = self.find_series(p)
1617 if end is None:
1617 if end is None:
1618 return len(self.full_series)
1618 return len(self.full_series)
1619 return end + 1
1619 return end + 1
1620 return 0
1620 return 0
1621
1621
1622 def series_end(self, all_patches=False):
1622 def series_end(self, all_patches=False):
1623 """If all_patches is False, return the index of the next pushable patch
1623 """If all_patches is False, return the index of the next pushable patch
1624 in the series, or the series length. If all_patches is True, return the
1624 in the series, or the series length. If all_patches is True, return the
1625 index of the first patch past the last applied one.
1625 index of the first patch past the last applied one.
1626 """
1626 """
1627 end = 0
1627 end = 0
1628 def next(start):
1628 def next(start):
1629 if all_patches or start >= len(self.series):
1629 if all_patches or start >= len(self.series):
1630 return start
1630 return start
1631 for i in xrange(start, len(self.series)):
1631 for i in xrange(start, len(self.series)):
1632 p, reason = self.pushable(i)
1632 p, reason = self.pushable(i)
1633 if p:
1633 if p:
1634 break
1634 break
1635 self.explain_pushable(i)
1635 self.explain_pushable(i)
1636 return i
1636 return i
1637 if self.applied:
1637 if self.applied:
1638 p = self.applied[-1].name
1638 p = self.applied[-1].name
1639 try:
1639 try:
1640 end = self.series.index(p)
1640 end = self.series.index(p)
1641 except ValueError:
1641 except ValueError:
1642 return 0
1642 return 0
1643 return next(end + 1)
1643 return next(end + 1)
1644 return next(end)
1644 return next(end)
1645
1645
1646 def appliedname(self, index):
1646 def appliedname(self, index):
1647 pname = self.applied[index].name
1647 pname = self.applied[index].name
1648 if not self.ui.verbose:
1648 if not self.ui.verbose:
1649 p = pname
1649 p = pname
1650 else:
1650 else:
1651 p = str(self.series.index(pname)) + " " + pname
1651 p = str(self.series.index(pname)) + " " + pname
1652 return p
1652 return p
1653
1653
1654 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1654 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1655 force=None, git=False):
1655 force=None, git=False):
1656 def checkseries(patchname):
1656 def checkseries(patchname):
1657 if patchname in self.series:
1657 if patchname in self.series:
1658 raise util.Abort(_('patch %s is already in the series file')
1658 raise util.Abort(_('patch %s is already in the series file')
1659 % patchname)
1659 % patchname)
1660 def checkfile(patchname):
1660 def checkfile(patchname):
1661 if not force and os.path.exists(self.join(patchname)):
1661 if not force and os.path.exists(self.join(patchname)):
1662 raise util.Abort(_('patch "%s" already exists')
1662 raise util.Abort(_('patch "%s" already exists')
1663 % patchname)
1663 % patchname)
1664
1664
1665 if rev:
1665 if rev:
1666 if files:
1666 if files:
1667 raise util.Abort(_('option "-r" not valid when importing '
1667 raise util.Abort(_('option "-r" not valid when importing '
1668 'files'))
1668 'files'))
1669 rev = cmdutil.revrange(repo, rev)
1669 rev = cmdutil.revrange(repo, rev)
1670 rev.sort(reverse=True)
1670 rev.sort(reverse=True)
1671 if (len(files) > 1 or len(rev) > 1) and patchname:
1671 if (len(files) > 1 or len(rev) > 1) and patchname:
1672 raise util.Abort(_('option "-n" not valid when importing multiple '
1672 raise util.Abort(_('option "-n" not valid when importing multiple '
1673 'patches'))
1673 'patches'))
1674 if rev:
1674 if rev:
1675 # If mq patches are applied, we can only import revisions
1675 # If mq patches are applied, we can only import revisions
1676 # that form a linear path to qbase.
1676 # that form a linear path to qbase.
1677 # Otherwise, they should form a linear path to a head.
1677 # Otherwise, they should form a linear path to a head.
1678 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1678 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1679 if len(heads) > 1:
1679 if len(heads) > 1:
1680 raise util.Abort(_('revision %d is the root of more than one '
1680 raise util.Abort(_('revision %d is the root of more than one '
1681 'branch') % rev[-1])
1681 'branch') % rev[-1])
1682 if self.applied:
1682 if self.applied:
1683 base = repo.changelog.node(rev[0])
1683 base = repo.changelog.node(rev[0])
1684 if base in [n.node for n in self.applied]:
1684 if base in [n.node for n in self.applied]:
1685 raise util.Abort(_('revision %d is already managed')
1685 raise util.Abort(_('revision %d is already managed')
1686 % rev[0])
1686 % rev[0])
1687 if heads != [self.applied[-1].node]:
1687 if heads != [self.applied[-1].node]:
1688 raise util.Abort(_('revision %d is not the parent of '
1688 raise util.Abort(_('revision %d is not the parent of '
1689 'the queue') % rev[0])
1689 'the queue') % rev[0])
1690 base = repo.changelog.rev(self.applied[0].node)
1690 base = repo.changelog.rev(self.applied[0].node)
1691 lastparent = repo.changelog.parentrevs(base)[0]
1691 lastparent = repo.changelog.parentrevs(base)[0]
1692 else:
1692 else:
1693 if heads != [repo.changelog.node(rev[0])]:
1693 if heads != [repo.changelog.node(rev[0])]:
1694 raise util.Abort(_('revision %d has unmanaged children')
1694 raise util.Abort(_('revision %d has unmanaged children')
1695 % rev[0])
1695 % rev[0])
1696 lastparent = None
1696 lastparent = None
1697
1697
1698 diffopts = self.diffopts({'git': git})
1698 diffopts = self.diffopts({'git': git})
1699 for r in rev:
1699 for r in rev:
1700 p1, p2 = repo.changelog.parentrevs(r)
1700 p1, p2 = repo.changelog.parentrevs(r)
1701 n = repo.changelog.node(r)
1701 n = repo.changelog.node(r)
1702 if p2 != nullrev:
1702 if p2 != nullrev:
1703 raise util.Abort(_('cannot import merge revision %d') % r)
1703 raise util.Abort(_('cannot import merge revision %d') % r)
1704 if lastparent and lastparent != r:
1704 if lastparent and lastparent != r:
1705 raise util.Abort(_('revision %d is not the parent of %d')
1705 raise util.Abort(_('revision %d is not the parent of %d')
1706 % (r, lastparent))
1706 % (r, lastparent))
1707 lastparent = p1
1707 lastparent = p1
1708
1708
1709 if not patchname:
1709 if not patchname:
1710 patchname = normname('%d.diff' % r)
1710 patchname = normname('%d.diff' % r)
1711 self.check_reserved_name(patchname)
1711 self.check_reserved_name(patchname)
1712 checkseries(patchname)
1712 checkseries(patchname)
1713 checkfile(patchname)
1713 checkfile(patchname)
1714 self.full_series.insert(0, patchname)
1714 self.full_series.insert(0, patchname)
1715
1715
1716 patchf = self.opener(patchname, "w")
1716 patchf = self.opener(patchname, "w")
1717 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1717 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1718 patchf.close()
1718 patchf.close()
1719
1719
1720 se = statusentry(n, patchname)
1720 se = statusentry(n, patchname)
1721 self.applied.insert(0, se)
1721 self.applied.insert(0, se)
1722
1722
1723 self.added.append(patchname)
1723 self.added.append(patchname)
1724 patchname = None
1724 patchname = None
1725 self.parse_series()
1725 self.parse_series()
1726 self.applied_dirty = 1
1726 self.applied_dirty = 1
1727 self.series_dirty = True
1727 self.series_dirty = True
1728
1728
1729 for i, filename in enumerate(files):
1729 for i, filename in enumerate(files):
1730 if existing:
1730 if existing:
1731 if filename == '-':
1731 if filename == '-':
1732 raise util.Abort(_('-e is incompatible with import from -'))
1732 raise util.Abort(_('-e is incompatible with import from -'))
1733 filename = normname(filename)
1733 filename = normname(filename)
1734 self.check_reserved_name(filename)
1734 self.check_reserved_name(filename)
1735 originpath = self.join(filename)
1735 originpath = self.join(filename)
1736 if not os.path.isfile(originpath):
1736 if not os.path.isfile(originpath):
1737 raise util.Abort(_("patch %s does not exist") % filename)
1737 raise util.Abort(_("patch %s does not exist") % filename)
1738
1738
1739 if patchname:
1739 if patchname:
1740 self.check_reserved_name(patchname)
1740 self.check_reserved_name(patchname)
1741 checkfile(patchname)
1741 checkfile(patchname)
1742
1742
1743 self.ui.write(_('renaming %s to %s\n')
1743 self.ui.write(_('renaming %s to %s\n')
1744 % (filename, patchname))
1744 % (filename, patchname))
1745 util.rename(originpath, self.join(patchname))
1745 util.rename(originpath, self.join(patchname))
1746 else:
1746 else:
1747 patchname = filename
1747 patchname = filename
1748
1748
1749 else:
1749 else:
1750 try:
1750 try:
1751 if filename == '-':
1751 if filename == '-':
1752 if not patchname:
1752 if not patchname:
1753 raise util.Abort(
1753 raise util.Abort(
1754 _('need --name to import a patch from -'))
1754 _('need --name to import a patch from -'))
1755 text = sys.stdin.read()
1755 text = sys.stdin.read()
1756 else:
1756 else:
1757 text = url.open(self.ui, filename).read()
1757 text = url.open(self.ui, filename).read()
1758 except (OSError, IOError):
1758 except (OSError, IOError):
1759 raise util.Abort(_("unable to read file %s") % filename)
1759 raise util.Abort(_("unable to read file %s") % filename)
1760 if not patchname:
1760 if not patchname:
1761 patchname = normname(os.path.basename(filename))
1761 patchname = normname(os.path.basename(filename))
1762 self.check_reserved_name(patchname)
1762 self.check_reserved_name(patchname)
1763 checkfile(patchname)
1763 checkfile(patchname)
1764 patchf = self.opener(patchname, "w")
1764 patchf = self.opener(patchname, "w")
1765 patchf.write(text)
1765 patchf.write(text)
1766 if not force:
1766 if not force:
1767 checkseries(patchname)
1767 checkseries(patchname)
1768 if patchname not in self.series:
1768 if patchname not in self.series:
1769 index = self.full_series_end() + i
1769 index = self.full_series_end() + i
1770 self.full_series[index:index] = [patchname]
1770 self.full_series[index:index] = [patchname]
1771 self.parse_series()
1771 self.parse_series()
1772 self.series_dirty = True
1772 self.series_dirty = True
1773 self.ui.warn(_("adding %s to series file\n") % patchname)
1773 self.ui.warn(_("adding %s to series file\n") % patchname)
1774 self.added.append(patchname)
1774 self.added.append(patchname)
1775 patchname = None
1775 patchname = None
1776
1776
1777 def delete(ui, repo, *patches, **opts):
1777 def delete(ui, repo, *patches, **opts):
1778 """remove patches from queue
1778 """remove patches from queue
1779
1779
1780 The patches must not be applied, and at least one patch is required. With
1780 The patches must not be applied, and at least one patch is required. With
1781 -k/--keep, the patch files are preserved in the patch directory.
1781 -k/--keep, the patch files are preserved in the patch directory.
1782
1782
1783 To stop managing a patch and move it into permanent history,
1783 To stop managing a patch and move it into permanent history,
1784 use the :hg:`qfinish` command."""
1784 use the :hg:`qfinish` command."""
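# Editorial usage sketch (hypothetical patch names):
#   hg qdelete obsolete.patch          # delete the patch and its file
#   hg qdelete --keep old-idea.patch   # drop it from the series, keep the file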
1785 q = repo.mq
1785 q = repo.mq
1786 q.delete(repo, patches, opts)
1786 q.delete(repo, patches, opts)
1787 q.save_dirty()
1787 q.save_dirty()
1788 return 0
1788 return 0
1789
1789
1790 def applied(ui, repo, patch=None, **opts):
1790 def applied(ui, repo, patch=None, **opts):
1791 """print the patches already applied
1791 """print the patches already applied
1792
1792
1793 Returns 0 on success."""
1793 Returns 0 on success."""
1794
1794
1795 q = repo.mq
1795 q = repo.mq
1796
1796
1797 if patch:
1797 if patch:
1798 if patch not in q.series:
1798 if patch not in q.series:
1799 raise util.Abort(_("patch %s is not in series file") % patch)
1799 raise util.Abort(_("patch %s is not in series file") % patch)
1800 end = q.series.index(patch) + 1
1800 end = q.series.index(patch) + 1
1801 else:
1801 else:
1802 end = q.series_end(True)
1802 end = q.series_end(True)
1803
1803
1804 if opts.get('last') and not end:
1804 if opts.get('last') and not end:
1805 ui.write(_("no patches applied\n"))
1805 ui.write(_("no patches applied\n"))
1806 return 1
1806 return 1
1807 elif opts.get('last') and end == 1:
1807 elif opts.get('last') and end == 1:
1808 ui.write(_("only one patch applied\n"))
1808 ui.write(_("only one patch applied\n"))
1809 return 1
1809 return 1
1810 elif opts.get('last'):
1810 elif opts.get('last'):
1811 start = end - 2
1811 start = end - 2
1812 end = 1
1812 end = 1
1813 else:
1813 else:
1814 start = 0
1814 start = 0
1815
1815
1816 q.qseries(repo, length=end, start=start, status='A',
1816 q.qseries(repo, length=end, start=start, status='A',
1817 summary=opts.get('summary'))
1817 summary=opts.get('summary'))
1818
1818
1819
1819
1820 def unapplied(ui, repo, patch=None, **opts):
1820 def unapplied(ui, repo, patch=None, **opts):
1821 """print the patches not yet applied
1821 """print the patches not yet applied
1822
1822
1823 Returns 0 on success."""
1823 Returns 0 on success."""
1824
1824
1825 q = repo.mq
1825 q = repo.mq
1826 if patch:
1826 if patch:
1827 if patch not in q.series:
1827 if patch not in q.series:
1828 raise util.Abort(_("patch %s is not in series file") % patch)
1828 raise util.Abort(_("patch %s is not in series file") % patch)
1829 start = q.series.index(patch) + 1
1829 start = q.series.index(patch) + 1
1830 else:
1830 else:
1831 start = q.series_end(True)
1831 start = q.series_end(True)
1832
1832
1833 if start == len(q.series) and opts.get('first'):
1833 if start == len(q.series) and opts.get('first'):
1834 ui.write(_("all patches applied\n"))
1834 ui.write(_("all patches applied\n"))
1835 return 1
1835 return 1
1836
1836
1837 length = opts.get('first') and 1 or None
1837 length = opts.get('first') and 1 or None
1838 q.qseries(repo, start=start, length=length, status='U',
1838 q.qseries(repo, start=start, length=length, status='U',
1839 summary=opts.get('summary'))
1839 summary=opts.get('summary'))
1840
1840
1841 def qimport(ui, repo, *filename, **opts):
1841 def qimport(ui, repo, *filename, **opts):
1842 """import a patch
1842 """import a patch
1843
1843
1844 The patch is inserted into the series after the last applied
1844 The patch is inserted into the series after the last applied
1845 patch. If no patches have been applied, qimport prepends the patch
1845 patch. If no patches have been applied, qimport prepends the patch
1846 to the series.
1846 to the series.
1847
1847
1848 The patch will have the same name as its source file unless you
1848 The patch will have the same name as its source file unless you
1849 give it a new one with -n/--name.
1849 give it a new one with -n/--name.
1850
1850
1851 You can register an existing patch inside the patch directory with
1851 You can register an existing patch inside the patch directory with
1852 the -e/--existing flag.
1852 the -e/--existing flag.
1853
1853
1854 With -f/--force, an existing patch of the same name will be
1854 With -f/--force, an existing patch of the same name will be
1855 overwritten.
1855 overwritten.
1856
1856
1857 An existing changeset may be placed under mq control with -r/--rev
1857 An existing changeset may be placed under mq control with -r/--rev
1858 (e.g. qimport --rev tip -n patch will place tip under mq control).
1858 (e.g. qimport --rev tip -n patch will place tip under mq control).
1859 With -g/--git, patches imported with --rev will use the git diff
1859 With -g/--git, patches imported with --rev will use the git diff
1860 format. See the diffs help topic for information on why this is
1860 format. See the diffs help topic for information on why this is
1861 important for preserving rename/copy information and permission
1861 important for preserving rename/copy information and permission
1862 changes.
1862 changes.
1863
1863
1864 To import a patch from standard input, pass - as the patch file.
1864 To import a patch from standard input, pass - as the patch file.
1865 When importing from standard input, a patch name must be specified
1865 When importing from standard input, a patch name must be specified
1866 using the --name flag.
1866 using the --name flag.
1867
1867
1868 To import an existing patch while renaming it::
1868 To import an existing patch while renaming it::
1869
1869
1870 hg qimport -e existing-patch -n new-name
1870 hg qimport -e existing-patch -n new-name
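
 To place the current tip under mq control in git format, or to import a
 patch from standard input (illustrative invocations; the patch names are
 only examples)::

 hg qimport --rev tip -n tip.patch --git
 hg qimport -n from-stdin.patch -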
1871
1871
1872 Returns 0 if import succeeded.
1872 Returns 0 if import succeeded.
1873 """
1873 """
1874 q = repo.mq
1874 q = repo.mq
1875 try:
1875 try:
1876 q.qimport(repo, filename, patchname=opts.get('name'),
1876 q.qimport(repo, filename, patchname=opts.get('name'),
1877 existing=opts.get('existing'), force=opts.get('force'),
1877 existing=opts.get('existing'), force=opts.get('force'),
1878 rev=opts.get('rev'), git=opts.get('git'))
1878 rev=opts.get('rev'), git=opts.get('git'))
1879 finally:
1879 finally:
1880 q.save_dirty()
1880 q.save_dirty()
1881
1881
1882 if opts.get('push') and not opts.get('rev'):
1882 if opts.get('push') and not opts.get('rev'):
1883 return q.push(repo, None)
1883 return q.push(repo, None)
1884 return 0
1884 return 0
1885
1885
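# A minimal illustrative sketch (not part of the original extension): the same
# import-then-push sequence that ``hg qimport FILE`` followed by ``hg qpush``
# performs, driven through the queue object used above. The function name and
# the single-file signature are assumptions made for this example.
def _qimport_and_push_example(ui, repo, patchfile):
    q = repo.mq
    try:
        # register the on-disk patch at the current series position
        q.qimport(repo, [patchfile], patchname=None, existing=False,
                  force=False, rev=[], git=False)
    finally:
        q.save_dirty()
    # apply the freshly registered patch, like a plain qpush
    return q.push(repo, None)
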
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.save_dirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0

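# For reference, the .hgignore written by qinit() above ends up containing:
#
#   ^\.hg
#   ^\.mq
#   syntax: glob
#   status
#   guards
#
# i.e. two regexp patterns followed by glob entries, keeping the transient
# status and guards files out of the versioned patch repository.
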
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    return qinit(ui, repo, create=opts.get('create_repo'))

def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command cannot check whether patches are
    applied there, so it cannot guarantee that patches are not applied
    in the destination. If you clone a remote repository, make sure
    beforehand that it has no patches applied.

    The source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change this.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())

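# Illustrative invocations of the command above (exposed as "qclone"; the URLs
# and paths are made up for the example):
#
#   hg qclone ssh://example.com/repo             # patch repo assumed at <src>/.hg/patches
#   hg qclone -p ssh://example.com/patches repo  # explicit patch repository location
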
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)

def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
    return 0

def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    t = q.applied and q.series_end(True) or 0
    if t:
        q.qseries(repo, start=t - 1, length=1, status='A',
                  summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1

def next(ui, repo, **opts):
    """print the name of the next patch

    Returns 0 on success."""
    q = repo.mq
    end = q.series_end()
    if end == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))

def prev(ui, repo, **opts):
    """print the name of the previous patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not l:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=l - 2, length=1, status='A',
              summary=opts.get('summary'))

def setupheaderopts(ui, opts):
    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % util.makedate()

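# setupheaderopts() above fills in the -U/--currentuser and -D/--currentdate
# defaults; util.makedate() returns a (unixtime, timezone offset) pair, so the
# stored date string looks like "1292068722 -3600" (seconds since the epoch
# followed by the timezone offset in seconds).
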
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        return ui.edit(msg, opts.get('user') or ui.username())
    q = repo.mq
    opts['msg'] = msg
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0

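# In new() above, opts['msg'] is either the literal commit message or, with
# -e/--edit, the getmsg callable, so that the editor is only launched once
# queue.new() actually needs the message.
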
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts.get('edit'):
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        msgfile = repo.opener('last-message.txt', 'wb')
        msgfile.write(message)
        msgfile.close()
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret

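# Recovery path hinted at above (illustrative command, file name taken from the
# code): if qrefresh aborts after the editor ran, the message survives in
# .hg/last-message.txt and can be reused once the problem is fixed, e.g.
#
#   hg qrefresh -l .hg/last-message.txt
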
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    repo.mq.diff(repo, pats, opts)
    return 0

def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts.get('edit'):
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)
        cmdutil.updatedir(ui, repo, files)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    q.refresh(repo, msg=message, git=diffopts.git)
    q.delete(repo, patches, opts)
    q.save_dirty()

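# Illustrative qfold session (patch names assumed): with current.patch applied
# and a.patch/b.patch still unapplied,
#
#   hg qfold a.patch b.patch
#
# applies both onto current.patch, refreshes current.patch with the combined
# result, joins the three headers with "* * *" separator lines, and removes
# a.patch and b.patch from the series (pass -k/--keep to keep the files).
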
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    q = repo.mq
    patch = q.lookup(patch)
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=opts.get('force'))
    else:
        ret = q.push(repo, patch, force=opts.get('force'))
    q.save_dirty()
    return ret

def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::
       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        guards = q.series_guards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))

def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')

def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile("%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)

def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + ".%d" % (index + 1)
    return newpath

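def _savename_example(path, names):
    # Illustrative sketch only (not part of the original extension): the same
    # numbering scheme as lastsavename()/savename() above, applied to a plain
    # list of file names instead of a real directory listing. For
    # names = ['patches', 'patches.1', 'patches.2'] it returns path + '.3'.
    base = os.path.basename(path)
    namere = re.compile("%s.([0-9]+)" % base)
    indices = [int(m.group(1)) for m in map(namere.match, names) if m]
    return path + ".%d" % (indices and max(indices) + 1 or 1)
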
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    if opts.get('merge'):
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'))
    return ret

def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.

    Return 0 on success.
    """
    localupdate = True
    if opts.get('name'):
        q = queue(ui, repo.join(""), repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'))
    q.save_dirty()
    return ret

def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""

    q = repo.mq

    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(
            _('A patch named %s already exists in the series file') % name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    wctx.undelete([name])
                wctx.copy(patch, name)
                wctx.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()

def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.save_dirty()
    return 0

def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts.get('copy'):
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        try:
            os.unlink(q.join(q.status_path))
        except:
            pass
    return 0

def strip(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes,
    the operation is aborted unless the --force flag is supplied.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Return 0 on success.
    """
    backup = 'all'
    if opts.get('backup'):
        backup = 'strip'
    elif opts.get('no-backup') or opts.get('nobackup'):
        backup = 'none'

    cl = repo.changelog
    revs = set(cmdutil.revrange(repo, revs))
    if not revs:
        raise util.Abort(_('empty revision set'))

    descendants = set(cl.descendants(*revs))
    strippedrevs = revs.union(descendants)
    roots = revs.difference(descendants)

    update = False
    # if one of the wdir parent is stripped we'll need
    # to update away to an earlier revision
    for p in repo.dirstate.parents():
        if p != nullid and cl.rev(p) in strippedrevs:
            update = True
            break

    rootnodes = set(cl.node(r) for r in roots)

    q = repo.mq
    if q.applied:
        # refresh queue state if we're about to strip
        # applied patches
        if cl.rev(repo.lookup('qtip')) in strippedrevs:
            q.applied_dirty = True
            start = 0
            end = len(q.applied)
            for i, statusentry in enumerate(q.applied):
                if statusentry.node in rootnodes:
                    # if one of the stripped roots is an applied
                    # patch, only part of the queue is stripped
                    start = i
                    break
            del q.applied[start:end]
            q.save_dirty()

    revs = list(rootnodes)
    if update and opts.get('keep'):
        wlock = repo.wlock()
        try:
            urev = repo.mq.qparents(repo, revs[0])
            repo.dirstate.rebuild(urev, repo[urev].manifest())
            repo.dirstate.write()
            update = False
        finally:
            wlock.release()

    repo.mq.strip(repo, revs, backup=backup, update=update,
                  force=opts.get('force'))
    return 0

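# Backup-mode mapping used in strip() above: by default everything stripped is
# saved as a bundle under .hg/strip-backup ('all'); --backup narrows this to
# the 'strip' mode, and --no-backup/--nobackup selects 'none', discarding the
# backup entirely as described in the docstring.
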
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on a patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

      qguard foo.patch -stable (negative guard)
      qguard bar.patch +stable (positive guard)
      qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    if args or opts.get('none'):
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()

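# Illustrative guard workflow matching the docstring above (guard and patch
# names assumed):
#
#   hg qguard foo.patch -- -stable   # never push foo.patch while "stable" is active
#   hg qguard bar.patch +stable      # push bar.patch only while "stable" is active
#   hg qselect stable                # activate the guard
#   hg qselect --none                # deactivate all guards again
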
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get('applied') and not revrange:
        raise util.Abort(_('no revisions specified'))
    elif opts.get('applied'):
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = cmdutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.save_dirty()
    return 0

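# Illustrative qfinish invocations (equivalent forms, matching the handling of
# --applied above):
#
#   hg qfinish -a                # finish every applied patch
#   hg qfinish 'qbase::qtip'     # the revision range the code substitutes for -a
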
2715 def qqueue(ui, repo, name=None, **opts):
2715 def qqueue(ui, repo, name=None, **opts):
2716 '''manage multiple patch queues
2716 '''manage multiple patch queues
2717
2717
2718 Supports switching between different patch queues, as well as creating
2718 Supports switching between different patch queues, as well as creating
2719 new patch queues and deleting existing ones.
2719 new patch queues and deleting existing ones.
2720
2720
2721 Omitting a queue name or specifying -l/--list will show you the registered
2721 Omitting a queue name or specifying -l/--list will show you the registered
2722 queues - by default the "normal" patches queue is registered. The currently
2722 queues - by default the "normal" patches queue is registered. The currently
2723 active queue will be marked with "(active)".
2723 active queue will be marked with "(active)".
2724
2724
2725 To create a new queue, use -c/--create. The queue is automatically made
2725 To create a new queue, use -c/--create. The queue is automatically made
2726 active, except in the case where there are applied patches from the
2726 active, except in the case where there are applied patches from the
2727 currently active queue in the repository. Then the queue will only be
2727 currently active queue in the repository. Then the queue will only be
2728 created and switching will fail.
2728 created and switching will fail.
2729
2729
2730 To delete an existing queue, use --delete. You cannot delete the currently
2730 To delete an existing queue, use --delete. You cannot delete the currently
2731 active queue.
2731 active queue.
2732
2732
2733 Returns 0 on success.
2733 Returns 0 on success.
2734 '''
2734 '''
2735
2735
2736 q = repo.mq
2736 q = repo.mq
2737
2737
2738 _defaultqueue = 'patches'
2738 _defaultqueue = 'patches'
2739 _allqueues = 'patches.queues'
2739 _allqueues = 'patches.queues'
2740 _activequeue = 'patches.queue'
2740 _activequeue = 'patches.queue'
2741
2741
2742 def _getcurrent():
2742 def _getcurrent():
2743 cur = os.path.basename(q.path)
2743 cur = os.path.basename(q.path)
2744 if cur.startswith('patches-'):
2744 if cur.startswith('patches-'):
2745 cur = cur[8:]
2745 cur = cur[8:]
2746 return cur
2746 return cur
2747
2747
2748 def _noqueues():
2748 def _noqueues():
2749 try:
2749 try:
2750 fh = repo.opener(_allqueues, 'r')
2750 fh = repo.opener(_allqueues, 'r')
2751 fh.close()
2751 fh.close()
2752 except IOError:
2752 except IOError:
2753 return True
2753 return True
2754
2754
2755 return False
2755 return False
2756
2756
2757 def _getqueues():
2757 def _getqueues():
2758 current = _getcurrent()
2758 current = _getcurrent()
2759
2759
2760 try:
2760 try:
2761 fh = repo.opener(_allqueues, 'r')
2761 fh = repo.opener(_allqueues, 'r')
2762 queues = [queue.strip() for queue in fh if queue.strip()]
2762 queues = [queue.strip() for queue in fh if queue.strip()]
2763 if current not in queues:
2763 if current not in queues:
2764 queues.append(current)
2764 queues.append(current)
2765 except IOError:
2765 except IOError:
2766 queues = [_defaultqueue]
2766 queues = [_defaultqueue]
2767
2767
2768 return sorted(queues)
2768 return sorted(queues)
2769
2769
2770 def _setactive(name):
2770 def _setactive(name):
2771 if q.applied:
2771 if q.applied:
2772 raise util.Abort(_('patches applied - cannot set new queue active'))
2772 raise util.Abort(_('patches applied - cannot set new queue active'))
2773 _setactivenocheck(name)
2773 _setactivenocheck(name)
2774
2774
2775 def _setactivenocheck(name):
2775 def _setactivenocheck(name):
2776 fh = repo.opener(_activequeue, 'w')
2776 fh = repo.opener(_activequeue, 'w')
2777 if name != 'patches':
2777 if name != 'patches':
2778 fh.write(name)
2778 fh.write(name)
2779 fh.close()
2779 fh.close()
2780
2780
2781 def _addqueue(name):
2781 def _addqueue(name):
2782 fh = repo.opener(_allqueues, 'a')
2782 fh = repo.opener(_allqueues, 'a')
2783 fh.write('%s\n' % (name,))
2783 fh.write('%s\n' % (name,))
2784 fh.close()
2784 fh.close()
2785
2785
2786 def _queuedir(name):
2786 def _queuedir(name):
2787 if name == 'patches':
2787 if name == 'patches':
2788 return repo.join('patches')
2788 return repo.join('patches')
2789 else:
2789 else:
2790 return repo.join('patches-' + name)
2790 return repo.join('patches-' + name)
2791
2791
2792 def _validname(name):
2792 def _validname(name):
2793 for n in name:
2793 for n in name:
2794 if n in ':\\/.':
2794 if n in ':\\/.':
2795 return False
2795 return False
2796 return True
2796 return True
2797
2797
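_validname above accepts any name that avoids the four characters ':', '\', '/' and '.', which would clash with path and revset syntax. A small standalone check with hypothetical queue names:

    FORBIDDEN = ':\\/.'

    def validname(name):
        # same rule as _validname above, written as a single expression
        return all(c not in FORBIDDEN for c in name)

    assert validname('fixes-1_9')        # letters, digits, '-' and '_' are fine
    assert not validname('feature/x')    # '/' is rejected
    assert not validname('v1.0')         # so is '.'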
2798 def _delete(name):
2798 def _delete(name):
2799 if name not in existing:
2799 if name not in existing:
2800 raise util.Abort(_('cannot delete queue that does not exist'))
2800 raise util.Abort(_('cannot delete queue that does not exist'))
2801
2801
2802 current = _getcurrent()
2802 current = _getcurrent()
2803
2803
2804 if name == current:
2804 if name == current:
2805 raise util.Abort(_('cannot delete currently active queue'))
2805 raise util.Abort(_('cannot delete currently active queue'))
2806
2806
2807 fh = repo.opener('patches.queues.new', 'w')
2807 fh = repo.opener('patches.queues.new', 'w')
2808 for queue in existing:
2808 for queue in existing:
2809 if queue == name:
2809 if queue == name:
2810 continue
2810 continue
2811 fh.write('%s\n' % (queue,))
2811 fh.write('%s\n' % (queue,))
2812 fh.close()
2812 fh.close()
2813 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2813 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2814
2814
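_delete (and the rename branch further down) updates the queue registry with a write-then-rename pattern: the surviving names are written to 'patches.queues.new', which is then renamed over 'patches.queues', so an interrupted write never leaves a truncated registry behind. A minimal sketch of the same idea with plain os calls (the path handling here is hypothetical; Mercurial's util.rename additionally copes with Windows, where renaming over an existing file fails):

    import os

    def rewrite_registry(path, keep):
        tmp = path + '.new'
        f = open(tmp, 'w')
        try:
            for name in keep:
                f.write('%s\n' % name)
        finally:
            f.close()
        # atomic on POSIX: readers see either the old or the new registry
        os.rename(tmp, path)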
2815 if not name or opts.get('list'):
2815 if not name or opts.get('list'):
2816 current = _getcurrent()
2816 current = _getcurrent()
2817 for queue in _getqueues():
2817 for queue in _getqueues():
2818 ui.write('%s' % (queue,))
2818 ui.write('%s' % (queue,))
2819 if queue == current and not ui.quiet:
2819 if queue == current and not ui.quiet:
2820 ui.write(_(' (active)\n'))
2820 ui.write(_(' (active)\n'))
2821 else:
2821 else:
2822 ui.write('\n')
2822 ui.write('\n')
2823 return
2823 return
2824
2824
2825 if not _validname(name):
2825 if not _validname(name):
2826 raise util.Abort(
2826 raise util.Abort(
2827 _('invalid queue name, may not contain the characters ":\\/."'))
2827 _('invalid queue name, may not contain the characters ":\\/."'))
2828
2828
2829 existing = _getqueues()
2829 existing = _getqueues()
2830
2830
2831 if opts.get('create'):
2831 if opts.get('create'):
2832 if name in existing:
2832 if name in existing:
2833 raise util.Abort(_('queue "%s" already exists') % name)
2833 raise util.Abort(_('queue "%s" already exists') % name)
2834 if _noqueues():
2834 if _noqueues():
2835 _addqueue(_defaultqueue)
2835 _addqueue(_defaultqueue)
2836 _addqueue(name)
2836 _addqueue(name)
2837 _setactive(name)
2837 _setactive(name)
2838 elif opts.get('rename'):
2838 elif opts.get('rename'):
2839 current = _getcurrent()
2839 current = _getcurrent()
2840 if name == current:
2840 if name == current:
2841 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
2841 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
2842 if name in existing:
2842 if name in existing:
2843 raise util.Abort(_('queue "%s" already exists') % name)
2843 raise util.Abort(_('queue "%s" already exists') % name)
2844
2844
2845 olddir = _queuedir(current)
2845 olddir = _queuedir(current)
2846 newdir = _queuedir(name)
2846 newdir = _queuedir(name)
2847
2847
2848 if os.path.exists(newdir):
2848 if os.path.exists(newdir):
2849 raise util.Abort(_('non-queue directory "%s" already exists') %
2849 raise util.Abort(_('non-queue directory "%s" already exists') %
2850 newdir)
2850 newdir)
2851
2851
2852 fh = repo.opener('patches.queues.new', 'w')
2852 fh = repo.opener('patches.queues.new', 'w')
2853 for queue in existing:
2853 for queue in existing:
2854 if queue == current:
2854 if queue == current:
2855 fh.write('%s\n' % (name,))
2855 fh.write('%s\n' % (name,))
2856 if os.path.exists(olddir):
2856 if os.path.exists(olddir):
2857 util.rename(olddir, newdir)
2857 util.rename(olddir, newdir)
2858 else:
2858 else:
2859 fh.write('%s\n' % (queue,))
2859 fh.write('%s\n' % (queue,))
2860 fh.close()
2860 fh.close()
2861 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2861 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2862 _setactivenocheck(name)
2862 _setactivenocheck(name)
2863 elif opts.get('delete'):
2863 elif opts.get('delete'):
2864 _delete(name)
2864 _delete(name)
2865 elif opts.get('purge'):
2865 elif opts.get('purge'):
2866 if name in existing:
2866 if name in existing:
2867 _delete(name)
2867 _delete(name)
2868 qdir = _queuedir(name)
2868 qdir = _queuedir(name)
2869 if os.path.exists(qdir):
2869 if os.path.exists(qdir):
2870 shutil.rmtree(qdir)
2870 shutil.rmtree(qdir)
2871 else:
2871 else:
2872 if name not in existing:
2872 if name not in existing:
2873 raise util.Abort(_('use --create to create a new queue'))
2873 raise util.Abort(_('use --create to create a new queue'))
2874 _setactive(name)
2874 _setactive(name)
2875
2875
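Putting the helpers above together, the on-disk layout is straightforward: the default queue lives in .hg/patches, a queue named NAME lives in .hg/patches-NAME, all known names sit one per line in .hg/patches.queues, and .hg/patches.queue holds the active name (empty or missing means the default). A small sketch of the name/directory mapping, mirroring _queuedir and _getcurrent (paths are illustrative only):

    import os

    def queuedir(hgdir, name):
        # the default queue keeps the historical directory name
        if name == 'patches':
            return os.path.join(hgdir, 'patches')
        return os.path.join(hgdir, 'patches-' + name)

    def queuename(qdir):
        base = os.path.basename(qdir)
        if base.startswith('patches-'):
            return base[len('patches-'):]
        return base

    assert queuename(queuedir('/repo/.hg', 'stable')) == 'stable'
    assert queuename(queuedir('/repo/.hg', 'patches')) == 'patches'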
2876 def reposetup(ui, repo):
2876 def reposetup(ui, repo):
2877 class mqrepo(repo.__class__):
2877 class mqrepo(repo.__class__):
2878 @util.propertycache
2878 @util.propertycache
2879 def mq(self):
2879 def mq(self):
2880 return queue(self.ui, self.join(""))
2880 return queue(self.ui, self.join(""))
2881
2881
2882 def abort_if_wdir_patched(self, errmsg, force=False):
2882 def abort_if_wdir_patched(self, errmsg, force=False):
2883 if self.mq.applied and not force:
2883 if self.mq.applied and not force:
2884 parent = self.dirstate.parents()[0]
2884 parent = self.dirstate.parents()[0]
2885 if parent in [s.node for s in self.mq.applied]:
2885 if parent in [s.node for s in self.mq.applied]:
2886 raise util.Abort(errmsg)
2886 raise util.Abort(errmsg)
2887
2887
2888 def commit(self, text="", user=None, date=None, match=None,
2888 def commit(self, text="", user=None, date=None, match=None,
2889 force=False, editor=False, extra={}):
2889 force=False, editor=False, extra={}):
2890 self.abort_if_wdir_patched(
2890 self.abort_if_wdir_patched(
2891 _('cannot commit over an applied mq patch'),
2891 _('cannot commit over an applied mq patch'),
2892 force)
2892 force)
2893
2893
2894 return super(mqrepo, self).commit(text, user, date, match, force,
2894 return super(mqrepo, self).commit(text, user, date, match, force,
2895 editor, extra)
2895 editor, extra)
2896
2896
2897 def push(self, remote, force=False, revs=None, newbranch=False):
2897 def push(self, remote, force=False, revs=None, newbranch=False):
2898 if self.mq.applied and not force:
2898 if self.mq.applied and not force:
2899 haspatches = True
2899 haspatches = True
2900 if revs:
2900 if revs:
2901 # Assume applied patches have no non-patch descendants
2901 # Assume applied patches have no non-patch descendants
2902 # and are not on remote already. If they appear in the
2902 # and are not on remote already. If they appear in the
2903 # set of resolved 'revs', bail out.
2903 # set of resolved 'revs', bail out.
2904 applied = set(e.node for e in self.mq.applied)
2904 applied = set(e.node for e in self.mq.applied)
2905 haspatches = bool([n for n in revs if n in applied])
2905 haspatches = bool([n for n in revs if n in applied])
2906 if haspatches:
2906 if haspatches:
2907 raise util.Abort(_('source has mq patches applied'))
2907 raise util.Abort(_('source has mq patches applied'))
2908 return super(mqrepo, self).push(remote, force, revs, newbranch)
2908 return super(mqrepo, self).push(remote, force, revs, newbranch)
2909
2909
2910 def _findtags(self):
2910 def _findtags(self):
2911 '''augment tags from base class with patch tags'''
2911 '''augment tags from base class with patch tags'''
2912 result = super(mqrepo, self)._findtags()
2912 result = super(mqrepo, self)._findtags()
2913
2913
2914 q = self.mq
2914 q = self.mq
2915 if not q.applied:
2915 if not q.applied:
2916 return result
2916 return result
2917
2917
2918 mqtags = [(patch.node, patch.name) for patch in q.applied]
2918 mqtags = [(patch.node, patch.name) for patch in q.applied]
2919
2919
2920 if mqtags[-1][0] not in self.changelog.nodemap:
2920 if mqtags[-1][0] not in self.changelog.nodemap:
2921 self.ui.warn(_('mq status file refers to unknown node %s\n')
2921 self.ui.warn(_('mq status file refers to unknown node %s\n')
2922 % short(mqtags[-1][0]))
2922 % short(mqtags[-1][0]))
2923 return result
2923 return result
2924
2924
2925 mqtags.append((mqtags[-1][0], 'qtip'))
2925 mqtags.append((mqtags[-1][0], 'qtip'))
2926 mqtags.append((mqtags[0][0], 'qbase'))
2926 mqtags.append((mqtags[0][0], 'qbase'))
2927 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2927 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2928 tags = result[0]
2928 tags = result[0]
2929 for patch in mqtags:
2929 for patch in mqtags:
2930 if patch[1] in tags:
2930 if patch[1] in tags:
2931 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2931 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2932 % patch[1])
2932 % patch[1])
2933 else:
2933 else:
2934 tags[patch[1]] = patch[0]
2934 tags[patch[1]] = patch[0]
2935
2935
2936 return result
2936 return result
2937
2937
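The tag synthesis in _findtags above boils down to this: every applied patch contributes a tag named after the patch, plus three derived tags (qtip for the last applied changeset, qbase for the first, qparent for the parent of qbase), and a real repository tag always wins over a patch of the same name. A minimal sketch with placeholder node strings:

    applied = [('n1', 'first.patch'), ('n2', 'second.patch')]   # (node, name), oldest first
    mqtags = list(applied)
    mqtags.append((applied[-1][0], 'qtip'))     # last applied changeset
    mqtags.append((applied[0][0], 'qbase'))     # first applied changeset
    mqtags.append(('n0', 'qparent'))            # parent of qbase (placeholder node)

    tags = {'second.patch': 'real-tag-node'}    # pre-existing real tag
    for node, name in mqtags:
        if name not in tags:                    # real tags override patch names
            tags[name] = node

    assert tags['qtip'] == 'n2'
    assert tags['second.patch'] == 'real-tag-node'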
2938 def _branchtags(self, partial, lrev):
2938 def _branchtags(self, partial, lrev):
2939 q = self.mq
2939 q = self.mq
2940 if not q.applied:
2940 if not q.applied:
2941 return super(mqrepo, self)._branchtags(partial, lrev)
2941 return super(mqrepo, self)._branchtags(partial, lrev)
2942
2942
2943 cl = self.changelog
2943 cl = self.changelog
2944 qbasenode = q.applied[0].node
2944 qbasenode = q.applied[0].node
2945 if qbasenode not in cl.nodemap:
2945 if qbasenode not in cl.nodemap:
2946 self.ui.warn(_('mq status file refers to unknown node %s\n')
2946 self.ui.warn(_('mq status file refers to unknown node %s\n')
2947 % short(qbasenode))
2947 % short(qbasenode))
2948 return super(mqrepo, self)._branchtags(partial, lrev)
2948 return super(mqrepo, self)._branchtags(partial, lrev)
2949
2949
2950 qbase = cl.rev(qbasenode)
2950 qbase = cl.rev(qbasenode)
2951 start = lrev + 1
2951 start = lrev + 1
2952 if start < qbase:
2952 if start < qbase:
2953 # update the cache (excluding the patches) and save it
2953 # update the cache (excluding the patches) and save it
2954 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2954 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2955 self._updatebranchcache(partial, ctxgen)
2955 self._updatebranchcache(partial, ctxgen)
2956 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2956 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2957 start = qbase
2957 start = qbase
2958 # if start = qbase, the cache is as updated as it should be.
2958 # if start = qbase, the cache is as updated as it should be.
2959 # if start > qbase, the cache includes (part of) the patches.
2959 # if start > qbase, the cache includes (part of) the patches.
2960 # we might as well use it, but we won't save it.
2960 # we might as well use it, but we won't save it.
2961
2961
2962 # update the cache up to the tip
2962 # update the cache up to the tip
2963 ctxgen = (self[r] for r in xrange(start, len(cl)))
2963 ctxgen = (self[r] for r in xrange(start, len(cl)))
2964 self._updatebranchcache(partial, ctxgen)
2964 self._updatebranchcache(partial, ctxgen)
2965
2965
2966 return partial
2966 return partial
2967
2967
2968 if repo.local():
2968 if repo.local():
2969 repo.__class__ = mqrepo
2969 repo.__class__ = mqrepo
2970
2970
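_branchtags above keeps mq changesets out of the persistent branch cache by working in two windows: revisions between the cached tip and qbase are added to the cache and written to disk, while everything from qbase up to the repository tip is only merged in memory and never saved. A worked example with made-up revision numbers:

    lrev, qbase, tiplen = 10, 14, 20    # cached tip, rev of first applied patch, len(changelog)

    start = lrev + 1
    saved = []
    if start < qbase:
        saved = list(range(lrev + 1, qbase))    # revs 11..13: cached and written out
        start = qbase
    unsaved = list(range(start, tiplen))        # revs 14..19: used but never saved

    assert saved == [11, 12, 13]
    assert unsaved == [14, 15, 16, 17, 18, 19]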
2971 def mqimport(orig, ui, repo, *args, **kwargs):
2971 def mqimport(orig, ui, repo, *args, **kwargs):
2972 if (hasattr(repo, 'abort_if_wdir_patched')
2972 if (hasattr(repo, 'abort_if_wdir_patched')
2973 and not kwargs.get('no_commit', False)):
2973 and not kwargs.get('no_commit', False)):
2974 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2974 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2975 kwargs.get('force'))
2975 kwargs.get('force'))
2976 return orig(ui, repo, *args, **kwargs)
2976 return orig(ui, repo, *args, **kwargs)
2977
2977
2978 def mqinit(orig, ui, *args, **kwargs):
2978 def mqinit(orig, ui, *args, **kwargs):
2979 mq = kwargs.pop('mq', None)
2979 mq = kwargs.pop('mq', None)
2980
2980
2981 if not mq:
2981 if not mq:
2982 return orig(ui, *args, **kwargs)
2982 return orig(ui, *args, **kwargs)
2983
2983
2984 if args:
2984 if args:
2985 repopath = args[0]
2985 repopath = args[0]
2986 if not hg.islocal(repopath):
2986 if not hg.islocal(repopath):
2987 raise util.Abort(_('only a local queue repository '
2987 raise util.Abort(_('only a local queue repository '
2988 'may be initialized'))
2988 'may be initialized'))
2989 else:
2989 else:
2990 repopath = cmdutil.findrepo(os.getcwd())
2990 repopath = cmdutil.findrepo(os.getcwd())
2991 if not repopath:
2991 if not repopath:
2992 raise util.Abort(_('there is no Mercurial repository here '
2992 raise util.Abort(_('there is no Mercurial repository here '
2993 '(.hg not found)'))
2993 '(.hg not found)'))
2994 repo = hg.repository(ui, repopath)
2994 repo = hg.repository(ui, repopath)
2995 return qinit(ui, repo, True)
2995 return qinit(ui, repo, True)
2996
2996
2997 def mqcommand(orig, ui, repo, *args, **kwargs):
2997 def mqcommand(orig, ui, repo, *args, **kwargs):
2998 """Add --mq option to operate on patch repository instead of main"""
2998 """Add --mq option to operate on patch repository instead of main"""
2999
2999
3000 # some commands do not like getting unknown options
3000 # some commands do not like getting unknown options
3001 mq = kwargs.pop('mq', None)
3001 mq = kwargs.pop('mq', None)
3002
3002
3003 if not mq:
3003 if not mq:
3004 return orig(ui, repo, *args, **kwargs)
3004 return orig(ui, repo, *args, **kwargs)
3005
3005
3006 q = repo.mq
3006 q = repo.mq
3007 r = q.qrepo()
3007 r = q.qrepo()
3008 if not r:
3008 if not r:
3009 raise util.Abort(_('no queue repository'))
3009 raise util.Abort(_('no queue repository'))
3010 return orig(r.ui, r, *args, **kwargs)
3010 return orig(r.ui, r, *args, **kwargs)
3011
3011
3012 def summary(orig, ui, repo, *args, **kwargs):
3012 def summary(orig, ui, repo, *args, **kwargs):
3013 r = orig(ui, repo, *args, **kwargs)
3013 r = orig(ui, repo, *args, **kwargs)
3014 q = repo.mq
3014 q = repo.mq
3015 m = []
3015 m = []
3016 a, u = len(q.applied), len(q.unapplied(repo))
3016 a, u = len(q.applied), len(q.unapplied(repo))
3017 if a:
3017 if a:
3018 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3018 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3019 if u:
3019 if u:
3020 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3020 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3021 if m:
3021 if m:
3022 ui.write("mq: %s\n" % ', '.join(m))
3022 ui.write("mq: %s\n" % ', '.join(m))
3023 else:
3023 else:
3024 ui.note(_("mq: (empty queue)\n"))
3024 ui.note(_("mq: (empty queue)\n"))
3025 return r
3025 return r
3026
3026
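The wrapped summary command only appends one extra line built from the applied/unapplied counts, for example:

    parts = ['2 applied', '3 unapplied']    # the labelled counts collected above
    print('mq: %s' % ', '.join(parts))      # -> mq: 2 applied, 3 unapplied

With an empty queue nothing is added at normal verbosity; only a note is emitted.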
3027 def uisetup(ui):
3027 def uisetup(ui):
3028 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3028 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3029
3029
3030 extensions.wrapcommand(commands.table, 'import', mqimport)
3030 extensions.wrapcommand(commands.table, 'import', mqimport)
3031 extensions.wrapcommand(commands.table, 'summary', summary)
3031 extensions.wrapcommand(commands.table, 'summary', summary)
3032
3032
3033 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3033 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3034 entry[1].extend(mqopt)
3034 entry[1].extend(mqopt)
3035
3035
3036 nowrap = set(commands.norepo.split(" ") + ['qrecord'])
3036 nowrap = set(commands.norepo.split(" ") + ['qrecord'])
3037
3037
3038 def dotable(cmdtable):
3038 def dotable(cmdtable):
3039 for cmd in cmdtable.keys():
3039 for cmd in cmdtable.keys():
3040 cmd = cmdutil.parsealiases(cmd)[0]
3040 cmd = cmdutil.parsealiases(cmd)[0]
3041 if cmd in nowrap:
3041 if cmd in nowrap:
3042 continue
3042 continue
3043 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3043 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3044 entry[1].extend(mqopt)
3044 entry[1].extend(mqopt)
3045
3045
3046 dotable(commands.table)
3046 dotable(commands.table)
3047
3047
3048 for extname, extmodule in extensions.extensions():
3048 for extname, extmodule in extensions.extensions():
3049 if extmodule.__file__ != __file__:
3049 if extmodule.__file__ != __file__:
3050 dotable(getattr(extmodule, 'cmdtable', {}))
3050 dotable(getattr(extmodule, 'cmdtable', {}))
3051
3051
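dotable above canonicalizes each command table key before deciding whether to attach the --mq flag. Assuming cmdutil.parsealiases strips the '^' priority marker and splits on '|' (matching how the keys in cmdtable below are spelled), the effect is roughly this sketch:

    def canonical(key):
        # hedged stand-in for cmdutil.parsealiases(key)[0]
        return key.lstrip('^').split('|')[0]

    nowrap = set(['qrecord'])   # plus every norepo command
    assert canonical('qdelete|qremove|qrm') == 'qdelete'
    assert canonical('^qpop') == 'qpop'
    assert canonical('qrecord') in nowrap   # qrecord is left unwrapped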
3052 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3052 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3053
3053
3054 cmdtable = {
3054 cmdtable = {
3055 "qapplied":
3055 "qapplied":
3056 (applied,
3056 (applied,
3057 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
3057 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
3058 _('hg qapplied [-1] [-s] [PATCH]')),
3058 _('hg qapplied [-1] [-s] [PATCH]')),
3059 "qclone":
3059 "qclone":
3060 (clone,
3060 (clone,
3061 [('', 'pull', None, _('use pull protocol to copy metadata')),
3061 [('', 'pull', None, _('use pull protocol to copy metadata')),
3062 ('U', 'noupdate', None, _('do not update the new working directories')),
3062 ('U', 'noupdate', None, _('do not update the new working directories')),
3063 ('', 'uncompressed', None,
3063 ('', 'uncompressed', None,
3064 _('use uncompressed transfer (fast over LAN)')),
3064 _('use uncompressed transfer (fast over LAN)')),
3065 ('p', 'patches', '',
3065 ('p', 'patches', '',
3066 _('location of source patch repository'), _('REPO')),
3066 _('location of source patch repository'), _('REPO')),
3067 ] + commands.remoteopts,
3067 ] + commands.remoteopts,
3068 _('hg qclone [OPTION]... SOURCE [DEST]')),
3068 _('hg qclone [OPTION]... SOURCE [DEST]')),
3069 "qcommit|qci":
3069 "qcommit|qci":
3070 (commit,
3070 (commit,
3071 commands.table["^commit|ci"][1],
3071 commands.table["^commit|ci"][1],
3072 _('hg qcommit [OPTION]... [FILE]...')),
3072 _('hg qcommit [OPTION]... [FILE]...')),
3073 "^qdiff":
3073 "^qdiff":
3074 (diff,
3074 (diff,
3075 commands.diffopts + commands.diffopts2 + commands.walkopts,
3075 commands.diffopts + commands.diffopts2 + commands.walkopts,
3076 _('hg qdiff [OPTION]... [FILE]...')),
3076 _('hg qdiff [OPTION]... [FILE]...')),
3077 "qdelete|qremove|qrm":
3077 "qdelete|qremove|qrm":
3078 (delete,
3078 (delete,
3079 [('k', 'keep', None, _('keep patch file')),
3079 [('k', 'keep', None, _('keep patch file')),
3080 ('r', 'rev', [],
3080 ('r', 'rev', [],
3081 _('stop managing a revision (DEPRECATED)'), _('REV'))],
3081 _('stop managing a revision (DEPRECATED)'), _('REV'))],
3082 _('hg qdelete [-k] [PATCH]...')),
3082 _('hg qdelete [-k] [PATCH]...')),
3083 'qfold':
3083 'qfold':
3084 (fold,
3084 (fold,
3085 [('e', 'edit', None, _('edit patch header')),
3085 [('e', 'edit', None, _('edit patch header')),
3086 ('k', 'keep', None, _('keep folded patch files')),
3086 ('k', 'keep', None, _('keep folded patch files')),
3087 ] + commands.commitopts,
3087 ] + commands.commitopts,
3088 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
3088 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
3089 'qgoto':
3089 'qgoto':
3090 (goto,
3090 (goto,
3091 [('f', 'force', None, _('overwrite any local changes'))],
3091 [('f', 'force', None, _('overwrite any local changes'))],
3092 _('hg qgoto [OPTION]... PATCH')),
3092 _('hg qgoto [OPTION]... PATCH')),
3093 'qguard':
3093 'qguard':
3094 (guard,
3094 (guard,
3095 [('l', 'list', None, _('list all patches and guards')),
3095 [('l', 'list', None, _('list all patches and guards')),
3096 ('n', 'none', None, _('drop all guards'))],
3096 ('n', 'none', None, _('drop all guards'))],
3097 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
3097 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
3098 'qheader': (header, [], _('hg qheader [PATCH]')),
3098 'qheader': (header, [], _('hg qheader [PATCH]')),
3099 "qimport":
3099 "qimport":
3100 (qimport,
3100 (qimport,
3101 [('e', 'existing', None, _('import file in patch directory')),
3101 [('e', 'existing', None, _('import file in patch directory')),
3102 ('n', 'name', '',
3102 ('n', 'name', '',
3103 _('name of patch file'), _('NAME')),
3103 _('name of patch file'), _('NAME')),
3104 ('f', 'force', None, _('overwrite existing files')),
3104 ('f', 'force', None, _('overwrite existing files')),
3105 ('r', 'rev', [],
3105 ('r', 'rev', [],
3106 _('place existing revisions under mq control'), _('REV')),
3106 _('place existing revisions under mq control'), _('REV')),
3107 ('g', 'git', None, _('use git extended diff format')),
3107 ('g', 'git', None, _('use git extended diff format')),
3108 ('P', 'push', None, _('qpush after importing'))],
3108 ('P', 'push', None, _('qpush after importing'))],
3109 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
3109 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
3110 "^qinit":
3110 "^qinit":
3111 (init,
3111 (init,
3112 [('c', 'create-repo', None, _('create queue repository'))],
3112 [('c', 'create-repo', None, _('create queue repository'))],
3113 _('hg qinit [-c]')),
3113 _('hg qinit [-c]')),
3114 "^qnew":
3114 "^qnew":
3115 (new,
3115 (new,
3116 [('e', 'edit', None, _('edit commit message')),
3116 [('e', 'edit', None, _('edit commit message')),
3117 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
3117 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
3118 ('g', 'git', None, _('use git extended diff format')),
3118 ('g', 'git', None, _('use git extended diff format')),
3119 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
3119 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
3120 ('u', 'user', '',
3120 ('u', 'user', '',
3121 _('add "From: <USER>" to patch'), _('USER')),
3121 _('add "From: <USER>" to patch'), _('USER')),
3122 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
3122 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
3123 ('d', 'date', '',
3123 ('d', 'date', '',
3124 _('add "Date: <DATE>" to patch'), _('DATE'))
3124 _('add "Date: <DATE>" to patch'), _('DATE'))
3125 ] + commands.walkopts + commands.commitopts,
3125 ] + commands.walkopts + commands.commitopts,
3126 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
3126 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
3127 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
3127 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
3128 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
3128 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
3129 "^qpop":
3129 "^qpop":
3130 (pop,
3130 (pop,
3131 [('a', 'all', None, _('pop all patches')),
3131 [('a', 'all', None, _('pop all patches')),
3132 ('n', 'name', '',
3132 ('n', 'name', '',
3133 _('queue name to pop (DEPRECATED)'), _('NAME')),
3133 _('queue name to pop (DEPRECATED)'), _('NAME')),
3134 ('f', 'force', None, _('forget any local changes to patched files'))],
3134 ('f', 'force', None, _('forget any local changes to patched files'))],
3135 _('hg qpop [-a] [-f] [PATCH | INDEX]')),
3135 _('hg qpop [-a] [-f] [PATCH | INDEX]')),
3136 "^qpush":
3136 "^qpush":
3137 (push,
3137 (push,
3138 [('f', 'force', None, _('apply on top of local changes')),
3138 [('f', 'force', None, _('apply on top of local changes')),
3139 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
3139 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
3140 ('l', 'list', None, _('list patch name in commit text')),
3140 ('l', 'list', None, _('list patch name in commit text')),
3141 ('a', 'all', None, _('apply all patches')),
3141 ('a', 'all', None, _('apply all patches')),
3142 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
3142 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
3143 ('n', 'name', '',
3143 ('n', 'name', '',
3144 _('merge queue name (DEPRECATED)'), _('NAME')),
3144 _('merge queue name (DEPRECATED)'), _('NAME')),
3145 ('', 'move', None, _('reorder patch series and apply only the patch'))],
3145 ('', 'move', None, _('reorder patch series and apply only the patch'))],
3146 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
3146 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
3147 "^qrefresh":
3147 "^qrefresh":
3148 (refresh,
3148 (refresh,
3149 [('e', 'edit', None, _('edit commit message')),
3149 [('e', 'edit', None, _('edit commit message')),
3150 ('g', 'git', None, _('use git extended diff format')),
3150 ('g', 'git', None, _('use git extended diff format')),
3151 ('s', 'short', None,
3151 ('s', 'short', None,
3152 _('refresh only files already in the patch and specified files')),
3152 _('refresh only files already in the patch and specified files')),
3153 ('U', 'currentuser', None,
3153 ('U', 'currentuser', None,
3154 _('add/update author field in patch with current user')),
3154 _('add/update author field in patch with current user')),
3155 ('u', 'user', '',
3155 ('u', 'user', '',
3156 _('add/update author field in patch with given user'), _('USER')),
3156 _('add/update author field in patch with given user'), _('USER')),
3157 ('D', 'currentdate', None,
3157 ('D', 'currentdate', None,
3158 _('add/update date field in patch with current date')),
3158 _('add/update date field in patch with current date')),
3159 ('d', 'date', '',
3159 ('d', 'date', '',
3160 _('add/update date field in patch with given date'), _('DATE'))
3160 _('add/update date field in patch with given date'), _('DATE'))
3161 ] + commands.walkopts + commands.commitopts,
3161 ] + commands.walkopts + commands.commitopts,
3162 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
3162 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
3163 'qrename|qmv':
3163 'qrename|qmv':
3164 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
3164 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
3165 "qrestore":
3165 "qrestore":
3166 (restore,
3166 (restore,
3167 [('d', 'delete', None, _('delete save entry')),
3167 [('d', 'delete', None, _('delete save entry')),
3168 ('u', 'update', None, _('update queue working directory'))],
3168 ('u', 'update', None, _('update queue working directory'))],
3169 _('hg qrestore [-d] [-u] REV')),
3169 _('hg qrestore [-d] [-u] REV')),
3170 "qsave":
3170 "qsave":
3171 (save,
3171 (save,
3172 [('c', 'copy', None, _('copy patch directory')),
3172 [('c', 'copy', None, _('copy patch directory')),
3173 ('n', 'name', '',
3173 ('n', 'name', '',
3174 _('copy directory name'), _('NAME')),
3174 _('copy directory name'), _('NAME')),
3175 ('e', 'empty', None, _('clear queue status file')),
3175 ('e', 'empty', None, _('clear queue status file')),
3176 ('f', 'force', None, _('force copy'))] + commands.commitopts,
3176 ('f', 'force', None, _('force copy'))] + commands.commitopts,
3177 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
3177 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
3178 "qselect":
3178 "qselect":
3179 (select,
3179 (select,
3180 [('n', 'none', None, _('disable all guards')),
3180 [('n', 'none', None, _('disable all guards')),
3181 ('s', 'series', None, _('list all guards in series file')),
3181 ('s', 'series', None, _('list all guards in series file')),
3182 ('', 'pop', None, _('pop to before first guarded applied patch')),
3182 ('', 'pop', None, _('pop to before first guarded applied patch')),
3183 ('', 'reapply', None, _('pop, then reapply patches'))],
3183 ('', 'reapply', None, _('pop, then reapply patches'))],
3184 _('hg qselect [OPTION]... [GUARD]...')),
3184 _('hg qselect [OPTION]... [GUARD]...')),
3185 "qseries":
3185 "qseries":
3186 (series,
3186 (series,
3187 [('m', 'missing', None, _('print patches not in series')),
3187 [('m', 'missing', None, _('print patches not in series')),
3188 ] + seriesopts,
3188 ] + seriesopts,
3189 _('hg qseries [-ms]')),
3189 _('hg qseries [-ms]')),
3190 "strip":
3190 "strip":
3191 (strip,
3191 (strip,
3192 [('f', 'force', None, _('force removal of changesets even if the '
3192 [('f', 'force', None, _('force removal of changesets even if the '
3193 'working directory has uncommitted changes')),
3193 'working directory has uncommitted changes')),
3194 ('b', 'backup', None, _('bundle only changesets with local revision'
3194 ('b', 'backup', None, _('bundle only changesets with local revision'
3195 ' number greater than REV which are not'
3195 ' number greater than REV which are not'
3196 ' descendants of REV (DEPRECATED)')),
3196 ' descendants of REV (DEPRECATED)')),
3197 ('n', 'no-backup', None, _('no backups')),
3197 ('n', 'no-backup', None, _('no backups')),
3198 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
3198 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
3199 ('k', 'keep', None, _("do not modify working copy during strip"))],
3199 ('k', 'keep', None, _("do not modify working copy during strip"))],
3200 _('hg strip [-k] [-f] [-n] REV...')),
3200 _('hg strip [-k] [-f] [-n] REV...')),
3201 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3201 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3202 "qunapplied":
3202 "qunapplied":
3203 (unapplied,
3203 (unapplied,
3204 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3204 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3205 _('hg qunapplied [-1] [-s] [PATCH]')),
3205 _('hg qunapplied [-1] [-s] [PATCH]')),
3206 "qfinish":
3206 "qfinish":
3207 (finish,
3207 (finish,
3208 [('a', 'applied', None, _('finish all applied changesets'))],
3208 [('a', 'applied', None, _('finish all applied changesets'))],
3209 _('hg qfinish [-a] [REV]...')),
3209 _('hg qfinish [-a] [REV]...')),
3210 'qqueue':
3210 'qqueue':
3211 (qqueue,
3211 (qqueue,
3212 [
3212 [
3213 ('l', 'list', False, _('list all available queues')),
3213 ('l', 'list', False, _('list all available queues')),
3214 ('c', 'create', False, _('create new queue')),
3214 ('c', 'create', False, _('create new queue')),
3215 ('', 'rename', False, _('rename active queue')),
3215 ('', 'rename', False, _('rename active queue')),
3216 ('', 'delete', False, _('delete reference to queue')),
3216 ('', 'delete', False, _('delete reference to queue')),
3217 ('', 'purge', False, _('delete queue, and remove patch dir')),
3217 ('', 'purge', False, _('delete queue, and remove patch dir')),
3218 ],
3218 ],
3219 _('[OPTION] [QUEUE]')),
3219 _('[OPTION] [QUEUE]')),
3220 }
3220 }
3221
3221
3222 colortable = {'qguard.negative': 'red',
3222 colortable = {'qguard.negative': 'red',
3223 'qguard.positive': 'yellow',
3223 'qguard.positive': 'yellow',
3224 'qguard.unguarded': 'green',
3224 'qguard.unguarded': 'green',
3225 'qseries.applied': 'blue bold underline',
3225 'qseries.applied': 'blue bold underline',
3226 'qseries.guarded': 'black bold',
3226 'qseries.guarded': 'black bold',
3227 'qseries.missing': 'red bold',
3227 'qseries.missing': 'red bold',
3228 'qseries.unapplied': 'black bold'}
3228 'qseries.unapplied': 'black bold'}
@@ -1,569 +1,569 b''
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''commands to interactively select changes for commit/qrefresh'''
8 '''commands to interactively select changes for commit/qrefresh'''
9
9
10 from mercurial.i18n import gettext, _
10 from mercurial.i18n import gettext, _
11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, os, re, tempfile
13 import copy, cStringIO, errno, os, re, tempfile
14
14
15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
16
16
17 def scanpatch(fp):
17 def scanpatch(fp):
18 """like patch.iterhunks, but yield different events
18 """like patch.iterhunks, but yield different events
19
19
20 - ('file', [header_lines + fromfile + tofile])
20 - ('file', [header_lines + fromfile + tofile])
21 - ('context', [context_lines])
21 - ('context', [context_lines])
22 - ('hunk', [hunk_lines])
22 - ('hunk', [hunk_lines])
23 - ('range', (-start,len, +start,len, diffp))
23 - ('range', (-start,len, +start,len, diffp))
24 """
24 """
25 lr = patch.linereader(fp)
25 lr = patch.linereader(fp)
26
26
27 def scanwhile(first, p):
27 def scanwhile(first, p):
28 """scan lr while predicate holds"""
28 """scan lr while predicate holds"""
29 lines = [first]
29 lines = [first]
30 while True:
30 while True:
31 line = lr.readline()
31 line = lr.readline()
32 if not line:
32 if not line:
33 break
33 break
34 if p(line):
34 if p(line):
35 lines.append(line)
35 lines.append(line)
36 else:
36 else:
37 lr.push(line)
37 lr.push(line)
38 break
38 break
39 return lines
39 return lines
40
40
41 while True:
41 while True:
42 line = lr.readline()
42 line = lr.readline()
43 if not line:
43 if not line:
44 break
44 break
45 if line.startswith('diff --git a/'):
45 if line.startswith('diff --git a/'):
46 def notheader(line):
46 def notheader(line):
47 s = line.split(None, 1)
47 s = line.split(None, 1)
48 return not s or s[0] not in ('---', 'diff')
48 return not s or s[0] not in ('---', 'diff')
49 header = scanwhile(line, notheader)
49 header = scanwhile(line, notheader)
50 fromfile = lr.readline()
50 fromfile = lr.readline()
51 if fromfile.startswith('---'):
51 if fromfile.startswith('---'):
52 tofile = lr.readline()
52 tofile = lr.readline()
53 header += [fromfile, tofile]
53 header += [fromfile, tofile]
54 else:
54 else:
55 lr.push(fromfile)
55 lr.push(fromfile)
56 yield 'file', header
56 yield 'file', header
57 elif line[0] == ' ':
57 elif line[0] == ' ':
58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
59 elif line[0] in '-+':
59 elif line[0] in '-+':
60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
61 else:
61 else:
62 m = lines_re.match(line)
62 m = lines_re.match(line)
63 if m:
63 if m:
64 yield 'range', m.groups()
64 yield 'range', m.groups()
65 else:
65 else:
66 raise patch.PatchError('unknown patch content: %r' % line)
66 raise patch.PatchError('unknown patch content: %r' % line)
67
67
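As a concrete illustration of the event stream described in the scanpatch docstring, the minimal git-style diff below yields one 'file', one 'range', one 'context' and one 'hunk' event. The import assumes this module is installed as hgext.record, as it is in a normal Mercurial installation:

    import cStringIO
    from hgext.record import scanpatch

    sample = ''.join([
        'diff --git a/foo.txt b/foo.txt\n',
        '--- a/foo.txt\n',
        '+++ b/foo.txt\n',
        '@@ -1,2 +1,2 @@\n',
        ' keep me\n',
        '-old line\n',
        '+new line\n',
    ])
    events = [kind for kind, data in scanpatch(cStringIO.StringIO(sample))]
    assert events == ['file', 'range', 'context', 'hunk']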
68 class header(object):
68 class header(object):
69 """patch header
69 """patch header
70
70
71 XXX shouldn't we move this to mercurial/patch.py?
71 XXX shouldn't we move this to mercurial/patch.py?
72 """
72 """
73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
75 pretty_re = re.compile('(?:new file|deleted file) ')
75 pretty_re = re.compile('(?:new file|deleted file) ')
76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
77
77
78 def __init__(self, header):
78 def __init__(self, header):
79 self.header = header
79 self.header = header
80 self.hunks = []
80 self.hunks = []
81
81
82 def binary(self):
82 def binary(self):
83 for h in self.header:
83 for h in self.header:
84 if h.startswith('index '):
84 if h.startswith('index '):
85 return True
85 return True
86
86
87 def pretty(self, fp):
87 def pretty(self, fp):
88 for h in self.header:
88 for h in self.header:
89 if h.startswith('index '):
89 if h.startswith('index '):
90 fp.write(_('this modifies a binary file (all or nothing)\n'))
90 fp.write(_('this modifies a binary file (all or nothing)\n'))
91 break
91 break
92 if self.pretty_re.match(h):
92 if self.pretty_re.match(h):
93 fp.write(h)
93 fp.write(h)
94 if self.binary():
94 if self.binary():
95 fp.write(_('this is a binary file\n'))
95 fp.write(_('this is a binary file\n'))
96 break
96 break
97 if h.startswith('---'):
97 if h.startswith('---'):
98 fp.write(_('%d hunks, %d lines changed\n') %
98 fp.write(_('%d hunks, %d lines changed\n') %
99 (len(self.hunks),
99 (len(self.hunks),
100 sum([max(h.added, h.removed) for h in self.hunks])))
100 sum([max(h.added, h.removed) for h in self.hunks])))
101 break
101 break
102 fp.write(h)
102 fp.write(h)
103
103
104 def write(self, fp):
104 def write(self, fp):
105 fp.write(''.join(self.header))
105 fp.write(''.join(self.header))
106
106
107 def allhunks(self):
107 def allhunks(self):
108 for h in self.header:
108 for h in self.header:
109 if self.allhunks_re.match(h):
109 if self.allhunks_re.match(h):
110 return True
110 return True
111
111
112 def files(self):
112 def files(self):
113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
114 if fromfile == tofile:
114 if fromfile == tofile:
115 return [fromfile]
115 return [fromfile]
116 return [fromfile, tofile]
116 return [fromfile, tofile]
117
117
118 def filename(self):
118 def filename(self):
119 return self.files()[-1]
119 return self.files()[-1]
120
120
121 def __repr__(self):
121 def __repr__(self):
122 return '<header %s>' % (' '.join(map(repr, self.files())))
122 return '<header %s>' % (' '.join(map(repr, self.files())))
123
123
124 def special(self):
124 def special(self):
125 for h in self.header:
125 for h in self.header:
126 if self.special_re.match(h):
126 if self.special_re.match(h):
127 return True
127 return True
128
128
129 def countchanges(hunk):
129 def countchanges(hunk):
130 """hunk -> (n+,n-)"""
130 """hunk -> (n+,n-)"""
131 add = len([h for h in hunk if h[0] == '+'])
131 add = len([h for h in hunk if h[0] == '+'])
132 rem = len([h for h in hunk if h[0] == '-'])
132 rem = len([h for h in hunk if h[0] == '-'])
133 return add, rem
133 return add, rem
134
134
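countchanges just counts the '+' and '-' lines in a hunk body; for instance:

    hunk_body = ['-old\n', '+new\n', '+added\n', ' context\n']
    add = len([l for l in hunk_body if l[0] == '+'])
    rem = len([l for l in hunk_body if l[0] == '-'])
    assert (add, rem) == (2, 1)    # the pair countchanges() above returns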
135 class hunk(object):
135 class hunk(object):
136 """patch hunk
136 """patch hunk
137
137
138 XXX shouldn't we merge this with patch.hunk?
138 XXX shouldn't we merge this with patch.hunk?
139 """
139 """
140 maxcontext = 3
140 maxcontext = 3
141
141
142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
143 def trimcontext(number, lines):
143 def trimcontext(number, lines):
144 delta = len(lines) - self.maxcontext
144 delta = len(lines) - self.maxcontext
145 if False and delta > 0:
145 if False and delta > 0:
146 return number + delta, lines[:self.maxcontext]
146 return number + delta, lines[:self.maxcontext]
147 return number, lines
147 return number, lines
148
148
149 self.header = header
149 self.header = header
150 self.fromline, self.before = trimcontext(fromline, before)
150 self.fromline, self.before = trimcontext(fromline, before)
151 self.toline, self.after = trimcontext(toline, after)
151 self.toline, self.after = trimcontext(toline, after)
152 self.proc = proc
152 self.proc = proc
153 self.hunk = hunk
153 self.hunk = hunk
154 self.added, self.removed = countchanges(self.hunk)
154 self.added, self.removed = countchanges(self.hunk)
155
155
156 def write(self, fp):
156 def write(self, fp):
157 delta = len(self.before) + len(self.after)
157 delta = len(self.before) + len(self.after)
158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
159 delta -= 1
159 delta -= 1
160 fromlen = delta + self.removed
160 fromlen = delta + self.removed
161 tolen = delta + self.added
161 tolen = delta + self.added
162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
163 (self.fromline, fromlen, self.toline, tolen,
163 (self.fromline, fromlen, self.toline, tolen,
164 self.proc and (' ' + self.proc)))
164 self.proc and (' ' + self.proc)))
165 fp.write(''.join(self.before + self.hunk + self.after))
165 fp.write(''.join(self.before + self.hunk + self.after))
166
166
167 pretty = write
167 pretty = write
168
168
169 def filename(self):
169 def filename(self):
170 return self.header.filename()
170 return self.header.filename()
171
171
172 def __repr__(self):
172 def __repr__(self):
173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
174
174
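The @@ header that hunk.write emits is pure arithmetic over the pieces stored above: the 'from' length is context-before plus context-after plus removed lines, and the 'to' length is the same context plus added lines (one less when the trailing '\ No newline at end of file' marker is present). A worked example:

    before, after = 2, 1      # context lines kept around the change
    added, removed = 2, 1     # as counted by countchanges()
    fromline, toline = 10, 10

    delta = before + after    # no '\ No newline' marker in this example
    line = '@@ -%d,%d +%d,%d @@' % (fromline, delta + removed, toline, delta + added)
    assert line == '@@ -10,4 +10,5 @@'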
175 def parsepatch(fp):
175 def parsepatch(fp):
176 """patch -> [] of hunks """
176 """patch -> [] of hunks """
177 class parser(object):
177 class parser(object):
178 """patch parsing state machine"""
178 """patch parsing state machine"""
179 def __init__(self):
179 def __init__(self):
180 self.fromline = 0
180 self.fromline = 0
181 self.toline = 0
181 self.toline = 0
182 self.proc = ''
182 self.proc = ''
183 self.header = None
183 self.header = None
184 self.context = []
184 self.context = []
185 self.before = []
185 self.before = []
186 self.hunk = []
186 self.hunk = []
187 self.stream = []
187 self.stream = []
188
188
189 def addrange(self, limits):
189 def addrange(self, limits):
190 fromstart, fromend, tostart, toend, proc = limits
190 fromstart, fromend, tostart, toend, proc = limits
191 self.fromline = int(fromstart)
191 self.fromline = int(fromstart)
192 self.toline = int(tostart)
192 self.toline = int(tostart)
193 self.proc = proc
193 self.proc = proc
194
194
195 def addcontext(self, context):
195 def addcontext(self, context):
196 if self.hunk:
196 if self.hunk:
197 h = hunk(self.header, self.fromline, self.toline, self.proc,
197 h = hunk(self.header, self.fromline, self.toline, self.proc,
198 self.before, self.hunk, context)
198 self.before, self.hunk, context)
199 self.header.hunks.append(h)
199 self.header.hunks.append(h)
200 self.stream.append(h)
200 self.stream.append(h)
201 self.fromline += len(self.before) + h.removed
201 self.fromline += len(self.before) + h.removed
202 self.toline += len(self.before) + h.added
202 self.toline += len(self.before) + h.added
203 self.before = []
203 self.before = []
204 self.hunk = []
204 self.hunk = []
205 self.proc = ''
205 self.proc = ''
206 self.context = context
206 self.context = context
207
207
208 def addhunk(self, hunk):
208 def addhunk(self, hunk):
209 if self.context:
209 if self.context:
210 self.before = self.context
210 self.before = self.context
211 self.context = []
211 self.context = []
212 self.hunk = hunk
212 self.hunk = hunk
213
213
214 def newfile(self, hdr):
214 def newfile(self, hdr):
215 self.addcontext([])
215 self.addcontext([])
216 h = header(hdr)
216 h = header(hdr)
217 self.stream.append(h)
217 self.stream.append(h)
218 self.header = h
218 self.header = h
219
219
220 def finished(self):
220 def finished(self):
221 self.addcontext([])
221 self.addcontext([])
222 return self.stream
222 return self.stream
223
223
224 transitions = {
224 transitions = {
225 'file': {'context': addcontext,
225 'file': {'context': addcontext,
226 'file': newfile,
226 'file': newfile,
227 'hunk': addhunk,
227 'hunk': addhunk,
228 'range': addrange},
228 'range': addrange},
229 'context': {'file': newfile,
229 'context': {'file': newfile,
230 'hunk': addhunk,
230 'hunk': addhunk,
231 'range': addrange},
231 'range': addrange},
232 'hunk': {'context': addcontext,
232 'hunk': {'context': addcontext,
233 'file': newfile,
233 'file': newfile,
234 'range': addrange},
234 'range': addrange},
235 'range': {'context': addcontext,
235 'range': {'context': addcontext,
236 'hunk': addhunk},
236 'hunk': addhunk},
237 }
237 }
238
238
239 p = parser()
239 p = parser()
240
240
241 state = 'context'
241 state = 'context'
242 for newstate, data in scanpatch(fp):
242 for newstate, data in scanpatch(fp):
243 try:
243 try:
244 p.transitions[state][newstate](p, data)
244 p.transitions[state][newstate](p, data)
245 except KeyError:
245 except KeyError:
246 raise patch.PatchError('unhandled transition: %s -> %s' %
246 raise patch.PatchError('unhandled transition: %s -> %s' %
247 (state, newstate))
247 (state, newstate))
248 state = newstate
248 state = newstate
249 return p.finished()
249 return p.finished()
250
250
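Feeding a minimal diff (the same shape as in the scanpatch sketch above) through parsepatch shows what its result looks like: a flat stream where each header object is followed by its hunk objects, with the header also keeping them on header.hunks. Again this assumes the module is importable as hgext.record:

    import cStringIO
    from hgext.record import parsepatch, header

    sample = ''.join([
        'diff --git a/foo.txt b/foo.txt\n',
        '--- a/foo.txt\n',
        '+++ b/foo.txt\n',
        '@@ -1,2 +1,2 @@\n',
        ' keep me\n',
        '-old line\n',
        '+new line\n',
    ])
    stream = parsepatch(cStringIO.StringIO(sample))
    hdr, hnk = stream
    assert isinstance(hdr, header) and hdr.hunks == [hnk]
    assert (hnk.added, hnk.removed) == (1, 1)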
251 def filterpatch(ui, chunks):
251 def filterpatch(ui, chunks):
252 """Interactively filter patch chunks into applied-only chunks"""
252 """Interactively filter patch chunks into applied-only chunks"""
253 chunks = list(chunks)
253 chunks = list(chunks)
254 chunks.reverse()
254 chunks.reverse()
255 seen = set()
255 seen = set()
256 def consumefile():
256 def consumefile():
257 """fetch next portion from chunks until a 'header' is seen
257 """fetch next portion from chunks until a 'header' is seen
258 NB: header == new-file mark
258 NB: header == new-file mark
259 """
259 """
260 consumed = []
260 consumed = []
261 while chunks:
261 while chunks:
262 if isinstance(chunks[-1], header):
262 if isinstance(chunks[-1], header):
263 break
263 break
264 else:
264 else:
265 consumed.append(chunks.pop())
265 consumed.append(chunks.pop())
266 return consumed
266 return consumed
267
267
268 resp_all = [None] # these two are changed from inside prompt,
268 resp_all = [None] # these two are changed from inside prompt,
269 resp_file = [None] # so they can't be plain local variables
269 resp_file = [None] # so they can't be plain local variables
270 applied = {} # 'filename' -> [] of chunks
270 applied = {} # 'filename' -> [] of chunks
271 def prompt(query):
271 def prompt(query):
272 """prompt query, and process base inputs
272 """prompt query, and process base inputs
273
273
274 - y/n for the rest of file
274 - y/n for the rest of file
275 - y/n for the rest
275 - y/n for the rest
276 - ? (help)
276 - ? (help)
277 - q (quit)
277 - q (quit)
278
278
279 Returns True/False and sets resp_all and resp_file as
279 Returns True/False and sets resp_all and resp_file as
280 appropriate.
280 appropriate.
281 """
281 """
282 if resp_all[0] is not None:
282 if resp_all[0] is not None:
283 return resp_all[0]
283 return resp_all[0]
284 if resp_file[0] is not None:
284 if resp_file[0] is not None:
285 return resp_file[0]
285 return resp_file[0]
286 while True:
286 while True:
287 resps = _('[Ynsfdaq?]')
287 resps = _('[Ynsfdaq?]')
288 choices = (_('&Yes, record this change'),
288 choices = (_('&Yes, record this change'),
289 _('&No, skip this change'),
289 _('&No, skip this change'),
290 _('&Skip remaining changes to this file'),
290 _('&Skip remaining changes to this file'),
291 _('Record remaining changes to this &file'),
291 _('Record remaining changes to this &file'),
292 _('&Done, skip remaining changes and files'),
292 _('&Done, skip remaining changes and files'),
293 _('Record &all changes to all remaining files'),
293 _('Record &all changes to all remaining files'),
294 _('&Quit, recording no changes'),
294 _('&Quit, recording no changes'),
295 _('&?'))
295 _('&?'))
296 r = ui.promptchoice("%s %s" % (query, resps), choices)
296 r = ui.promptchoice("%s %s" % (query, resps), choices)
297 ui.write("\n")
297 ui.write("\n")
298 if r == 7: # ?
298 if r == 7: # ?
299 doc = gettext(record.__doc__)
299 doc = gettext(record.__doc__)
300 c = doc.find('::') + 2
300 c = doc.find('::') + 2
301 for l in doc[c:].splitlines():
301 for l in doc[c:].splitlines():
302 if l.startswith(' '):
302 if l.startswith(' '):
303 ui.write(l.strip(), '\n')
303 ui.write(l.strip(), '\n')
304 continue
304 continue
305 elif r == 0: # yes
305 elif r == 0: # yes
306 ret = True
306 ret = True
307 elif r == 1: # no
307 elif r == 1: # no
308 ret = False
308 ret = False
309 elif r == 2: # Skip
309 elif r == 2: # Skip
310 ret = resp_file[0] = False
310 ret = resp_file[0] = False
311 elif r == 3: # file (Record remaining)
311 elif r == 3: # file (Record remaining)
312 ret = resp_file[0] = True
312 ret = resp_file[0] = True
313 elif r == 4: # done, skip remaining
313 elif r == 4: # done, skip remaining
314 ret = resp_all[0] = False
314 ret = resp_all[0] = False
315 elif r == 5: # all
315 elif r == 5: # all
316 ret = resp_all[0] = True
316 ret = resp_all[0] = True
317 elif r == 6: # quit
317 elif r == 6: # quit
318 raise util.Abort(_('user quit'))
318 raise util.Abort(_('user quit'))
319 return ret
319 return ret
320 pos, total = 0, len(chunks) - 1
320 pos, total = 0, len(chunks) - 1
321 while chunks:
321 while chunks:
322 pos = total - len(chunks) + 1
322 pos = total - len(chunks) + 1
323 chunk = chunks.pop()
323 chunk = chunks.pop()
324 if isinstance(chunk, header):
324 if isinstance(chunk, header):
325 # new-file mark
325 # new-file mark
326 resp_file = [None]
326 resp_file = [None]
327 fixoffset = 0
327 fixoffset = 0
328 hdr = ''.join(chunk.header)
328 hdr = ''.join(chunk.header)
329 if hdr in seen:
329 if hdr in seen:
330 consumefile()
330 consumefile()
331 continue
331 continue
332 seen.add(hdr)
332 seen.add(hdr)
333 if resp_all[0] is None:
333 if resp_all[0] is None:
334 chunk.pretty(ui)
334 chunk.pretty(ui)
335 r = prompt(_('examine changes to %s?') %
335 r = prompt(_('examine changes to %s?') %
336 _(' and ').join(map(repr, chunk.files())))
336 _(' and ').join(map(repr, chunk.files())))
337 if r:
337 if r:
338 applied[chunk.filename()] = [chunk]
338 applied[chunk.filename()] = [chunk]
339 if chunk.allhunks():
339 if chunk.allhunks():
340 applied[chunk.filename()] += consumefile()
340 applied[chunk.filename()] += consumefile()
341 else:
341 else:
342 consumefile()
342 consumefile()
343 else:
343 else:
344 # new hunk
344 # new hunk
345 if resp_file[0] is None and resp_all[0] is None:
345 if resp_file[0] is None and resp_all[0] is None:
346 chunk.pretty(ui)
346 chunk.pretty(ui)
347 r = total == 1 and prompt(_('record this change to %r?') %
347 r = (total == 1
348 chunk.filename()) \
348 and prompt(_('record this change to %r?') % chunk.filename())
349 or prompt(_('record change %d/%d to %r?') %
349 or prompt(_('record change %d/%d to %r?') %
350 (pos, total, chunk.filename()))
350 (pos, total, chunk.filename())))
351 if r:
351 if r:
352 if fixoffset:
352 if fixoffset:
353 chunk = copy.copy(chunk)
353 chunk = copy.copy(chunk)
354 chunk.toline += fixoffset
354 chunk.toline += fixoffset
355 applied[chunk.filename()].append(chunk)
355 applied[chunk.filename()].append(chunk)
356 else:
356 else:
357 fixoffset += chunk.removed - chunk.added
357 fixoffset += chunk.removed - chunk.added
358 return sum([h for h in applied.itervalues()
358 return sum([h for h in applied.itervalues()
359 if h[0].special() or len(h) > 1], [])
359 if h[0].special() or len(h) > 1], [])
360
360
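One subtle piece of bookkeeping in the loop above is fixoffset: every skipped hunk changes the length of the file the remaining hunks will actually be applied to, so each rejected hunk shifts the target ('to') line of later kept hunks by removed minus added. A small worked example:

    # three hunks of one file: (toline, added, removed); user answers y, n, y
    hunks = [(10, 2, 0), (30, 0, 3), (50, 1, 1)]
    keep = [True, False, True]

    fixoffset = 0
    kept_tolines = []
    for (toline, added, removed), recorded in zip(hunks, keep):
        if recorded:
            kept_tolines.append(toline + fixoffset)
        else:
            # the skipped -3/+0 change never happens, so later hunks
            # land three lines further down than the full diff said
            fixoffset += removed - added

    assert kept_tolines == [10, 53]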
361 def record(ui, repo, *pats, **opts):
361 def record(ui, repo, *pats, **opts):
362 '''interactively select changes to commit
362 '''interactively select changes to commit
363
363
364 If a list of files is omitted, all changes reported by :hg:`status`
364 If a list of files is omitted, all changes reported by :hg:`status`
365 will be candidates for recording.
365 will be candidates for recording.
366
366
367 See :hg:`help dates` for a list of formats valid for -d/--date.
367 See :hg:`help dates` for a list of formats valid for -d/--date.
368
368
369 You will be prompted for whether to record changes to each
369 You will be prompted for whether to record changes to each
370 modified file, and for files with multiple changes, for each
370 modified file, and for files with multiple changes, for each
371 change to use. For each query, the following responses are
371 change to use. For each query, the following responses are
372 possible::
372 possible::
373
373
374 y - record this change
374 y - record this change
375 n - skip this change
375 n - skip this change
376
376
377 s - skip remaining changes to this file
377 s - skip remaining changes to this file
378 f - record remaining changes to this file
378 f - record remaining changes to this file
379
379
380 d - done, skip remaining changes and files
380 d - done, skip remaining changes and files
381 a - record all changes to all remaining files
381 a - record all changes to all remaining files
382 q - quit, recording no changes
382 q - quit, recording no changes
383
383
384 ? - display help
384 ? - display help
385
385
386 This command is not available when committing a merge.'''
386 This command is not available when committing a merge.'''
387
387
388 dorecord(ui, repo, commands.commit, *pats, **opts)
388 dorecord(ui, repo, commands.commit, *pats, **opts)
389
389
390
390
391 def qrecord(ui, repo, patch, *pats, **opts):
391 def qrecord(ui, repo, patch, *pats, **opts):
392 '''interactively record a new patch
392 '''interactively record a new patch
393
393
394 See :hg:`help qnew` & :hg:`help record` for more information and
394 See :hg:`help qnew` & :hg:`help record` for more information and
395 usage.
395 usage.
396 '''
396 '''
397
397
398 try:
398 try:
399 mq = extensions.find('mq')
399 mq = extensions.find('mq')
400 except KeyError:
400 except KeyError:
401 raise util.Abort(_("'mq' extension not loaded"))
401 raise util.Abort(_("'mq' extension not loaded"))
402
402
403 def committomq(ui, repo, *pats, **opts):
403 def committomq(ui, repo, *pats, **opts):
404 mq.new(ui, repo, patch, *pats, **opts)
404 mq.new(ui, repo, patch, *pats, **opts)
405
405
406 opts = opts.copy()
406 opts = opts.copy()
407 opts['force'] = True # always 'qnew -f'
407 opts['force'] = True # always 'qnew -f'
408 dorecord(ui, repo, committomq, *pats, **opts)
408 dorecord(ui, repo, committomq, *pats, **opts)
409
409
410
410
411 def dorecord(ui, repo, commitfunc, *pats, **opts):
411 def dorecord(ui, repo, commitfunc, *pats, **opts):
412 if not ui.interactive():
412 if not ui.interactive():
413 raise util.Abort(_('running non-interactively, use commit instead'))
413 raise util.Abort(_('running non-interactively, use commit instead'))
414
414
415 def recordfunc(ui, repo, message, match, opts):
415 def recordfunc(ui, repo, message, match, opts):
416 """This is generic record driver.
416 """This is generic record driver.
417
417
418 Its job is to interactively filter local changes, and accordingly
418 Its job is to interactively filter local changes, and accordingly
419 prepare the working directory into a state where the job can be delegated
419 prepare the working directory into a state where the job can be delegated
420 to a non-interactive commit command such as 'commit' or 'qrefresh'.
420 to a non-interactive commit command such as 'commit' or 'qrefresh'.
421
421
422 After the actual job is done by the non-interactive command, the working
422 After the actual job is done by the non-interactive command, the working
423 directory state is restored to the original.
423 directory state is restored to the original.
424
424
425 In the end we'll record the interesting changes, and everything else will
425 In the end we'll record the interesting changes, and everything else will
426 be left in place, so the user can continue their work.
426 be left in place, so the user can continue their work.
427 """
427 """
428
428
429 merge = len(repo[None].parents()) > 1
429 merge = len(repo[None].parents()) > 1
430 if merge:
430 if merge:
431 raise util.Abort(_('cannot partially commit a merge '
431 raise util.Abort(_('cannot partially commit a merge '
432 '(use "hg commit" instead)'))
432 '(use "hg commit" instead)'))
433
433
434 changes = repo.status(match=match)[:3]
434 changes = repo.status(match=match)[:3]
435 diffopts = mdiff.diffopts(git=True, nodates=True)
435 diffopts = mdiff.diffopts(git=True, nodates=True)
436 chunks = patch.diff(repo, changes=changes, opts=diffopts)
436 chunks = patch.diff(repo, changes=changes, opts=diffopts)
437 fp = cStringIO.StringIO()
437 fp = cStringIO.StringIO()
438 fp.write(''.join(chunks))
438 fp.write(''.join(chunks))
439 fp.seek(0)
439 fp.seek(0)
440
440
441 # 1. filter the patch, so we have the subset of it we intend to apply
441 # 1. filter the patch, so we have the subset of it we intend to apply
442 chunks = filterpatch(ui, parsepatch(fp))
442 chunks = filterpatch(ui, parsepatch(fp))
443 del fp
443 del fp
444
444
445 contenders = set()
445 contenders = set()
446 for h in chunks:
446 for h in chunks:
447 try:
447 try:
448 contenders.update(set(h.files()))
448 contenders.update(set(h.files()))
449 except AttributeError:
449 except AttributeError:
450 pass
450 pass
451
451
452 changed = changes[0] + changes[1] + changes[2]
452 changed = changes[0] + changes[1] + changes[2]
453 newfiles = [f for f in changed if f in contenders]
453 newfiles = [f for f in changed if f in contenders]
454 if not newfiles:
454 if not newfiles:
455 ui.status(_('no changes to record\n'))
455 ui.status(_('no changes to record\n'))
456 return 0
456 return 0
457
457
458 modified = set(changes[0])
458 modified = set(changes[0])
459
459
460 # 2. backup changed files, so we can restore them in the end
460 # 2. backup changed files, so we can restore them in the end
461 backups = {}
461 backups = {}
462 backupdir = repo.join('record-backups')
462 backupdir = repo.join('record-backups')
463 try:
463 try:
464 os.mkdir(backupdir)
464 os.mkdir(backupdir)
465 except OSError, err:
465 except OSError, err:
466 if err.errno != errno.EEXIST:
466 if err.errno != errno.EEXIST:
467 raise
467 raise
468 try:
468 try:
469 # backup continues
469 # backup continues
470 for f in newfiles:
470 for f in newfiles:
471 if f not in modified:
471 if f not in modified:
472 continue
472 continue
473 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
473 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
474 dir=backupdir)
474 dir=backupdir)
475 os.close(fd)
475 os.close(fd)
476 ui.debug('backup %r as %r\n' % (f, tmpname))
476 ui.debug('backup %r as %r\n' % (f, tmpname))
477 util.copyfile(repo.wjoin(f), tmpname)
477 util.copyfile(repo.wjoin(f), tmpname)
478 backups[f] = tmpname
478 backups[f] = tmpname
479
479
480 fp = cStringIO.StringIO()
480 fp = cStringIO.StringIO()
481 for c in chunks:
481 for c in chunks:
482 if c.filename() in backups:
482 if c.filename() in backups:
483 c.write(fp)
483 c.write(fp)
484 dopatch = fp.tell()
484 dopatch = fp.tell()
485 fp.seek(0)
485 fp.seek(0)
486
486
487 # 3a. apply filtered patch to clean repo (clean)
487 # 3a. apply filtered patch to clean repo (clean)
488 if backups:
488 if backups:
489 hg.revert(repo, repo.dirstate.parents()[0],
489 hg.revert(repo, repo.dirstate.parents()[0],
490 lambda key: key in backups)
490 lambda key: key in backups)
491
491
492 # 3b. (apply)
492 # 3b. (apply)
493 if dopatch:
493 if dopatch:
494 try:
494 try:
495 ui.debug('applying patch\n')
495 ui.debug('applying patch\n')
496 ui.debug(fp.getvalue())
496 ui.debug(fp.getvalue())
497 pfiles = {}
497 pfiles = {}
498 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
498 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
499 eolmode=None)
499 eolmode=None)
500 cmdutil.updatedir(ui, repo, pfiles)
500 cmdutil.updatedir(ui, repo, pfiles)
501 except patch.PatchError, err:
501 except patch.PatchError, err:
502 raise util.Abort(str(err))
502 raise util.Abort(str(err))
503 del fp
503 del fp
504
504
505 # 4. We prepared working directory according to filtered patch.
505 # 4. We prepared working directory according to filtered patch.
506 # Now is the time to delegate the job to commit/qrefresh or the like!
506 # Now is the time to delegate the job to commit/qrefresh or the like!
507
507
508 # it is important to first chdir to the repo root -- we'll call a
508 # it is important to first chdir to the repo root -- we'll call a
509 # high-level command with a list of pathnames relative to the repo root
509 # high-level command with a list of pathnames relative to the repo root
510 cwd = os.getcwd()
510 cwd = os.getcwd()
511 os.chdir(repo.root)
511 os.chdir(repo.root)
512 try:
512 try:
513 commitfunc(ui, repo, *newfiles, **opts)
513 commitfunc(ui, repo, *newfiles, **opts)
514 finally:
514 finally:
515 os.chdir(cwd)
515 os.chdir(cwd)
516
516
517 return 0
517 return 0
518 finally:
518 finally:
519 # 5. finally restore backed-up files
519 # 5. finally restore backed-up files
520 try:
520 try:
521 for realname, tmpname in backups.iteritems():
521 for realname, tmpname in backups.iteritems():
522 ui.debug('restoring %r to %r\n' % (tmpname, realname))
522 ui.debug('restoring %r to %r\n' % (tmpname, realname))
523 util.copyfile(tmpname, repo.wjoin(realname))
523 util.copyfile(tmpname, repo.wjoin(realname))
524 os.unlink(tmpname)
524 os.unlink(tmpname)
525 os.rmdir(backupdir)
525 os.rmdir(backupdir)
526 except OSError:
526 except OSError:
527 pass
527 pass
528
528
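# A minimal, self-contained sketch of the backup-and-restore discipline that
# recordfunc() follows above (steps 2 and 5): copy the touched files aside
# first, and copy them back in a 'finally' block whether or not the action in
# the middle succeeds.  with_backups() and 'action' are invented names, not
# Mercurial APIs.
import os
import shutil
import tempfile

def with_backups(paths, action):
    backupdir = tempfile.mkdtemp(prefix='record-backups-')
    backups = {}
    try:
        # 2. back up the files we are about to touch
        for path in paths:
            fd, tmpname = tempfile.mkstemp(prefix=os.path.basename(path) + '.',
                                           dir=backupdir)
            os.close(fd)
            shutil.copyfile(path, tmpname)
            backups[path] = tmpname
        # 3-4. mutate the working copy and delegate to the real commit
        return action(paths)
    finally:
        # 5. restore the original contents and clean up
        for realname, tmpname in backups.items():
            shutil.copyfile(tmpname, realname)
            os.unlink(tmpname)
        os.rmdir(backupdir)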
529 # wrap ui.write so diff output can be labeled/colorized
529 # wrap ui.write so diff output can be labeled/colorized
530 def wrapwrite(orig, *args, **kw):
530 def wrapwrite(orig, *args, **kw):
531 label = kw.pop('label', '')
531 label = kw.pop('label', '')
532 for chunk, l in patch.difflabel(lambda: args):
532 for chunk, l in patch.difflabel(lambda: args):
533 orig(chunk, label=label + l)
533 orig(chunk, label=label + l)
534 oldwrite = ui.write
534 oldwrite = ui.write
535 extensions.wrapfunction(ui, 'write', wrapwrite)
535 extensions.wrapfunction(ui, 'write', wrapwrite)
536 try:
536 try:
537 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
537 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
538 finally:
538 finally:
539 ui.write = oldwrite
539 ui.write = oldwrite
540
540
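# A minimal, runnable sketch of the wrap-and-restore idiom used just above for
# ui.write: install a wrapper that decorates and delegates, and undo the
# monkeypatching in a 'finally' block.  The UI class and '[diff] ' prefix are
# invented for illustration.
class UI(object):
    def write(self, text):
        print(text)

def withlabelledwrite(ui, body):
    oldwrite = ui.write
    def wrapwrite(text):
        oldwrite('[diff] ' + text)      # decorate, then delegate to the original
    ui.write = wrapwrite
    try:
        return body(ui)
    finally:
        ui.write = oldwrite             # always restore the original

ui = UI()
withlabelledwrite(ui, lambda u: u.write('+an added line'))
ui.write('back to plain output')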
541 cmdtable = {
541 cmdtable = {
542 "record":
542 "record":
543 (record,
543 (record,
544
544
545 # add commit options
545 # add commit options
546 commands.table['^commit|ci'][1],
546 commands.table['^commit|ci'][1],
547
547
548 _('hg record [OPTION]... [FILE]...')),
548 _('hg record [OPTION]... [FILE]...')),
549 }
549 }
550
550
551
551
552 def uisetup(ui):
552 def uisetup(ui):
553 try:
553 try:
554 mq = extensions.find('mq')
554 mq = extensions.find('mq')
555 except KeyError:
555 except KeyError:
556 return
556 return
557
557
558 qcmdtable = {
558 qcmdtable = {
559 "qrecord":
559 "qrecord":
560 (qrecord,
560 (qrecord,
561
561
562 # add qnew options, except '--force'
562 # add qnew options, except '--force'
563 [opt for opt in mq.cmdtable['^qnew'][1] if opt[1] != 'force'],
563 [opt for opt in mq.cmdtable['^qnew'][1] if opt[1] != 'force'],
564
564
565 _('hg qrecord [OPTION]... PATCH [FILE]...')),
565 _('hg qrecord [OPTION]... PATCH [FILE]...')),
566 }
566 }
567
567
568 cmdtable.update(qcmdtable)
568 cmdtable.update(qcmdtable)
569
569
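# A small, illustrative sketch of the cmdtable shape used by record and
# qrecord above: each entry maps a command name to a (function, options,
# synopsis) tuple, and an extra table is merged in with a plain dict.update().
# The 'hello'/'goodbye' commands and their options are invented examples.
def hello(name):
    print('hello, %s' % name)

def goodbye(name):
    print('goodbye, %s' % name)

cmdtable = {
    'hello': (hello, [('l', 'loud', False, 'shout the greeting')],
              'prog hello NAME'),
}

extratable = {
    'goodbye': (goodbye, [], 'prog goodbye NAME'),
}
cmdtable.update(extratable)

func, opts, synopsis = cmdtable['hello']
func('world')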
@@ -1,1917 +1,1917 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164
164
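# A minimal, runnable sketch of the prefix walk that _checknested() performs
# above: split the sub-path into components and look for the longest prefix
# that names a subrepository.  'substate' here is a plain dict standing in for
# ctx.substate, and '/' is used instead of util.splitpath/os.sep for brevity.
def find_subrepo_prefix(subpath, substate):
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            return prefix
        parts.pop()
    return None

print(find_subrepo_prefix('sub/inner/file.txt', {'sub': 'hg'}))   # 'sub'
print(find_subrepo_prefix('other/file.txt', {'sub': 'hg'}))       # None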
165 @propertycache
165 @propertycache
166 def changelog(self):
166 def changelog(self):
167 c = changelog.changelog(self.sopener)
167 c = changelog.changelog(self.sopener)
168 if 'HG_PENDING' in os.environ:
168 if 'HG_PENDING' in os.environ:
169 p = os.environ['HG_PENDING']
169 p = os.environ['HG_PENDING']
170 if p.startswith(self.root):
170 if p.startswith(self.root):
171 c.readpending('00changelog.i.a')
171 c.readpending('00changelog.i.a')
172 self.sopener.options['defversion'] = c.version
172 self.sopener.options['defversion'] = c.version
173 return c
173 return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 warned = [0]
181 warned = [0]
182 def validate(node):
182 def validate(node):
183 try:
183 try:
184 r = self.changelog.rev(node)
184 r = self.changelog.rev(node)
185 return node
185 return node
186 except error.LookupError:
186 except error.LookupError:
187 if not warned[0]:
187 if not warned[0]:
188 warned[0] = True
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n") % short(node))
190 " working parent %s!\n") % short(node))
191 return nullid
191 return nullid
192
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194
194
195 def __getitem__(self, changeid):
195 def __getitem__(self, changeid):
196 if changeid is None:
196 if changeid is None:
197 return context.workingctx(self)
197 return context.workingctx(self)
198 return context.changectx(self, changeid)
198 return context.changectx(self, changeid)
199
199
200 def __contains__(self, changeid):
200 def __contains__(self, changeid):
201 try:
201 try:
202 return bool(self.lookup(changeid))
202 return bool(self.lookup(changeid))
203 except error.RepoLookupError:
203 except error.RepoLookupError:
204 return False
204 return False
205
205
206 def __nonzero__(self):
206 def __nonzero__(self):
207 return True
207 return True
208
208
209 def __len__(self):
209 def __len__(self):
210 return len(self.changelog)
210 return len(self.changelog)
211
211
212 def __iter__(self):
212 def __iter__(self):
213 for i in xrange(len(self)):
213 for i in xrange(len(self)):
214 yield i
214 yield i
215
215
216 def url(self):
216 def url(self):
217 return 'file:' + self.root
217 return 'file:' + self.root
218
218
219 def hook(self, name, throw=False, **args):
219 def hook(self, name, throw=False, **args):
220 return hook.hook(self.ui, self, name, throw, **args)
220 return hook.hook(self.ui, self, name, throw, **args)
221
221
222 tag_disallowed = ':\r\n'
222 tag_disallowed = ':\r\n'
223
223
224 def _tag(self, names, node, message, local, user, date, extra={}):
224 def _tag(self, names, node, message, local, user, date, extra={}):
225 if isinstance(names, str):
225 if isinstance(names, str):
226 allchars = names
226 allchars = names
227 names = (names,)
227 names = (names,)
228 else:
228 else:
229 allchars = ''.join(names)
229 allchars = ''.join(names)
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in allchars:
231 if c in allchars:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 branches = self.branchmap()
234 branches = self.branchmap()
235 for name in names:
235 for name in names:
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 local=local)
237 local=local)
238 if name in branches:
238 if name in branches:
239 self.ui.warn(_("warning: tag %s conflicts with existing"
239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 " branch name\n") % name)
240 " branch name\n") % name)
241
241
242 def writetags(fp, names, munge, prevtags):
242 def writetags(fp, names, munge, prevtags):
243 fp.seek(0, 2)
243 fp.seek(0, 2)
244 if prevtags and prevtags[-1] != '\n':
244 if prevtags and prevtags[-1] != '\n':
245 fp.write('\n')
245 fp.write('\n')
246 for name in names:
246 for name in names:
247 m = munge and munge(name) or name
247 m = munge and munge(name) or name
248 if self._tagtypes and name in self._tagtypes:
248 if self._tagtypes and name in self._tagtypes:
249 old = self._tags.get(name, nullid)
249 old = self._tags.get(name, nullid)
250 fp.write('%s %s\n' % (hex(old), m))
250 fp.write('%s %s\n' % (hex(old), m))
251 fp.write('%s %s\n' % (hex(node), m))
251 fp.write('%s %s\n' % (hex(node), m))
252 fp.close()
252 fp.close()
253
253
254 prevtags = ''
254 prevtags = ''
255 if local:
255 if local:
256 try:
256 try:
257 fp = self.opener('localtags', 'r+')
257 fp = self.opener('localtags', 'r+')
258 except IOError:
258 except IOError:
259 fp = self.opener('localtags', 'a')
259 fp = self.opener('localtags', 'a')
260 else:
260 else:
261 prevtags = fp.read()
261 prevtags = fp.read()
262
262
263 # local tags are stored in the current charset
263 # local tags are stored in the current charset
264 writetags(fp, names, None, prevtags)
264 writetags(fp, names, None, prevtags)
265 for name in names:
265 for name in names:
266 self.hook('tag', node=hex(node), tag=name, local=local)
266 self.hook('tag', node=hex(node), tag=name, local=local)
267 return
267 return
268
268
269 try:
269 try:
270 fp = self.wfile('.hgtags', 'rb+')
270 fp = self.wfile('.hgtags', 'rb+')
271 except IOError:
271 except IOError:
272 fp = self.wfile('.hgtags', 'ab')
272 fp = self.wfile('.hgtags', 'ab')
273 else:
273 else:
274 prevtags = fp.read()
274 prevtags = fp.read()
275
275
276 # committed tags are stored in UTF-8
276 # committed tags are stored in UTF-8
277 writetags(fp, names, encoding.fromlocal, prevtags)
277 writetags(fp, names, encoding.fromlocal, prevtags)
278
278
279 if '.hgtags' not in self.dirstate:
279 if '.hgtags' not in self.dirstate:
280 self[None].add(['.hgtags'])
280 self[None].add(['.hgtags'])
281
281
282 m = matchmod.exact(self.root, '', ['.hgtags'])
282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284
284
285 for name in names:
285 for name in names:
286 self.hook('tag', node=hex(node), tag=name, local=local)
286 self.hook('tag', node=hex(node), tag=name, local=local)
287
287
288 return tagnode
288 return tagnode
289
289
290 def tag(self, names, node, message, local, user, date):
290 def tag(self, names, node, message, local, user, date):
291 '''tag a revision with one or more symbolic names.
291 '''tag a revision with one or more symbolic names.
292
292
293 names is a list of strings or, when adding a single tag, names may be a
293 names is a list of strings or, when adding a single tag, names may be a
294 string.
294 string.
295
295
296 if local is True, the tags are stored in a per-repository file.
296 if local is True, the tags are stored in a per-repository file.
297 otherwise, they are stored in the .hgtags file, and a new
297 otherwise, they are stored in the .hgtags file, and a new
298 changeset is committed with the change.
298 changeset is committed with the change.
299
299
300 keyword arguments:
300 keyword arguments:
301
301
302 local: whether to store tags in non-version-controlled file
302 local: whether to store tags in non-version-controlled file
303 (default False)
303 (default False)
304
304
305 message: commit message to use if committing
305 message: commit message to use if committing
306
306
307 user: name of user to use if committing
307 user: name of user to use if committing
308
308
309 date: date tuple to use if committing'''
309 date: date tuple to use if committing'''
310
310
311 for x in self.status()[:5]:
311 for x in self.status()[:5]:
312 if '.hgtags' in x:
312 if '.hgtags' in x:
313 raise util.Abort(_('working copy of .hgtags is changed '
313 raise util.Abort(_('working copy of .hgtags is changed '
314 '(please commit .hgtags manually)'))
314 '(please commit .hgtags manually)'))
315
315
316 self.tags() # instantiate the cache
316 self.tags() # instantiate the cache
317 self._tag(names, node, message, local, user, date)
317 self._tag(names, node, message, local, user, date)
318
318
319 def tags(self):
319 def tags(self):
320 '''return a mapping of tag to node'''
320 '''return a mapping of tag to node'''
321 if self._tags is None:
321 if self._tags is None:
322 (self._tags, self._tagtypes) = self._findtags()
322 (self._tags, self._tagtypes) = self._findtags()
323
323
324 return self._tags
324 return self._tags
325
325
326 def _findtags(self):
326 def _findtags(self):
327 '''Do the hard work of finding tags. Return a pair of dicts
327 '''Do the hard work of finding tags. Return a pair of dicts
328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
328 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 maps tag name to a string like \'global\' or \'local\'.
329 maps tag name to a string like \'global\' or \'local\'.
330 Subclasses or extensions are free to add their own tags, but
330 Subclasses or extensions are free to add their own tags, but
331 should be aware that the returned dicts will be retained for the
331 should be aware that the returned dicts will be retained for the
332 duration of the localrepo object.'''
332 duration of the localrepo object.'''
333
333
334 # XXX what tagtype should subclasses/extensions use? Currently
334 # XXX what tagtype should subclasses/extensions use? Currently
335 # mq and bookmarks add tags, but do not set the tagtype at all.
335 # mq and bookmarks add tags, but do not set the tagtype at all.
336 # Should each extension invent its own tag type? Should there
336 # Should each extension invent its own tag type? Should there
337 # be one tagtype for all such "virtual" tags? Or is the status
337 # be one tagtype for all such "virtual" tags? Or is the status
338 # quo fine?
338 # quo fine?
339
339
340 alltags = {} # map tag name to (node, hist)
340 alltags = {} # map tag name to (node, hist)
341 tagtypes = {}
341 tagtypes = {}
342
342
343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
343 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345
345
346 # Build the return dicts. Have to re-encode tag names because
346 # Build the return dicts. Have to re-encode tag names because
347 # the tags module always uses UTF-8 (in order not to lose info
347 # the tags module always uses UTF-8 (in order not to lose info
348 # writing to the cache), but the rest of Mercurial wants them in
348 # writing to the cache), but the rest of Mercurial wants them in
349 # local encoding.
349 # local encoding.
350 tags = {}
350 tags = {}
351 for (name, (node, hist)) in alltags.iteritems():
351 for (name, (node, hist)) in alltags.iteritems():
352 if node != nullid:
352 if node != nullid:
353 tags[encoding.tolocal(name)] = node
353 tags[encoding.tolocal(name)] = node
354 tags['tip'] = self.changelog.tip()
354 tags['tip'] = self.changelog.tip()
355 tagtypes = dict([(encoding.tolocal(name), value)
355 tagtypes = dict([(encoding.tolocal(name), value)
356 for (name, value) in tagtypes.iteritems()])
356 for (name, value) in tagtypes.iteritems()])
357 return (tags, tagtypes)
357 return (tags, tagtypes)
358
358
359 def tagtype(self, tagname):
359 def tagtype(self, tagname):
360 '''
360 '''
361 return the type of the given tag. result can be:
361 return the type of the given tag. result can be:
362
362
363 'local' : a local tag
363 'local' : a local tag
364 'global' : a global tag
364 'global' : a global tag
365 None : tag does not exist
365 None : tag does not exist
366 '''
366 '''
367
367
368 self.tags()
368 self.tags()
369
369
370 return self._tagtypes.get(tagname)
370 return self._tagtypes.get(tagname)
371
371
372 def tagslist(self):
372 def tagslist(self):
373 '''return a list of tags ordered by revision'''
373 '''return a list of tags ordered by revision'''
374 l = []
374 l = []
375 for t, n in self.tags().iteritems():
375 for t, n in self.tags().iteritems():
376 try:
376 try:
377 r = self.changelog.rev(n)
377 r = self.changelog.rev(n)
378 except:
378 except:
379 r = -2 # sort to the beginning of the list if unknown
379 r = -2 # sort to the beginning of the list if unknown
380 l.append((r, t, n))
380 l.append((r, t, n))
381 return [(t, n) for r, t, n in sorted(l)]
381 return [(t, n) for r, t, n in sorted(l)]
382
382
383 def nodetags(self, node):
383 def nodetags(self, node):
384 '''return the tags associated with a node'''
384 '''return the tags associated with a node'''
385 if not self.nodetagscache:
385 if not self.nodetagscache:
386 self.nodetagscache = {}
386 self.nodetagscache = {}
387 for t, n in self.tags().iteritems():
387 for t, n in self.tags().iteritems():
388 self.nodetagscache.setdefault(n, []).append(t)
388 self.nodetagscache.setdefault(n, []).append(t)
389 for tags in self.nodetagscache.itervalues():
389 for tags in self.nodetagscache.itervalues():
390 tags.sort()
390 tags.sort()
391 return self.nodetagscache.get(node, [])
391 return self.nodetagscache.get(node, [])
392
392
393 def _branchtags(self, partial, lrev):
393 def _branchtags(self, partial, lrev):
394 # TODO: rename this function?
394 # TODO: rename this function?
395 tiprev = len(self) - 1
395 tiprev = len(self) - 1
396 if lrev != tiprev:
396 if lrev != tiprev:
397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
397 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 self._updatebranchcache(partial, ctxgen)
398 self._updatebranchcache(partial, ctxgen)
399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
399 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400
400
401 return partial
401 return partial
402
402
403 def updatebranchcache(self):
403 def updatebranchcache(self):
404 tip = self.changelog.tip()
404 tip = self.changelog.tip()
405 if self._branchcache is not None and self._branchcachetip == tip:
405 if self._branchcache is not None and self._branchcachetip == tip:
406 return self._branchcache
406 return self._branchcache
407
407
408 oldtip = self._branchcachetip
408 oldtip = self._branchcachetip
409 self._branchcachetip = tip
409 self._branchcachetip = tip
410 if oldtip is None or oldtip not in self.changelog.nodemap:
410 if oldtip is None or oldtip not in self.changelog.nodemap:
411 partial, last, lrev = self._readbranchcache()
411 partial, last, lrev = self._readbranchcache()
412 else:
412 else:
413 lrev = self.changelog.rev(oldtip)
413 lrev = self.changelog.rev(oldtip)
414 partial = self._branchcache
414 partial = self._branchcache
415
415
416 self._branchtags(partial, lrev)
416 self._branchtags(partial, lrev)
417 # this private cache holds all heads (not just tips)
417 # this private cache holds all heads (not just tips)
418 self._branchcache = partial
418 self._branchcache = partial
419
419
420 def branchmap(self):
420 def branchmap(self):
421 '''returns a dictionary {branch: [branchheads]}'''
421 '''returns a dictionary {branch: [branchheads]}'''
422 self.updatebranchcache()
422 self.updatebranchcache()
423 return self._branchcache
423 return self._branchcache
424
424
425 def branchtags(self):
425 def branchtags(self):
426 '''return a dict where branch names map to the tipmost head of
426 '''return a dict where branch names map to the tipmost head of
427 the branch; open heads come before closed'''
427 the branch; open heads come before closed'''
428 bt = {}
428 bt = {}
429 for bn, heads in self.branchmap().iteritems():
429 for bn, heads in self.branchmap().iteritems():
430 tip = heads[-1]
430 tip = heads[-1]
431 for h in reversed(heads):
431 for h in reversed(heads):
432 if 'close' not in self.changelog.read(h)[5]:
432 if 'close' not in self.changelog.read(h)[5]:
433 tip = h
433 tip = h
434 break
434 break
435 bt[bn] = tip
435 bt[bn] = tip
436 return bt
436 return bt
437
437
438 def _readbranchcache(self):
438 def _readbranchcache(self):
439 partial = {}
439 partial = {}
440 try:
440 try:
441 f = self.opener("branchheads.cache")
441 f = self.opener("branchheads.cache")
442 lines = f.read().split('\n')
442 lines = f.read().split('\n')
443 f.close()
443 f.close()
444 except (IOError, OSError):
444 except (IOError, OSError):
445 return {}, nullid, nullrev
445 return {}, nullid, nullrev
446
446
447 try:
447 try:
448 last, lrev = lines.pop(0).split(" ", 1)
448 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = bin(last), int(lrev)
449 last, lrev = bin(last), int(lrev)
450 if lrev >= len(self) or self[lrev].node() != last:
450 if lrev >= len(self) or self[lrev].node() != last:
451 # invalidate the cache
451 # invalidate the cache
452 raise ValueError('invalidating branch cache (tip differs)')
452 raise ValueError('invalidating branch cache (tip differs)')
453 for l in lines:
453 for l in lines:
454 if not l:
454 if not l:
455 continue
455 continue
456 node, label = l.split(" ", 1)
456 node, label = l.split(" ", 1)
457 label = encoding.tolocal(label.strip())
457 label = encoding.tolocal(label.strip())
458 partial.setdefault(label, []).append(bin(node))
458 partial.setdefault(label, []).append(bin(node))
459 except KeyboardInterrupt:
459 except KeyboardInterrupt:
460 raise
460 raise
461 except Exception, inst:
461 except Exception, inst:
462 if self.ui.debugflag:
462 if self.ui.debugflag:
463 self.ui.warn(str(inst), '\n')
463 self.ui.warn(str(inst), '\n')
464 partial, last, lrev = {}, nullid, nullrev
464 partial, last, lrev = {}, nullid, nullrev
465 return partial, last, lrev
465 return partial, last, lrev
466
466
467 def _writebranchcache(self, branches, tip, tiprev):
467 def _writebranchcache(self, branches, tip, tiprev):
468 try:
468 try:
469 f = self.opener("branchheads.cache", "w", atomictemp=True)
469 f = self.opener("branchheads.cache", "w", atomictemp=True)
470 f.write("%s %s\n" % (hex(tip), tiprev))
470 f.write("%s %s\n" % (hex(tip), tiprev))
471 for label, nodes in branches.iteritems():
471 for label, nodes in branches.iteritems():
472 for node in nodes:
472 for node in nodes:
473 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
473 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
474 f.rename()
474 f.rename()
475 except (IOError, OSError):
475 except (IOError, OSError):
476 pass
476 pass
477
477
478 def _updatebranchcache(self, partial, ctxgen):
478 def _updatebranchcache(self, partial, ctxgen):
479 # collect new branch entries
479 # collect new branch entries
480 newbranches = {}
480 newbranches = {}
481 for c in ctxgen:
481 for c in ctxgen:
482 newbranches.setdefault(c.branch(), []).append(c.node())
482 newbranches.setdefault(c.branch(), []).append(c.node())
483 # if older branchheads are reachable from new ones, they aren't
483 # if older branchheads are reachable from new ones, they aren't
484 # really branchheads. Note checking parents is insufficient:
484 # really branchheads. Note checking parents is insufficient:
485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
485 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
486 for branch, newnodes in newbranches.iteritems():
486 for branch, newnodes in newbranches.iteritems():
487 bheads = partial.setdefault(branch, [])
487 bheads = partial.setdefault(branch, [])
488 bheads.extend(newnodes)
488 bheads.extend(newnodes)
489 if len(bheads) <= 1:
489 if len(bheads) <= 1:
490 continue
490 continue
491 # starting from tip means fewer passes over reachable
491 # starting from tip means fewer passes over reachable
492 while newnodes:
492 while newnodes:
493 latest = newnodes.pop()
493 latest = newnodes.pop()
494 if latest not in bheads:
494 if latest not in bheads:
495 continue
495 continue
496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
496 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
497 reachable = self.changelog.reachable(latest, minbhrev)
497 reachable = self.changelog.reachable(latest, minbhrev)
498 reachable.remove(latest)
498 reachable.remove(latest)
499 bheads = [b for b in bheads if b not in reachable]
499 bheads = [b for b in bheads if b not in reachable]
500 partial[branch] = bheads
500 partial[branch] = bheads
501
501
502 def lookup(self, key):
502 def lookup(self, key):
503 if isinstance(key, int):
503 if isinstance(key, int):
504 return self.changelog.node(key)
504 return self.changelog.node(key)
505 elif key == '.':
505 elif key == '.':
506 return self.dirstate.parents()[0]
506 return self.dirstate.parents()[0]
507 elif key == 'null':
507 elif key == 'null':
508 return nullid
508 return nullid
509 elif key == 'tip':
509 elif key == 'tip':
510 return self.changelog.tip()
510 return self.changelog.tip()
511 n = self.changelog._match(key)
511 n = self.changelog._match(key)
512 if n:
512 if n:
513 return n
513 return n
514 if key in self.tags():
514 if key in self.tags():
515 return self.tags()[key]
515 return self.tags()[key]
516 if key in self.branchtags():
516 if key in self.branchtags():
517 return self.branchtags()[key]
517 return self.branchtags()[key]
518 n = self.changelog._partialmatch(key)
518 n = self.changelog._partialmatch(key)
519 if n:
519 if n:
520 return n
520 return n
521
521
522 # can't find key, check if it might have come from damaged dirstate
522 # can't find key, check if it might have come from damaged dirstate
523 if key in self.dirstate.parents():
523 if key in self.dirstate.parents():
524 raise error.Abort(_("working directory has unknown parent '%s'!")
524 raise error.Abort(_("working directory has unknown parent '%s'!")
525 % short(key))
525 % short(key))
526 try:
526 try:
527 if len(key) == 20:
527 if len(key) == 20:
528 key = hex(key)
528 key = hex(key)
529 except:
529 except:
530 pass
530 pass
531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
531 raise error.RepoLookupError(_("unknown revision '%s'") % key)
532
532
533 def lookupbranch(self, key, remote=None):
533 def lookupbranch(self, key, remote=None):
534 repo = remote or self
534 repo = remote or self
535 if key in repo.branchmap():
535 if key in repo.branchmap():
536 return key
536 return key
537
537
538 repo = (remote and remote.local()) and remote or self
538 repo = (remote and remote.local()) and remote or self
539 return repo[key].branch()
539 return repo[key].branch()
540
540
541 def local(self):
541 def local(self):
542 return True
542 return True
543
543
544 def join(self, f):
544 def join(self, f):
545 return os.path.join(self.path, f)
545 return os.path.join(self.path, f)
546
546
547 def wjoin(self, f):
547 def wjoin(self, f):
548 return os.path.join(self.root, f)
548 return os.path.join(self.root, f)
549
549
550 def file(self, f):
550 def file(self, f):
551 if f[0] == '/':
551 if f[0] == '/':
552 f = f[1:]
552 f = f[1:]
553 return filelog.filelog(self.sopener, f)
553 return filelog.filelog(self.sopener, f)
554
554
555 def changectx(self, changeid):
555 def changectx(self, changeid):
556 return self[changeid]
556 return self[changeid]
557
557
558 def parents(self, changeid=None):
558 def parents(self, changeid=None):
559 '''get list of changectxs for parents of changeid'''
559 '''get list of changectxs for parents of changeid'''
560 return self[changeid].parents()
560 return self[changeid].parents()
561
561
562 def filectx(self, path, changeid=None, fileid=None):
562 def filectx(self, path, changeid=None, fileid=None):
563 """changeid can be a changeset revision, node, or tag.
563 """changeid can be a changeset revision, node, or tag.
564 fileid can be a file revision or node."""
564 fileid can be a file revision or node."""
565 return context.filectx(self, path, changeid, fileid)
565 return context.filectx(self, path, changeid, fileid)
566
566
567 def getcwd(self):
567 def getcwd(self):
568 return self.dirstate.getcwd()
568 return self.dirstate.getcwd()
569
569
570 def pathto(self, f, cwd=None):
570 def pathto(self, f, cwd=None):
571 return self.dirstate.pathto(f, cwd)
571 return self.dirstate.pathto(f, cwd)
572
572
573 def wfile(self, f, mode='r'):
573 def wfile(self, f, mode='r'):
574 return self.wopener(f, mode)
574 return self.wopener(f, mode)
575
575
576 def _link(self, f):
576 def _link(self, f):
577 return os.path.islink(self.wjoin(f))
577 return os.path.islink(self.wjoin(f))
578
578
579 def _loadfilter(self, filter):
579 def _loadfilter(self, filter):
580 if filter not in self.filterpats:
580 if filter not in self.filterpats:
581 l = []
581 l = []
582 for pat, cmd in self.ui.configitems(filter):
582 for pat, cmd in self.ui.configitems(filter):
583 if cmd == '!':
583 if cmd == '!':
584 continue
584 continue
585 mf = matchmod.match(self.root, '', [pat])
585 mf = matchmod.match(self.root, '', [pat])
586 fn = None
586 fn = None
587 params = cmd
587 params = cmd
588 for name, filterfn in self._datafilters.iteritems():
588 for name, filterfn in self._datafilters.iteritems():
589 if cmd.startswith(name):
589 if cmd.startswith(name):
590 fn = filterfn
590 fn = filterfn
591 params = cmd[len(name):].lstrip()
591 params = cmd[len(name):].lstrip()
592 break
592 break
593 if not fn:
593 if not fn:
594 fn = lambda s, c, **kwargs: util.filter(s, c)
594 fn = lambda s, c, **kwargs: util.filter(s, c)
595 # Wrap old filters not supporting keyword arguments
595 # Wrap old filters not supporting keyword arguments
596 if not inspect.getargspec(fn)[2]:
596 if not inspect.getargspec(fn)[2]:
597 oldfn = fn
597 oldfn = fn
598 fn = lambda s, c, **kwargs: oldfn(s, c)
598 fn = lambda s, c, **kwargs: oldfn(s, c)
599 l.append((mf, fn, params))
599 l.append((mf, fn, params))
600 self.filterpats[filter] = l
600 self.filterpats[filter] = l
601 return self.filterpats[filter]
601 return self.filterpats[filter]
602
602
603 def _filter(self, filterpats, filename, data):
603 def _filter(self, filterpats, filename, data):
604 for mf, fn, cmd in filterpats:
604 for mf, fn, cmd in filterpats:
605 if mf(filename):
605 if mf(filename):
606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
606 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
607 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
608 break
608 break
609
609
610 return data
610 return data
611
611
612 @propertycache
612 @propertycache
613 def _encodefilterpats(self):
613 def _encodefilterpats(self):
614 return self._loadfilter('encode')
614 return self._loadfilter('encode')
615
615
616 @propertycache
616 @propertycache
617 def _decodefilterpats(self):
617 def _decodefilterpats(self):
618 return self._loadfilter('decode')
618 return self._loadfilter('decode')
619
619
620 def adddatafilter(self, name, filter):
620 def adddatafilter(self, name, filter):
621 self._datafilters[name] = filter
621 self._datafilters[name] = filter
622
622
623 def wread(self, filename):
623 def wread(self, filename):
624 if self._link(filename):
624 if self._link(filename):
625 data = os.readlink(self.wjoin(filename))
625 data = os.readlink(self.wjoin(filename))
626 else:
626 else:
627 data = self.wopener(filename, 'r').read()
627 data = self.wopener(filename, 'r').read()
628 return self._filter(self._encodefilterpats, filename, data)
628 return self._filter(self._encodefilterpats, filename, data)
629
629
630 def wwrite(self, filename, data, flags):
630 def wwrite(self, filename, data, flags):
631 data = self._filter(self._decodefilterpats, filename, data)
631 data = self._filter(self._decodefilterpats, filename, data)
632 try:
632 try:
633 os.unlink(self.wjoin(filename))
633 os.unlink(self.wjoin(filename))
634 except OSError:
634 except OSError:
635 pass
635 pass
636 if 'l' in flags:
636 if 'l' in flags:
637 self.wopener.symlink(data, filename)
637 self.wopener.symlink(data, filename)
638 else:
638 else:
639 self.wopener(filename, 'w').write(data)
639 self.wopener(filename, 'w').write(data)
640 if 'x' in flags:
640 if 'x' in flags:
641 util.set_flags(self.wjoin(filename), False, True)
641 util.set_flags(self.wjoin(filename), False, True)
642
642
643 def wwritedata(self, filename, data):
643 def wwritedata(self, filename, data):
644 return self._filter(self._decodefilterpats, filename, data)
644 return self._filter(self._decodefilterpats, filename, data)
645
645
646 def transaction(self, desc):
646 def transaction(self, desc):
647 tr = self._transref and self._transref() or None
647 tr = self._transref and self._transref() or None
648 if tr and tr.running():
648 if tr and tr.running():
649 return tr.nest()
649 return tr.nest()
650
650
651 # abort here if the journal already exists
651 # abort here if the journal already exists
652 if os.path.exists(self.sjoin("journal")):
652 if os.path.exists(self.sjoin("journal")):
653 raise error.RepoError(
653 raise error.RepoError(
654 _("abandoned transaction found - run hg recover"))
654 _("abandoned transaction found - run hg recover"))
655
655
656 # save dirstate for rollback
656 # save dirstate for rollback
657 try:
657 try:
658 ds = self.opener("dirstate").read()
658 ds = self.opener("dirstate").read()
659 except IOError:
659 except IOError:
660 ds = ""
660 ds = ""
661 self.opener("journal.dirstate", "w").write(ds)
661 self.opener("journal.dirstate", "w").write(ds)
662 self.opener("journal.branch", "w").write(
662 self.opener("journal.branch", "w").write(
663 encoding.fromlocal(self.dirstate.branch()))
663 encoding.fromlocal(self.dirstate.branch()))
664 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
664 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
665
665
666 renames = [(self.sjoin("journal"), self.sjoin("undo")),
666 renames = [(self.sjoin("journal"), self.sjoin("undo")),
667 (self.join("journal.dirstate"), self.join("undo.dirstate")),
667 (self.join("journal.dirstate"), self.join("undo.dirstate")),
668 (self.join("journal.branch"), self.join("undo.branch")),
668 (self.join("journal.branch"), self.join("undo.branch")),
669 (self.join("journal.desc"), self.join("undo.desc"))]
669 (self.join("journal.desc"), self.join("undo.desc"))]
670 tr = transaction.transaction(self.ui.warn, self.sopener,
670 tr = transaction.transaction(self.ui.warn, self.sopener,
671 self.sjoin("journal"),
671 self.sjoin("journal"),
672 aftertrans(renames),
672 aftertrans(renames),
673 self.store.createmode)
673 self.store.createmode)
674 self._transref = weakref.ref(tr)
674 self._transref = weakref.ref(tr)
675 return tr
675 return tr
676
676
677 def recover(self):
677 def recover(self):
678 lock = self.lock()
678 lock = self.lock()
679 try:
679 try:
680 if os.path.exists(self.sjoin("journal")):
680 if os.path.exists(self.sjoin("journal")):
681 self.ui.status(_("rolling back interrupted transaction\n"))
681 self.ui.status(_("rolling back interrupted transaction\n"))
682 transaction.rollback(self.sopener, self.sjoin("journal"),
682 transaction.rollback(self.sopener, self.sjoin("journal"),
683 self.ui.warn)
683 self.ui.warn)
684 self.invalidate()
684 self.invalidate()
685 return True
685 return True
686 else:
686 else:
687 self.ui.warn(_("no interrupted transaction available\n"))
687 self.ui.warn(_("no interrupted transaction available\n"))
688 return False
688 return False
689 finally:
689 finally:
690 lock.release()
690 lock.release()
691
691
692 def rollback(self, dryrun=False):
692 def rollback(self, dryrun=False):
693 wlock = lock = None
693 wlock = lock = None
694 try:
694 try:
695 wlock = self.wlock()
695 wlock = self.wlock()
696 lock = self.lock()
696 lock = self.lock()
697 if os.path.exists(self.sjoin("undo")):
697 if os.path.exists(self.sjoin("undo")):
698 try:
698 try:
699 args = self.opener("undo.desc", "r").read().splitlines()
699 args = self.opener("undo.desc", "r").read().splitlines()
700 if len(args) >= 3 and self.ui.verbose:
700 if len(args) >= 3 and self.ui.verbose:
701 desc = _("rolling back to revision %s"
701 desc = _("rolling back to revision %s"
702 " (undo %s: %s)\n") % (
702 " (undo %s: %s)\n") % (
703 int(args[0]) - 1, args[1], args[2])
703 int(args[0]) - 1, args[1], args[2])
704 elif len(args) >= 2:
704 elif len(args) >= 2:
705 desc = _("rolling back to revision %s (undo %s)\n") % (
705 desc = _("rolling back to revision %s (undo %s)\n") % (
706 int(args[0]) - 1, args[1])
706 int(args[0]) - 1, args[1])
707 except IOError:
707 except IOError:
708 desc = _("rolling back unknown transaction\n")
708 desc = _("rolling back unknown transaction\n")
709 self.ui.status(desc)
709 self.ui.status(desc)
710 if dryrun:
710 if dryrun:
711 return
711 return
712 transaction.rollback(self.sopener, self.sjoin("undo"),
712 transaction.rollback(self.sopener, self.sjoin("undo"),
713 self.ui.warn)
713 self.ui.warn)
714 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
714 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
715 try:
715 try:
716 branch = self.opener("undo.branch").read()
716 branch = self.opener("undo.branch").read()
717 self.dirstate.setbranch(branch)
717 self.dirstate.setbranch(branch)
718 except IOError:
718 except IOError:
719 self.ui.warn(_("Named branch could not be reset, "
719 self.ui.warn(_("Named branch could not be reset, "
720 "current branch still is: %s\n")
720 "current branch still is: %s\n")
721 % self.dirstate.branch())
721 % self.dirstate.branch())
722 self.invalidate()
722 self.invalidate()
723 self.dirstate.invalidate()
723 self.dirstate.invalidate()
724 self.destroyed()
724 self.destroyed()
725 else:
725 else:
726 self.ui.warn(_("no rollback information available\n"))
726 self.ui.warn(_("no rollback information available\n"))
727 return 1
727 return 1
728 finally:
728 finally:
729 release(lock, wlock)
729 release(lock, wlock)
730
730
731 def invalidatecaches(self):
731 def invalidatecaches(self):
732 self._tags = None
732 self._tags = None
733 self._tagtypes = None
733 self._tagtypes = None
734 self.nodetagscache = None
734 self.nodetagscache = None
735 self._branchcache = None # in UTF-8
735 self._branchcache = None # in UTF-8
736 self._branchcachetip = None
736 self._branchcachetip = None
737
737
738 def invalidate(self):
738 def invalidate(self):
739 for a in "changelog manifest".split():
739 for a in "changelog manifest".split():
740 if a in self.__dict__:
740 if a in self.__dict__:
741 delattr(self, a)
741 delattr(self, a)
742 self.invalidatecaches()
742 self.invalidatecaches()
743
743
744 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
744 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
745 try:
745 try:
746 l = lock.lock(lockname, 0, releasefn, desc=desc)
746 l = lock.lock(lockname, 0, releasefn, desc=desc)
747 except error.LockHeld, inst:
747 except error.LockHeld, inst:
748 if not wait:
748 if not wait:
749 raise
749 raise
750 self.ui.warn(_("waiting for lock on %s held by %r\n") %
750 self.ui.warn(_("waiting for lock on %s held by %r\n") %
751 (desc, inst.locker))
751 (desc, inst.locker))
752 # default to 600 seconds timeout
752 # default to 600 seconds timeout
753 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
753 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
754 releasefn, desc=desc)
754 releasefn, desc=desc)
755 if acquirefn:
755 if acquirefn:
756 acquirefn()
756 acquirefn()
757 return l
757 return l
758
758
759 def lock(self, wait=True):
759 def lock(self, wait=True):
760 '''Lock the repository store (.hg/store) and return a weak reference
760 '''Lock the repository store (.hg/store) and return a weak reference
761 to the lock. Use this before modifying the store (e.g. committing or
761 to the lock. Use this before modifying the store (e.g. committing or
762 stripping). If you are opening a transaction, get a lock as well.)'''
762 stripping). If you are opening a transaction, get a lock as well.'''
762 stripping). If you are opening a transaction, get a lock as well.'''
763 l = self._lockref and self._lockref()
764 if l is not None and l.held:
764 if l is not None and l.held:
765 l.lock()
765 l.lock()
766 return l
766 return l
767
767
768 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
768 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
769 _('repository %s') % self.origroot)
769 _('repository %s') % self.origroot)
770 self._lockref = weakref.ref(l)
770 self._lockref = weakref.ref(l)
771 return l
771 return l
772
772
773 def wlock(self, wait=True):
773 def wlock(self, wait=True):
774 '''Lock the non-store parts of the repository (everything under
774 '''Lock the non-store parts of the repository (everything under
775 .hg except .hg/store) and return a weak reference to the lock.
775 .hg except .hg/store) and return a weak reference to the lock.
776 Use this before modifying files in .hg.'''
776 Use this before modifying files in .hg.'''
777 l = self._wlockref and self._wlockref()
777 l = self._wlockref and self._wlockref()
778 if l is not None and l.held:
778 if l is not None and l.held:
779 l.lock()
779 l.lock()
780 return l
780 return l
781
781
782 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
782 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
783 self.dirstate.invalidate, _('working directory of %s') %
783 self.dirstate.invalidate, _('working directory of %s') %
784 self.origroot)
784 self.origroot)
785 self._wlockref = weakref.ref(l)
785 self._wlockref = weakref.ref(l)
786 return l
786 return l
787
787
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

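    # _filecommit above returns the new filenode from flog.add() when the
    # file's content, parents or copy metadata changed; otherwise it returns
    # the first parent's filenode unchanged, so the manifest keeps pointing
    # at the existing file revision.
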
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

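    # Illustrative sketch (hypothetical caller, not from the original file):
    # commit() returns the new changeset node, or None when there is nothing
    # to commit:
    #
    #   node = repo.commit(text="fix frobnication",
    #                      user="Jane Doe <jane@example.com>")
    #   if node is None:
    #       repo.ui.status("nothing changed\n")
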
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

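    # commitctx above writes the three storage layers in order (filelogs via
    # _filecommit, then the manifest, then the changelog) inside a single
    # "commit" transaction, and fires the pretxncommit hook before closing
    # the transaction so a hook can still veto the new changeset.
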
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        [l.sort() for l in r]
        return r

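    # Illustrative sketch (hypothetical caller): status() returns seven lists
    # which are conventionally unpacked as
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #
    # The ignored, clean and unknown lists stay empty unless the matching
    # keyword argument is passed as True.
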
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

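    # Illustrative sketch (hypothetical caller): listing the open heads of a
    # named branch, newest first:
    #
    #   for node in repo.branchheads('default'):
    #       repo.ui.write('%s\n' % hex(node))
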
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

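    # between() above walks first-parent ancestry from each `top` towards
    # `bottom` and records the nodes found at exponentially growing distances
    # (1, 2, 4, ...); the legacy wire protocol uses these samples to search
    # for a common ancestor with few round trips.
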
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

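    # Illustrative sketch (hypothetical caller): interpreting the return
    # value of push() as documented above:
    #
    #   ret = repo.push(remote)
    #   if ret == 0:
    #       repo.ui.warn("push failed or there was nothing to push\n")
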
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

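    # Illustrative sketch (hypothetical values): the extranodes argument of
    # changegroupsubset() maps a filename, or the integer 1 for the manifest,
    # to a list of (node, linknode) pairs:
    #
    #   extranodes = {
    #       1: [(manifestnode, changelognode)],
    #       'foo/bar.txt': [(filenode, changelognode)],
    #   }
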
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

1579 def _changegroup(self, nodes, source):
1579 def _changegroup(self, nodes, source):
1580 """Compute the changegroup of all nodes that we have that a recipient
1580 """Compute the changegroup of all nodes that we have that a recipient
1581 doesn't. Return a chunkbuffer object whose read() method will return
1581 doesn't. Return a chunkbuffer object whose read() method will return
1582 successive changegroup chunks.
1582 successive changegroup chunks.
1583
1583
1584 This is much easier than the previous function as we can assume that
1584 This is much easier than the previous function as we can assume that
1585 the recipient has any changenode we aren't sending them.
1585 the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

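    # Usage sketch (illustrative, not exercised in this file): the value
    # returned above is an unbundle10 reader over a lazily evaluated chunk
    # generator.  Callers such as 'hg bundle' can hand it to
    # changegroup.writebundle() to serialize it to disk, roughly:
    #
    #     changegroup.writebundle(cg, 'out.hg', 'HG10BZ')
    #
    # where 'cg' stands for the reader returned above and 'HG10BZ' is one of
    # the compressed bundle types; the exact call site lives outside this
    # module.
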
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                f = source.chunk()
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


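    # Return-value sketch for addchangegroup() above (derived from its
    # docstring): pull/unbundle callers usually only need to distinguish
    # "nothing added" from "new heads appeared", roughly:
    #
    #     modheads = repo.addchangegroup(cg, 'pull', url, lock=lock)
    #     if modheads == 0:
    #         pass            # no changesets were added
    #     elif modheads > 1:
    #         pass            # extra heads: a merge or update may be needed
    #
    # 'cg', 'url' and 'lock' are placeholders for the caller's own values.
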
    def stream_in(self, remote, requirements):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1

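    # The parser in stream_in() above implies this 'stream_out' wire layout:
    # one status line holding an integer (0 ok, 1 operation forbidden, 2 lock
    # failed), one line with "<total_files> <total_bytes>", then for each
    # file a "<store path>\0<size>" header line followed by exactly <size>
    # bytes of raw store data.
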
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

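    # Negotiation order used by clone() above: a plain 'stream' capability
    # implies a revlogv1-only server; otherwise 'streamreqs' advertises the
    # remote's format requirements, and streaming is used only when every
    # advertised requirement is supported locally; in all other cases the
    # clone falls back to a regular pull.
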
    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

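# Usage sketch for aftertrans() (hedged; the call site is elsewhere in this
# module): the returned closure is handed to the transaction machinery as its
# post-close callback, so journal files are renamed to their undo
# counterparts only once the transaction has committed, roughly:
#
#     after = aftertrans([(journalname, undoname)])
#     # ... pass 'after' to transaction.transaction(...)
#
# 'journalname' and 'undoname' are illustrative placeholders.
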
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
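
# instance() and islocal() form the small factory interface that hg.py uses
# when resolving a repository path through its scheme table: islocal()
# reports that repositories opened via this module are local, and instance()
# builds the localrepository object.  A rough sketch of that lookup, with
# hypothetical variable names:
#
#     mod = schemes.get(scheme, localrepo)   # 'schemes' lives in hg.py
#     repo = mod.instance(ui, path, create)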