mq: factor out push conditions checks...
Patrick Mezard
r13327:dc11e30b default
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help command` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
files creations or deletions. This behaviour can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.
'''

from mercurial.i18n import _
from mercurial.node import bin, hex, short, nullid, nullrev
from mercurial.lock import release
from mercurial import commands, cmdutil, hg, patch, util
from mercurial import repair, extensions, url, error
import os, sys, re, errno, shutil

commands.norepo += " qclone"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

class statusentry(object):
    def __init__(self, node, name):
        self.node, self.name = node, name
    def __repr__(self):
        return hex(self.node) + ':' + self.name

class patchheader(object):
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:]
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]

class queue(object):
    def __init__(self, ui, path, patchdir=None):
        self.basepath = path
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied_dirty = 0
        self.series_dirty = 0
        self.added = []
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None
        self.guards_dirty = False
        # Handle mq.git as a bool with extended values
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                raise error.ConfigError()
            self.gitmode = gitmode and 'yes' or 'no'
        except error.ConfigError:
            self.gitmode = ui.config('mq', 'git', 'auto').lower()
        self.plainmode = ui.configbool('mq', 'plain', False)

    @util.propertycache
    def applied(self):
        if os.path.exists(self.join(self.status_path)):
            def parse(l):
                n, name = l.split(':', 1)
                return statusentry(bin(n), name)
            lines = self.opener(self.status_path).read().splitlines()
            return [parse(l) for l in lines]
        return []

    @util.propertycache
    def full_series(self):
        if os.path.exists(self.join(self.series_path)):
            return self.opener(self.series_path).read().splitlines()
        return []

    @util.propertycache
    def series(self):
        self.parse_series()
        return self.series

    @util.propertycache
    def series_guards(self):
        self.parse_series()
        return self.series_guards

    def invalidate(self):
        for a in 'applied full_series series series_guards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applied_dirty = 0
        self.series_dirty = 0
        self.guards_dirty = False
        self.active_guards = None

    def diffopts(self, opts={}, patchfn=None):
        diffopts = patch.diffopts(self.ui, opts)
        if self.gitmode == 'auto':
            diffopts.upgrade = True
        elif self.gitmode == 'keep':
            pass
        elif self.gitmode in ('yes', 'no'):
            diffopts.git = self.gitmode == 'yes'
        else:
            raise util.Abort(_('mq.git option can be auto/keep/yes/no'
                               ' got %s') % self.gitmode)
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts

    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == 'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, 'r')
                # if the patch was a git patch, refresh it as a git patch
                for line in patchf:
                    if line.startswith('diff --git'):
                        diffopts.git = True
                        break
                patchf.close()
        return diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def find_series(self, patch):
        def matchpatch(l):
            l = l.split('#', 1)[0]
            return l.strip() == patch
        for index, l in enumerate(self.full_series):
            if matchpatch(l):
                return index
        return None

    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

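    # The series file, as parsed below, lists one patch per line; a '#' starts
    # a comment, lines whose first character is '#' are skipped entirely, and
    # guards appear in the comment part as '#+guard' or '#-guard' (matched by
    # guard_re above).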
    def parse_series(self):
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))

    def check_guard(self, guard):
        if not guard:
            return _('guard cannot be an empty string')
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        if first in '-+':
            return (_('guard %r starts with invalid character: %r') %
                    (guard, first))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)

    def set_active(self, guards):
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True

    def active(self):
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards

    def set_guards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True

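    # Guard evaluation, as implemented by pushable below: a patch with no
    # guards is always pushable; any active negative guard ('-foo') blocks it;
    # if it carries positive guards ('+foo'), at least one must be active.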
    def pushable(self, idx):
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            return False, pos
        return True, ''

    def explain_pushable(self, idx, all_patches=False):
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])

    def save_dirty(self):
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applied_dirty:
            write_list(map(str, self.applied), self.status_path)
        if self.series_dirty:
            write_list(self.full_series, self.series_path)
        if self.guards_dirty:
            write_list(self.active_guards, self.guards_path)
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []

    def removeundo(self, repo):
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))

    def printdiff(self, repo, diffopts, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        stat = opts.get('stat')
        m = cmdutil.match(repo, files, opts)
        cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
                               changes, stat, fp)

    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, [n], update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1

    def mergepatch(self, repo, mergeq, series, diffopts):
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)

    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, files, False)

        return (True, files, fuzz)

    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)

    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = cmdutil.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                r[None].remove(patches, True)
            else:
                for p in patches:
                    os.unlink(self.join(p))

        if numrevs:
            del self.applied[:numrevs]
            self.applied_dirty = 1

        for i in sorted([self.find_series(p) for p in patches], reverse=True):
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1

    def _revpatches(self, repo, revs):
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        patches = self._revpatches(repo, sorted(revs))
        self._cleanup(patches, len(patches))

    def delete(self, repo, patches, opts):
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts.get('rev'))
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))

    def check_toppatch(self, repo):
        if self.applied:
            top = self.applied[-1].node
            patch = self.applied[-1].name
            pp = repo.dirstate.parents()
            if top not in pp:
                raise util.Abort(_("working directory revision is not qtip"))
            return top, patch
        return None, None

    def check_substate(self, repo):
        '''return list of subrepos at a different revision than substate.
        Abort if any subrepos have uncommitted changes.'''
        inclsubs = []
        wctx = repo[None]
        for s in wctx.substate:
            if wctx.sub(s).dirty(True):
                raise util.Abort(
                    _("uncommitted changes in subrepository %s") % s)
            elif wctx.sub(s).dirty():
                inclsubs.append(s)
        return inclsubs

    def check_localchanges(self, repo, force=False, refresh=True):
        m, a, r, d = repo.status()[:4]
        if (m or a or r or d) and not force:
            if refresh:
                raise util.Abort(_("local changes found, refresh first"))
            else:
                raise util.Abort(_("local changes found"))
        return m, a, r, d

    _reserved = ('series', 'status', 'guards')
    def check_reserved_name(self, name):
        if (name in self._reserved or name.startswith('.hg')
            or name.startswith('.mq') or '#' in name or ':' in name):
            raise util.Abort(_('"%s" cannot be used as the name of a patch')
                             % name)

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        msg = opts.get('msg')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            if os.path.isdir(self.join(patchfn)):
                raise util.Abort(_('"%s" already exists as a directory')
                                 % patchfn)
            else:
                raise util.Abort(_('patch "%s" already exists') % patchfn)

        inclsubs = self.check_substate(repo)
        if inclsubs:
            inclsubs.append('.hgsubstate')
        if opts.get('include') or opts.get('exclude') or pats:
            if inclsubs:
                pats = list(pats or []) + inclsubs
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force=True)
            match = cmdutil.matchfiles(repo, m + a + r + inclsubs)
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot manage merge changesets'))
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError, e:
                raise util.Abort(_('cannot write patch "%s": %s')
                                 % (patchfn, e.strerror))
            try:
                if self.plainmode:
                    if user:
                        p.write("From: " + user + "\n")
                        if not date:
                            p.write("\n")
                    if date:
                        p.write("Date: %d %d\n\n" % date)
                else:
                    p.write("# HG changeset patch\n")
                    p.write("# Parent "
                            + hex(repo[None].parents()[0].node()) + "\n")
                    if user:
                        p.write("# User " + user + "\n")
                    if date:
                        p.write("# Date %s %s\n\n" % date)
                if hasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except:
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)

941 def strip(self, repo, revs, update=True, backup="all", force=None):
941 def strip(self, repo, revs, update=True, backup="all", force=None):
942 wlock = lock = None
942 wlock = lock = None
943 try:
943 try:
944 wlock = repo.wlock()
944 wlock = repo.wlock()
945 lock = repo.lock()
945 lock = repo.lock()
946
946
947 if update:
947 if update:
948 self.check_localchanges(repo, force=force, refresh=False)
948 self.check_localchanges(repo, force=force, refresh=False)
949 urev = self.qparents(repo, revs[0])
949 urev = self.qparents(repo, revs[0])
950 hg.clean(repo, urev)
950 hg.clean(repo, urev)
951 repo.dirstate.write()
951 repo.dirstate.write()
952
952
953 self.removeundo(repo)
953 self.removeundo(repo)
954 for rev in revs:
954 for rev in revs:
955 repair.strip(self.ui, repo, rev, backup)
955 repair.strip(self.ui, repo, rev, backup)
956 # strip may have unbundled a set of backed up revisions after
956 # strip may have unbundled a set of backed up revisions after
957 # the actual strip
957 # the actual strip
958 self.removeundo(repo)
958 self.removeundo(repo)
959 finally:
959 finally:
960 release(lock, wlock)
960 release(lock, wlock)
961
961
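# Editor's note: strip() above takes the working-directory lock before the
# store lock and hands them to release() in reverse order.  A minimal,
# self-contained sketch of that acquire/release discipline, using hypothetical
# stand-in locks rather than Mercurial's real lock objects:
import threading

def with_repo_locks(wlock, lock, work):
    """Acquire wlock then lock, run work(), release in reverse order."""
    wlock.acquire()
    try:
        lock.acquire()
        try:
            return work()
        finally:
            lock.release()
    finally:
        wlock.release()

# with_repo_locks(threading.Lock(), threading.Lock(), lambda: "stripped")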
962 def isapplied(self, patch):
962 def isapplied(self, patch):
963 """returns (index, rev, patch)"""
963 """returns (index, rev, patch)"""
964 for i, a in enumerate(self.applied):
964 for i, a in enumerate(self.applied):
965 if a.name == patch:
965 if a.name == patch:
966 return (i, a.node, a.name)
966 return (i, a.node, a.name)
967 return None
967 return None
968
968
969 # if the exact patch name does not exist, we try a few
969 # if the exact patch name does not exist, we try a few
970 # variations. If strict is passed, we try only #1
970 # variations. If strict is passed, we try only #1
971 #
971 #
972 # 1) a number to indicate an offset in the series file
972 # 1) a number to indicate an offset in the series file
973 # 2) a unique substring of the patch name
973 # 2) a unique substring of the patch name
974 # 3) patchname[-+]num to indicate an offset in the series file
974 # 3) patchname[-+]num to indicate an offset in the series file
975 def lookup(self, patch, strict=False):
975 def lookup(self, patch, strict=False):
976 patch = patch and str(patch)
976 patch = patch and str(patch)
977
977
978 def partial_name(s):
978 def partial_name(s):
979 if s in self.series:
979 if s in self.series:
980 return s
980 return s
981 matches = [x for x in self.series if s in x]
981 matches = [x for x in self.series if s in x]
982 if len(matches) > 1:
982 if len(matches) > 1:
983 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
983 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
984 for m in matches:
984 for m in matches:
985 self.ui.warn(' %s\n' % m)
985 self.ui.warn(' %s\n' % m)
986 return None
986 return None
987 if matches:
987 if matches:
988 return matches[0]
988 return matches[0]
989 if self.series and self.applied:
989 if self.series and self.applied:
990 if s == 'qtip':
990 if s == 'qtip':
991 return self.series[self.series_end(True)-1]
991 return self.series[self.series_end(True)-1]
992 if s == 'qbase':
992 if s == 'qbase':
993 return self.series[0]
993 return self.series[0]
994 return None
994 return None
995
995
996 if patch is None:
996 if patch is None:
997 return None
997 return None
998 if patch in self.series:
998 if patch in self.series:
999 return patch
999 return patch
1000
1000
1001 if not os.path.isfile(self.join(patch)):
1001 if not os.path.isfile(self.join(patch)):
1002 try:
1002 try:
1003 sno = int(patch)
1003 sno = int(patch)
1004 except (ValueError, OverflowError):
1004 except (ValueError, OverflowError):
1005 pass
1005 pass
1006 else:
1006 else:
1007 if -len(self.series) <= sno < len(self.series):
1007 if -len(self.series) <= sno < len(self.series):
1008 return self.series[sno]
1008 return self.series[sno]
1009
1009
1010 if not strict:
1010 if not strict:
1011 res = partial_name(patch)
1011 res = partial_name(patch)
1012 if res:
1012 if res:
1013 return res
1013 return res
1014 minus = patch.rfind('-')
1014 minus = patch.rfind('-')
1015 if minus >= 0:
1015 if minus >= 0:
1016 res = partial_name(patch[:minus])
1016 res = partial_name(patch[:minus])
1017 if res:
1017 if res:
1018 i = self.series.index(res)
1018 i = self.series.index(res)
1019 try:
1019 try:
1020 off = int(patch[minus + 1:] or 1)
1020 off = int(patch[minus + 1:] or 1)
1021 except (ValueError, OverflowError):
1021 except (ValueError, OverflowError):
1022 pass
1022 pass
1023 else:
1023 else:
1024 if i - off >= 0:
1024 if i - off >= 0:
1025 return self.series[i - off]
1025 return self.series[i - off]
1026 plus = patch.rfind('+')
1026 plus = patch.rfind('+')
1027 if plus >= 0:
1027 if plus >= 0:
1028 res = partial_name(patch[:plus])
1028 res = partial_name(patch[:plus])
1029 if res:
1029 if res:
1030 i = self.series.index(res)
1030 i = self.series.index(res)
1031 try:
1031 try:
1032 off = int(patch[plus + 1:] or 1)
1032 off = int(patch[plus + 1:] or 1)
1033 except (ValueError, OverflowError):
1033 except (ValueError, OverflowError):
1034 pass
1034 pass
1035 else:
1035 else:
1036 if i + off < len(self.series):
1036 if i + off < len(self.series):
1037 return self.series[i + off]
1037 return self.series[i + off]
1038 raise util.Abort(_("patch %s not in series") % patch)
1038 raise util.Abort(_("patch %s not in series") % patch)
1039
1039
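# Editor's sketch (not part of mq.py): lookup() above accepts, besides an
# exact name, (1) an integer index into the series, (2) a unique substring of
# a patch name, and (3) "name+N"/"name-N" offsets.  A simplified,
# self-contained illustration of the offset form, with hypothetical data:
def resolve_offset(series, spec):
    if spec in series:
        return spec
    for sep, sign in (('-', -1), ('+', 1)):
        cut = spec.rfind(sep)
        if cut < 0:
            continue
        base, num = spec[:cut], spec[cut + 1:]
        if base in series:
            target = series.index(base) + sign * int(num or 1)
            if 0 <= target < len(series):
                return series[target]
    return None

# resolve_offset(['a.patch', 'b.patch', 'c.patch'], 'a.patch+2') -> 'c.patch'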
1040 def push(self, repo, patch=None, force=False, list=False,
1040 def push(self, repo, patch=None, force=False, list=False,
1041 mergeq=None, all=False, move=False, exact=False):
1041 mergeq=None, all=False, move=False, exact=False):
1042 diffopts = self.diffopts()
1042 diffopts = self.diffopts()
1043 wlock = repo.wlock()
1043 wlock = repo.wlock()
1044 try:
1044 try:
1045 heads = []
1045 heads = []
1046 for b, ls in repo.branchmap().iteritems():
1046 for b, ls in repo.branchmap().iteritems():
1047 heads += ls
1047 heads += ls
1048 if not heads:
1048 if not heads:
1049 heads = [nullid]
1049 heads = [nullid]
1050 if repo.dirstate.parents()[0] not in heads and not exact:
1050 if repo.dirstate.parents()[0] not in heads and not exact:
1051 self.ui.status(_("(working directory not at a head)\n"))
1051 self.ui.status(_("(working directory not at a head)\n"))
1052
1052
1053 if not self.series:
1053 if not self.series:
1054 self.ui.warn(_('no patches in series\n'))
1054 self.ui.warn(_('no patches in series\n'))
1055 return 0
1055 return 0
1056
1056
1057 patch = self.lookup(patch)
1057 patch = self.lookup(patch)
1058 # Suppose our series file is: A B C and the current 'top'
1058 # Suppose our series file is: A B C and the current 'top'
1059 # patch is B. qpush C should be performed (moving forward),
1059 # patch is B. qpush C should be performed (moving forward),
1060 # qpush B is a NOP (no change), and qpush A is an error (we can't
1060 # qpush B is a NOP (no change), and qpush A is an error (we can't
1061 # go backwards with qpush).
1061 # go backwards with qpush).
1062 if patch:
1062 if patch:
1063 info = self.isapplied(patch)
1063 info = self.isapplied(patch)
1064 if info:
1064 if info:
1065 if info[0] < len(self.applied) - 1:
1065 if info[0] < len(self.applied) - 1:
1066 raise util.Abort(
1066 raise util.Abort(
1067 _("cannot push to a previous patch: %s") % patch)
1067 _("cannot push to a previous patch: %s") % patch)
1068 self.ui.warn(
1068 self.ui.warn(
1069 _('qpush: %s is already at the top\n') % patch)
1069 _('qpush: %s is already at the top\n') % patch)
1070 return 0
1070 return 0
1071 pushable, reason = self.pushable(patch)
1071 pushable, reason = self.pushable(patch)
1072 if not pushable:
1072 if not pushable:
1073 if reason:
1073 if reason:
1074 reason = _('guarded by %r') % reason
1074 reason = _('guarded by %r') % reason
1075 else:
1075 else:
1076 reason = _('no matching guards')
1076 reason = _('no matching guards')
1077 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1077 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1078 return 1
1078 return 1
1079 elif all:
1079 elif all:
1080 patch = self.series[-1]
1080 patch = self.series[-1]
1081 if self.isapplied(patch):
1081 if self.isapplied(patch):
1082 self.ui.warn(_('all patches are currently applied\n'))
1082 self.ui.warn(_('all patches are currently applied\n'))
1083 return 0
1083 return 0
1084
1084
1085 # Following the above example, starting at 'top' of B:
1085 # Following the above example, starting at 'top' of B:
1086 # qpush should be performed (pushes C), but a subsequent
1086 # qpush should be performed (pushes C), but a subsequent
1087 # qpush without an argument is an error (nothing to
1087 # qpush without an argument is an error (nothing to
1088 # apply). This allows a loop of "...while hg qpush..." to
1088 # apply). This allows a loop of "...while hg qpush..." to
1089 # work as it detects an error when done
1089 # work as it detects an error when done
1090 start = self.series_end()
1090 start = self.series_end()
1091 if start == len(self.series):
1091 if start == len(self.series):
1092 self.ui.warn(_('patch series already fully applied\n'))
1092 self.ui.warn(_('patch series already fully applied\n'))
1093 return 1
1093 return 1
1094 if not force:
1094 if not force:
1095 self.check_localchanges(repo)
1095 self.check_localchanges(repo)
1096
1096
1097 if exact:
1097 if exact:
1098 if move:
1098 if move:
1099 raise util.Abort(_("cannot use --exact and --move together"))
1099 raise util.Abort(_("cannot use --exact and --move together"))
1100 if self.applied:
1100 if self.applied:
1101 raise util.Abort(_("cannot push --exact with applied patches"))
1101 raise util.Abort(_("cannot push --exact with applied patches"))
1102 root = self.series[start]
1102 root = self.series[start]
1103 target = patchheader(self.join(root), self.plainmode).parent
1103 target = patchheader(self.join(root), self.plainmode).parent
1104 if not target:
1104 if not target:
1105 raise util.Abort(_("%s does not have a parent recorded") % root)
1105 raise util.Abort(_("%s does not have a parent recorded") % root)
1106 if not repo[target] == repo['.']:
1106 if not repo[target] == repo['.']:
1107 hg.update(repo, target)
1107 hg.update(repo, target)
1108
1108
1109 if move:
1109 if move:
1110 if not patch:
1110 if not patch:
1111 raise util.Abort(_("please specify the patch to move"))
1111 raise util.Abort(_("please specify the patch to move"))
1112 for i, rpn in enumerate(self.full_series[start:]):
1112 for i, rpn in enumerate(self.full_series[start:]):
1113 # strip markers for patch guards
1113 # strip markers for patch guards
1114 if self.guard_re.split(rpn, 1)[0] == patch:
1114 if self.guard_re.split(rpn, 1)[0] == patch:
1115 break
1115 break
1116 index = start + i
1116 index = start + i
1117 assert index < len(self.full_series)
1117 assert index < len(self.full_series)
1118 fullpatch = self.full_series[index]
1118 fullpatch = self.full_series[index]
1119 del self.full_series[index]
1119 del self.full_series[index]
1120 self.full_series.insert(start, fullpatch)
1120 self.full_series.insert(start, fullpatch)
1121 self.parse_series()
1121 self.parse_series()
1122 self.series_dirty = 1
1122 self.series_dirty = 1
1123
1123
1124 self.applied_dirty = 1
1124 self.applied_dirty = 1
1125 if start > 0:
1125 if start > 0:
1126 self.check_toppatch(repo)
1126 self.check_toppatch(repo)
1127 if not patch:
1127 if not patch:
1128 patch = self.series[start]
1128 patch = self.series[start]
1129 end = start + 1
1129 end = start + 1
1130 else:
1130 else:
1131 end = self.series.index(patch, start) + 1
1131 end = self.series.index(patch, start) + 1
1132
1132
1133 s = self.series[start:end]
1133 s = self.series[start:end]
1134 all_files = set()
1134 all_files = set()
1135 try:
1135 try:
1136 if mergeq:
1136 if mergeq:
1137 ret = self.mergepatch(repo, mergeq, s, diffopts)
1137 ret = self.mergepatch(repo, mergeq, s, diffopts)
1138 else:
1138 else:
1139 ret = self.apply(repo, s, list, all_files=all_files)
1139 ret = self.apply(repo, s, list, all_files=all_files)
1140 except:
1140 except:
1141 self.ui.warn(_('cleaning up working directory...'))
1141 self.ui.warn(_('cleaning up working directory...'))
1142 node = repo.dirstate.parents()[0]
1142 node = repo.dirstate.parents()[0]
1143 hg.revert(repo, node, None)
1143 hg.revert(repo, node, None)
1144 # only remove unknown files that we know we touched or
1144 # only remove unknown files that we know we touched or
1145 # created while patching
1145 # created while patching
1146 for f in all_files:
1146 for f in all_files:
1147 if f not in repo.dirstate:
1147 if f not in repo.dirstate:
1148 try:
1148 try:
1149 util.unlinkpath(repo.wjoin(f))
1149 util.unlinkpath(repo.wjoin(f))
1150 except OSError, inst:
1150 except OSError, inst:
1151 if inst.errno != errno.ENOENT:
1151 if inst.errno != errno.ENOENT:
1152 raise
1152 raise
1153 self.ui.warn(_('done\n'))
1153 self.ui.warn(_('done\n'))
1154 raise
1154 raise
1155
1155
1156 if not self.applied:
1156 if not self.applied:
1157 return ret[0]
1157 return ret[0]
1158 top = self.applied[-1].name
1158 top = self.applied[-1].name
1159 if ret[0] and ret[0] > 1:
1159 if ret[0] and ret[0] > 1:
1160 msg = _("errors during apply, please fix and refresh %s\n")
1160 msg = _("errors during apply, please fix and refresh %s\n")
1161 self.ui.write(msg % top)
1161 self.ui.write(msg % top)
1162 else:
1162 else:
1163 self.ui.write(_("now at: %s\n") % top)
1163 self.ui.write(_("now at: %s\n") % top)
1164 return ret[0]
1164 return ret[0]
1165
1165
1166 finally:
1166 finally:
1167 wlock.release()
1167 wlock.release()
1168
1168
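# Editor's sketch (not part of mq.py): the forward-only rule push() enforces.
# With a hypothetical series ['A', 'B', 'C'] and 'A', 'B' applied, pushing
# 'C' moves forward, pushing 'B' is a no-op, and pushing 'A' is an error.
def classify_push(applied, patch):
    if patch in applied:
        if applied.index(patch) < len(applied) - 1:
            return 'error: cannot push to a previous patch'
        return 'no-op: already at the top'
    return 'push forward through %s' % patch

# classify_push(['A', 'B'], 'C') -> 'push forward through C'
# classify_push(['A', 'B'], 'B') -> 'no-op: already at the top'
# classify_push(['A', 'B'], 'A') -> 'error: cannot push to a previous patch'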
1169 def pop(self, repo, patch=None, force=False, update=True, all=False):
1169 def pop(self, repo, patch=None, force=False, update=True, all=False):
1170 wlock = repo.wlock()
1170 wlock = repo.wlock()
1171 try:
1171 try:
1172 if patch:
1172 if patch:
1173 # index, rev, patch
1173 # index, rev, patch
1174 info = self.isapplied(patch)
1174 info = self.isapplied(patch)
1175 if not info:
1175 if not info:
1176 patch = self.lookup(patch)
1176 patch = self.lookup(patch)
1177 info = self.isapplied(patch)
1177 info = self.isapplied(patch)
1178 if not info:
1178 if not info:
1179 raise util.Abort(_("patch %s is not applied") % patch)
1179 raise util.Abort(_("patch %s is not applied") % patch)
1180
1180
1181 if not self.applied:
1181 if not self.applied:
1182 # Allow qpop -a to work repeatedly,
1182 # Allow qpop -a to work repeatedly,
1183 # but not qpop without an argument
1183 # but not qpop without an argument
1184 self.ui.warn(_("no patches applied\n"))
1184 self.ui.warn(_("no patches applied\n"))
1185 return not all
1185 return not all
1186
1186
1187 if all:
1187 if all:
1188 start = 0
1188 start = 0
1189 elif patch:
1189 elif patch:
1190 start = info[0] + 1
1190 start = info[0] + 1
1191 else:
1191 else:
1192 start = len(self.applied) - 1
1192 start = len(self.applied) - 1
1193
1193
1194 if start >= len(self.applied):
1194 if start >= len(self.applied):
1195 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1195 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1196 return
1196 return
1197
1197
1198 if not update:
1198 if not update:
1199 parents = repo.dirstate.parents()
1199 parents = repo.dirstate.parents()
1200 rr = [x.node for x in self.applied]
1200 rr = [x.node for x in self.applied]
1201 for p in parents:
1201 for p in parents:
1202 if p in rr:
1202 if p in rr:
1203 self.ui.warn(_("qpop: forcing dirstate update\n"))
1203 self.ui.warn(_("qpop: forcing dirstate update\n"))
1204 update = True
1204 update = True
1205 else:
1205 else:
1206 parents = [p.node() for p in repo[None].parents()]
1206 parents = [p.node() for p in repo[None].parents()]
1207 needupdate = False
1207 needupdate = False
1208 for entry in self.applied[start:]:
1208 for entry in self.applied[start:]:
1209 if entry.node in parents:
1209 if entry.node in parents:
1210 needupdate = True
1210 needupdate = True
1211 break
1211 break
1212 update = needupdate
1212 update = needupdate
1213
1213
1214 if not force and update:
1214 if not force and update:
1215 self.check_localchanges(repo)
1215 self.check_localchanges(repo)
1216
1216
1217 self.applied_dirty = 1
1217 self.applied_dirty = 1
1218 end = len(self.applied)
1218 end = len(self.applied)
1219 rev = self.applied[start].node
1219 rev = self.applied[start].node
1220 if update:
1220 if update:
1221 top = self.check_toppatch(repo)[0]
1221 top = self.check_toppatch(repo)[0]
1222
1222
1223 try:
1223 try:
1224 heads = repo.changelog.heads(rev)
1224 heads = repo.changelog.heads(rev)
1225 except error.LookupError:
1225 except error.LookupError:
1226 node = short(rev)
1226 node = short(rev)
1227 raise util.Abort(_('trying to pop unknown node %s') % node)
1227 raise util.Abort(_('trying to pop unknown node %s') % node)
1228
1228
1229 if heads != [self.applied[-1].node]:
1229 if heads != [self.applied[-1].node]:
1230 raise util.Abort(_("popping would remove a revision not "
1230 raise util.Abort(_("popping would remove a revision not "
1231 "managed by this patch queue"))
1231 "managed by this patch queue"))
1232
1232
1233 # we know there are no local changes, so we can make a simplified
1233 # we know there are no local changes, so we can make a simplified
1234 # form of hg.update.
1234 # form of hg.update.
1235 if update:
1235 if update:
1236 qp = self.qparents(repo, rev)
1236 qp = self.qparents(repo, rev)
1237 ctx = repo[qp]
1237 ctx = repo[qp]
1238 m, a, r, d = repo.status(qp, top)[:4]
1238 m, a, r, d = repo.status(qp, top)[:4]
1239 if d:
1239 if d:
1240 raise util.Abort(_("deletions found between repo revs"))
1240 raise util.Abort(_("deletions found between repo revs"))
1241 for f in a:
1241 for f in a:
1242 try:
1242 try:
1243 util.unlinkpath(repo.wjoin(f))
1243 util.unlinkpath(repo.wjoin(f))
1244 except OSError, e:
1244 except OSError, e:
1245 if e.errno != errno.ENOENT:
1245 if e.errno != errno.ENOENT:
1246 raise
1246 raise
1247 repo.dirstate.forget(f)
1247 repo.dirstate.forget(f)
1248 for f in m + r:
1248 for f in m + r:
1249 fctx = ctx[f]
1249 fctx = ctx[f]
1250 repo.wwrite(f, fctx.data(), fctx.flags())
1250 repo.wwrite(f, fctx.data(), fctx.flags())
1251 repo.dirstate.normal(f)
1251 repo.dirstate.normal(f)
1252 repo.dirstate.setparents(qp, nullid)
1252 repo.dirstate.setparents(qp, nullid)
1253 for patch in reversed(self.applied[start:end]):
1253 for patch in reversed(self.applied[start:end]):
1254 self.ui.status(_("popping %s\n") % patch.name)
1254 self.ui.status(_("popping %s\n") % patch.name)
1255 del self.applied[start:end]
1255 del self.applied[start:end]
1256 self.strip(repo, [rev], update=False, backup='strip')
1256 self.strip(repo, [rev], update=False, backup='strip')
1257 if self.applied:
1257 if self.applied:
1258 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1258 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1259 else:
1259 else:
1260 self.ui.write(_("patch queue now empty\n"))
1260 self.ui.write(_("patch queue now empty\n"))
1261 finally:
1261 finally:
1262 wlock.release()
1262 wlock.release()
1263
1263
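# Editor's sketch (not part of mq.py): how pop() above picks the first applied
# entry to remove -- everything from 'start' to the end of the applied list is
# popped.  A hypothetical helper, simplified from the code above:
def pop_start(applied, patch=None, all=False):
    if all:
        return 0                         # qpop -a: pop everything
    if patch is not None:
        return applied.index(patch) + 1  # pop down to (but keeping) 'patch'
    return len(applied) - 1              # plain qpop: pop only the top patch

# pop_start(['A', 'B', 'C'], patch='A') -> 1   (B and C are popped, A stays)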
1264 def diff(self, repo, pats, opts):
1264 def diff(self, repo, pats, opts):
1265 top, patch = self.check_toppatch(repo)
1265 top, patch = self.check_toppatch(repo)
1266 if not top:
1266 if not top:
1267 self.ui.write(_("no patches applied\n"))
1267 self.ui.write(_("no patches applied\n"))
1268 return
1268 return
1269 qp = self.qparents(repo, top)
1269 qp = self.qparents(repo, top)
1270 if opts.get('reverse'):
1270 if opts.get('reverse'):
1271 node1, node2 = None, qp
1271 node1, node2 = None, qp
1272 else:
1272 else:
1273 node1, node2 = qp, None
1273 node1, node2 = qp, None
1274 diffopts = self.diffopts(opts, patch)
1274 diffopts = self.diffopts(opts, patch)
1275 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1275 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1276
1276
1277 def refresh(self, repo, pats=None, **opts):
1277 def refresh(self, repo, pats=None, **opts):
1278 if not self.applied:
1278 if not self.applied:
1279 self.ui.write(_("no patches applied\n"))
1279 self.ui.write(_("no patches applied\n"))
1280 return 1
1280 return 1
1281 msg = opts.get('msg', '').rstrip()
1281 msg = opts.get('msg', '').rstrip()
1282 newuser = opts.get('user')
1282 newuser = opts.get('user')
1283 newdate = opts.get('date')
1283 newdate = opts.get('date')
1284 if newdate:
1284 if newdate:
1285 newdate = '%d %d' % util.parsedate(newdate)
1285 newdate = '%d %d' % util.parsedate(newdate)
1286 wlock = repo.wlock()
1286 wlock = repo.wlock()
1287
1287
1288 try:
1288 try:
1289 self.check_toppatch(repo)
1289 self.check_toppatch(repo)
1290 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1290 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1291 if repo.changelog.heads(top) != [top]:
1291 if repo.changelog.heads(top) != [top]:
1292 raise util.Abort(_("cannot refresh a revision with children"))
1292 raise util.Abort(_("cannot refresh a revision with children"))
1293
1293
1294 inclsubs = self.check_substate(repo)
1294 inclsubs = self.check_substate(repo)
1295
1295
1296 cparents = repo.changelog.parents(top)
1296 cparents = repo.changelog.parents(top)
1297 patchparent = self.qparents(repo, top)
1297 patchparent = self.qparents(repo, top)
1298 ph = patchheader(self.join(patchfn), self.plainmode)
1298 ph = patchheader(self.join(patchfn), self.plainmode)
1299 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1299 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1300 if msg:
1300 if msg:
1301 ph.setmessage(msg)
1301 ph.setmessage(msg)
1302 if newuser:
1302 if newuser:
1303 ph.setuser(newuser)
1303 ph.setuser(newuser)
1304 if newdate:
1304 if newdate:
1305 ph.setdate(newdate)
1305 ph.setdate(newdate)
1306 ph.setparent(hex(patchparent))
1306 ph.setparent(hex(patchparent))
1307
1307
1308 # only commit new patch when write is complete
1308 # only commit new patch when write is complete
1309 patchf = self.opener(patchfn, 'w', atomictemp=True)
1309 patchf = self.opener(patchfn, 'w', atomictemp=True)
1310
1310
1311 comments = str(ph)
1311 comments = str(ph)
1312 if comments:
1312 if comments:
1313 patchf.write(comments)
1313 patchf.write(comments)
1314
1314
1315 # update the dirstate in place, strip off the qtip commit
1315 # update the dirstate in place, strip off the qtip commit
1316 # and then commit.
1316 # and then commit.
1317 #
1317 #
1318 # this should really read:
1318 # this should really read:
1319 # mm, dd, aa = repo.status(top, patchparent)[:3]
1319 # mm, dd, aa = repo.status(top, patchparent)[:3]
1320 # but we do it backwards to take advantage of manifest/chlog
1320 # but we do it backwards to take advantage of manifest/chlog
1321 # caching against the next repo.status call
1321 # caching against the next repo.status call
1322 mm, aa, dd = repo.status(patchparent, top)[:3]
1322 mm, aa, dd = repo.status(patchparent, top)[:3]
1323 changes = repo.changelog.read(top)
1323 changes = repo.changelog.read(top)
1324 man = repo.manifest.read(changes[0])
1324 man = repo.manifest.read(changes[0])
1325 aaa = aa[:]
1325 aaa = aa[:]
1326 matchfn = cmdutil.match(repo, pats, opts)
1326 matchfn = cmdutil.match(repo, pats, opts)
1327 # in short mode, we only diff the files included in the
1327 # in short mode, we only diff the files included in the
1328 # patch already plus specified files
1328 # patch already plus specified files
1329 if opts.get('short'):
1329 if opts.get('short'):
1330 # if amending a patch, we start with existing
1330 # if amending a patch, we start with existing
1331 # files plus specified files - unfiltered
1331 # files plus specified files - unfiltered
1332 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1332 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1333 # filter with include/exclude options
1333 # filter with include/exclude options
1334 matchfn = cmdutil.match(repo, opts=opts)
1334 matchfn = cmdutil.match(repo, opts=opts)
1335 else:
1335 else:
1336 match = cmdutil.matchall(repo)
1336 match = cmdutil.matchall(repo)
1337 m, a, r, d = repo.status(match=match)[:4]
1337 m, a, r, d = repo.status(match=match)[:4]
1338 mm = set(mm)
1338 mm = set(mm)
1339 aa = set(aa)
1339 aa = set(aa)
1340 dd = set(dd)
1340 dd = set(dd)
1341
1341
1342 # we might end up with files that were added between
1342 # we might end up with files that were added between
1343 # qtip and the dirstate parent, but then changed in the
1343 # qtip and the dirstate parent, but then changed in the
1344 # local dirstate. in this case, we want them to only
1344 # local dirstate. in this case, we want them to only
1345 # show up in the added section
1345 # show up in the added section
1346 for x in m:
1346 for x in m:
1347 if x not in aa:
1347 if x not in aa:
1348 mm.add(x)
1348 mm.add(x)
1349 # we might end up with files added by the local dirstate that
1349 # we might end up with files added by the local dirstate that
1350 # were deleted by the patch. In this case, they should only
1350 # were deleted by the patch. In this case, they should only
1351 # show up in the changed section.
1351 # show up in the changed section.
1352 for x in a:
1352 for x in a:
1353 if x in dd:
1353 if x in dd:
1354 dd.remove(x)
1354 dd.remove(x)
1355 mm.add(x)
1355 mm.add(x)
1356 else:
1356 else:
1357 aa.add(x)
1357 aa.add(x)
1358 # make sure any files deleted in the local dirstate
1358 # make sure any files deleted in the local dirstate
1359 # are not in the add or change column of the patch
1359 # are not in the add or change column of the patch
1360 forget = []
1360 forget = []
1361 for x in d + r:
1361 for x in d + r:
1362 if x in aa:
1362 if x in aa:
1363 aa.remove(x)
1363 aa.remove(x)
1364 forget.append(x)
1364 forget.append(x)
1365 continue
1365 continue
1366 else:
1366 else:
1367 mm.discard(x)
1367 mm.discard(x)
1368 dd.add(x)
1368 dd.add(x)
1369
1369
1370 m = list(mm)
1370 m = list(mm)
1371 r = list(dd)
1371 r = list(dd)
1372 a = list(aa)
1372 a = list(aa)
1373 c = [filter(matchfn, l) for l in (m, a, r)]
1373 c = [filter(matchfn, l) for l in (m, a, r)]
1374 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
1374 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
1375 chunks = patch.diff(repo, patchparent, match=match,
1375 chunks = patch.diff(repo, patchparent, match=match,
1376 changes=c, opts=diffopts)
1376 changes=c, opts=diffopts)
1377 for chunk in chunks:
1377 for chunk in chunks:
1378 patchf.write(chunk)
1378 patchf.write(chunk)
1379
1379
1380 try:
1380 try:
1381 if diffopts.git or diffopts.upgrade:
1381 if diffopts.git or diffopts.upgrade:
1382 copies = {}
1382 copies = {}
1383 for dst in a:
1383 for dst in a:
1384 src = repo.dirstate.copied(dst)
1384 src = repo.dirstate.copied(dst)
1385 # during qfold, the source file for copies may
1385 # during qfold, the source file for copies may
1386 # be removed. Treat this as a simple add.
1386 # be removed. Treat this as a simple add.
1387 if src is not None and src in repo.dirstate:
1387 if src is not None and src in repo.dirstate:
1388 copies.setdefault(src, []).append(dst)
1388 copies.setdefault(src, []).append(dst)
1389 repo.dirstate.add(dst)
1389 repo.dirstate.add(dst)
1390 # remember the copies between patchparent and qtip
1390 # remember the copies between patchparent and qtip
1391 for dst in aaa:
1391 for dst in aaa:
1392 f = repo.file(dst)
1392 f = repo.file(dst)
1393 src = f.renamed(man[dst])
1393 src = f.renamed(man[dst])
1394 if src:
1394 if src:
1395 copies.setdefault(src[0], []).extend(
1395 copies.setdefault(src[0], []).extend(
1396 copies.get(dst, []))
1396 copies.get(dst, []))
1397 if dst in a:
1397 if dst in a:
1398 copies[src[0]].append(dst)
1398 copies[src[0]].append(dst)
1399 # we can't copy a file created by the patch itself
1399 # we can't copy a file created by the patch itself
1400 if dst in copies:
1400 if dst in copies:
1401 del copies[dst]
1401 del copies[dst]
1402 for src, dsts in copies.iteritems():
1402 for src, dsts in copies.iteritems():
1403 for dst in dsts:
1403 for dst in dsts:
1404 repo.dirstate.copy(src, dst)
1404 repo.dirstate.copy(src, dst)
1405 else:
1405 else:
1406 for dst in a:
1406 for dst in a:
1407 repo.dirstate.add(dst)
1407 repo.dirstate.add(dst)
1408 # Drop useless copy information
1408 # Drop useless copy information
1409 for f in list(repo.dirstate.copies()):
1409 for f in list(repo.dirstate.copies()):
1410 repo.dirstate.copy(None, f)
1410 repo.dirstate.copy(None, f)
1411 for f in r:
1411 for f in r:
1412 repo.dirstate.remove(f)
1412 repo.dirstate.remove(f)
1413 # if the patch excludes a modified file, mark that
1413 # if the patch excludes a modified file, mark that
1414 # file with mtime=0 so status can see it.
1414 # file with mtime=0 so status can see it.
1415 mm = []
1415 mm = []
1416 for i in xrange(len(m)-1, -1, -1):
1416 for i in xrange(len(m)-1, -1, -1):
1417 if not matchfn(m[i]):
1417 if not matchfn(m[i]):
1418 mm.append(m[i])
1418 mm.append(m[i])
1419 del m[i]
1419 del m[i]
1420 for f in m:
1420 for f in m:
1421 repo.dirstate.normal(f)
1421 repo.dirstate.normal(f)
1422 for f in mm:
1422 for f in mm:
1423 repo.dirstate.normallookup(f)
1423 repo.dirstate.normallookup(f)
1424 for f in forget:
1424 for f in forget:
1425 repo.dirstate.forget(f)
1425 repo.dirstate.forget(f)
1426
1426
1427 if not msg:
1427 if not msg:
1428 if not ph.message:
1428 if not ph.message:
1429 message = "[mq]: %s\n" % patchfn
1429 message = "[mq]: %s\n" % patchfn
1430 else:
1430 else:
1431 message = "\n".join(ph.message)
1431 message = "\n".join(ph.message)
1432 else:
1432 else:
1433 message = msg
1433 message = msg
1434
1434
1435 user = ph.user or changes[1]
1435 user = ph.user or changes[1]
1436
1436
1437 # assumes strip can roll itself back if interrupted
1437 # assumes strip can roll itself back if interrupted
1438 repo.dirstate.setparents(*cparents)
1438 repo.dirstate.setparents(*cparents)
1439 self.applied.pop()
1439 self.applied.pop()
1440 self.applied_dirty = 1
1440 self.applied_dirty = 1
1441 self.strip(repo, [top], update=False,
1441 self.strip(repo, [top], update=False,
1442 backup='strip')
1442 backup='strip')
1443 except:
1443 except:
1444 repo.dirstate.invalidate()
1444 repo.dirstate.invalidate()
1445 raise
1445 raise
1446
1446
1447 try:
1447 try:
1448 # might be nice to attempt to roll back strip after this
1448 # might be nice to attempt to roll back strip after this
1449 patchf.rename()
1449 patchf.rename()
1450 n = repo.commit(message, user, ph.date, match=match,
1450 n = repo.commit(message, user, ph.date, match=match,
1451 force=True)
1451 force=True)
1452 self.applied.append(statusentry(n, patchfn))
1452 self.applied.append(statusentry(n, patchfn))
1453 except:
1453 except:
1454 ctx = repo[cparents[0]]
1454 ctx = repo[cparents[0]]
1455 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1455 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1456 self.save_dirty()
1456 self.save_dirty()
1457 self.ui.warn(_('refresh interrupted while patch was popped! '
1457 self.ui.warn(_('refresh interrupted while patch was popped! '
1458 '(revert --all, qpush to recover)\n'))
1458 '(revert --all, qpush to recover)\n'))
1459 raise
1459 raise
1460 finally:
1460 finally:
1461 wlock.release()
1461 wlock.release()
1462 self.removeundo(repo)
1462 self.removeundo(repo)
1463
1463
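# Editor's sketch (not part of mq.py): the file reclassification refresh()
# performs when folding working-directory changes into the top patch.  The
# patch-level sets (modified/added/removed vs. the patch parent) are merged
# with working-directory status so each file ends up in exactly one column of
# the refreshed patch.  Simplified, self-contained version with made-up names:
def reclassify(patch_m, patch_a, patch_r, wd_m, wd_a, wd_removed):
    mm, aa, dd = set(patch_m), set(patch_a), set(patch_r)
    forget = []
    for x in wd_m:            # modified in wd: changed, unless the patch added it
        if x not in aa:
            mm.add(x)
    for x in wd_a:            # added in wd: re-adding a file the patch deleted
        if x in dd:
            dd.remove(x)
            mm.add(x)
        else:
            aa.add(x)
    for x in wd_removed:      # deleted/removed in wd: drop from add/change columns
        if x in aa:
            aa.remove(x)
            forget.append(x)
        else:
            mm.discard(x)
            dd.add(x)
    return sorted(mm), sorted(aa), sorted(dd), forget

# reclassify(['a.c'], ['new.c'], ['old.c'],
#            wd_m=['b.c'], wd_a=['old.c'], wd_removed=['new.c'])
# -> (['a.c', 'b.c', 'old.c'], [], [], ['new.c'])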
1464 def init(self, repo, create=False):
1464 def init(self, repo, create=False):
1465 if not create and os.path.isdir(self.path):
1465 if not create and os.path.isdir(self.path):
1466 raise util.Abort(_("patch queue directory already exists"))
1466 raise util.Abort(_("patch queue directory already exists"))
1467 try:
1467 try:
1468 os.mkdir(self.path)
1468 os.mkdir(self.path)
1469 except OSError, inst:
1469 except OSError, inst:
1470 if inst.errno != errno.EEXIST or not create:
1470 if inst.errno != errno.EEXIST or not create:
1471 raise
1471 raise
1472 if create:
1472 if create:
1473 return self.qrepo(create=True)
1473 return self.qrepo(create=True)
1474
1474
1475 def unapplied(self, repo, patch=None):
1475 def unapplied(self, repo, patch=None):
1476 if patch and patch not in self.series:
1476 if patch and patch not in self.series:
1477 raise util.Abort(_("patch %s is not in series file") % patch)
1477 raise util.Abort(_("patch %s is not in series file") % patch)
1478 if not patch:
1478 if not patch:
1479 start = self.series_end()
1479 start = self.series_end()
1480 else:
1480 else:
1481 start = self.series.index(patch) + 1
1481 start = self.series.index(patch) + 1
1482 unapplied = []
1482 unapplied = []
1483 for i in xrange(start, len(self.series)):
1483 for i in xrange(start, len(self.series)):
1484 pushable, reason = self.pushable(i)
1484 pushable, reason = self.pushable(i)
1485 if pushable:
1485 if pushable:
1486 unapplied.append((i, self.series[i]))
1486 unapplied.append((i, self.series[i]))
1487 self.explain_pushable(i)
1487 self.explain_pushable(i)
1488 return unapplied
1488 return unapplied
1489
1489
1490 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1490 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1491 summary=False):
1491 summary=False):
1492 def displayname(pfx, patchname, state):
1492 def displayname(pfx, patchname, state):
1493 if pfx:
1493 if pfx:
1494 self.ui.write(pfx)
1494 self.ui.write(pfx)
1495 if summary:
1495 if summary:
1496 ph = patchheader(self.join(patchname), self.plainmode)
1496 ph = patchheader(self.join(patchname), self.plainmode)
1497 msg = ph.message and ph.message[0] or ''
1497 msg = ph.message and ph.message[0] or ''
1498 if self.ui.formatted():
1498 if self.ui.formatted():
1499 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1499 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1500 if width > 0:
1500 if width > 0:
1501 msg = util.ellipsis(msg, width)
1501 msg = util.ellipsis(msg, width)
1502 else:
1502 else:
1503 msg = ''
1503 msg = ''
1504 self.ui.write(patchname, label='qseries.' + state)
1504 self.ui.write(patchname, label='qseries.' + state)
1505 self.ui.write(': ')
1505 self.ui.write(': ')
1506 self.ui.write(msg, label='qseries.message.' + state)
1506 self.ui.write(msg, label='qseries.message.' + state)
1507 else:
1507 else:
1508 self.ui.write(patchname, label='qseries.' + state)
1508 self.ui.write(patchname, label='qseries.' + state)
1509 self.ui.write('\n')
1509 self.ui.write('\n')
1510
1510
1511 applied = set([p.name for p in self.applied])
1511 applied = set([p.name for p in self.applied])
1512 if length is None:
1512 if length is None:
1513 length = len(self.series) - start
1513 length = len(self.series) - start
1514 if not missing:
1514 if not missing:
1515 if self.ui.verbose:
1515 if self.ui.verbose:
1516 idxwidth = len(str(start + length - 1))
1516 idxwidth = len(str(start + length - 1))
1517 for i in xrange(start, start + length):
1517 for i in xrange(start, start + length):
1518 patch = self.series[i]
1518 patch = self.series[i]
1519 if patch in applied:
1519 if patch in applied:
1520 char, state = 'A', 'applied'
1520 char, state = 'A', 'applied'
1521 elif self.pushable(i)[0]:
1521 elif self.pushable(i)[0]:
1522 char, state = 'U', 'unapplied'
1522 char, state = 'U', 'unapplied'
1523 else:
1523 else:
1524 char, state = 'G', 'guarded'
1524 char, state = 'G', 'guarded'
1525 pfx = ''
1525 pfx = ''
1526 if self.ui.verbose:
1526 if self.ui.verbose:
1527 pfx = '%*d %s ' % (idxwidth, i, char)
1527 pfx = '%*d %s ' % (idxwidth, i, char)
1528 elif status and status != char:
1528 elif status and status != char:
1529 continue
1529 continue
1530 displayname(pfx, patch, state)
1530 displayname(pfx, patch, state)
1531 else:
1531 else:
1532 msng_list = []
1532 msng_list = []
1533 for root, dirs, files in os.walk(self.path):
1533 for root, dirs, files in os.walk(self.path):
1534 d = root[len(self.path) + 1:]
1534 d = root[len(self.path) + 1:]
1535 for f in files:
1535 for f in files:
1536 fl = os.path.join(d, f)
1536 fl = os.path.join(d, f)
1537 if (fl not in self.series and
1537 if (fl not in self.series and
1538 fl not in (self.status_path, self.series_path,
1538 fl not in (self.status_path, self.series_path,
1539 self.guards_path)
1539 self.guards_path)
1540 and not fl.startswith('.')):
1540 and not fl.startswith('.')):
1541 msng_list.append(fl)
1541 msng_list.append(fl)
1542 for x in sorted(msng_list):
1542 for x in sorted(msng_list):
1543 pfx = self.ui.verbose and ('D ') or ''
1543 pfx = self.ui.verbose and ('D ') or ''
1544 displayname(pfx, x, 'missing')
1544 displayname(pfx, x, 'missing')
1545
1545
1546 def issaveline(self, l):
1546 def issaveline(self, l):
1547 if l.name == '.hg.patches.save.line':
1547 if l.name == '.hg.patches.save.line':
1548 return True
1548 return True
1549
1549
1550 def qrepo(self, create=False):
1550 def qrepo(self, create=False):
1551 ui = self.ui.copy()
1551 ui = self.ui.copy()
1552 ui.setconfig('paths', 'default', '', overlay=False)
1552 ui.setconfig('paths', 'default', '', overlay=False)
1553 ui.setconfig('paths', 'default-push', '', overlay=False)
1553 ui.setconfig('paths', 'default-push', '', overlay=False)
1554 if create or os.path.isdir(self.join(".hg")):
1554 if create or os.path.isdir(self.join(".hg")):
1555 return hg.repository(ui, path=self.path, create=create)
1555 return hg.repository(ui, path=self.path, create=create)
1556
1556
1557 def restore(self, repo, rev, delete=None, qupdate=None):
1557 def restore(self, repo, rev, delete=None, qupdate=None):
1558 desc = repo[rev].description().strip()
1558 desc = repo[rev].description().strip()
1559 lines = desc.splitlines()
1559 lines = desc.splitlines()
1560 i = 0
1560 i = 0
1561 datastart = None
1561 datastart = None
1562 series = []
1562 series = []
1563 applied = []
1563 applied = []
1564 qpp = None
1564 qpp = None
1565 for i, line in enumerate(lines):
1565 for i, line in enumerate(lines):
1566 if line == 'Patch Data:':
1566 if line == 'Patch Data:':
1567 datastart = i + 1
1567 datastart = i + 1
1568 elif line.startswith('Dirstate:'):
1568 elif line.startswith('Dirstate:'):
1569 l = line.rstrip()
1569 l = line.rstrip()
1570 l = l[10:].split(' ')
1570 l = l[10:].split(' ')
1571 qpp = [bin(x) for x in l]
1571 qpp = [bin(x) for x in l]
1572 elif datastart is not None:
1572 elif datastart is not None:
1573 l = line.rstrip()
1573 l = line.rstrip()
1574 n, name = l.split(':', 1)
1574 n, name = l.split(':', 1)
1575 if n:
1575 if n:
1576 applied.append(statusentry(bin(n), name))
1576 applied.append(statusentry(bin(n), name))
1577 else:
1577 else:
1578 series.append(l)
1578 series.append(l)
1579 if datastart is None:
1579 if datastart is None:
1580 self.ui.warn(_("No saved patch data found\n"))
1580 self.ui.warn(_("No saved patch data found\n"))
1581 return 1
1581 return 1
1582 self.ui.warn(_("restoring status: %s\n") % lines[0])
1582 self.ui.warn(_("restoring status: %s\n") % lines[0])
1583 self.full_series = series
1583 self.full_series = series
1584 self.applied = applied
1584 self.applied = applied
1585 self.parse_series()
1585 self.parse_series()
1586 self.series_dirty = 1
1586 self.series_dirty = 1
1587 self.applied_dirty = 1
1587 self.applied_dirty = 1
1588 heads = repo.changelog.heads()
1588 heads = repo.changelog.heads()
1589 if delete:
1589 if delete:
1590 if rev not in heads:
1590 if rev not in heads:
1591 self.ui.warn(_("save entry has children, leaving it alone\n"))
1591 self.ui.warn(_("save entry has children, leaving it alone\n"))
1592 else:
1592 else:
1593 self.ui.warn(_("removing save entry %s\n") % short(rev))
1593 self.ui.warn(_("removing save entry %s\n") % short(rev))
1594 pp = repo.dirstate.parents()
1594 pp = repo.dirstate.parents()
1595 if rev in pp:
1595 if rev in pp:
1596 update = True
1596 update = True
1597 else:
1597 else:
1598 update = False
1598 update = False
1599 self.strip(repo, [rev], update=update, backup='strip')
1599 self.strip(repo, [rev], update=update, backup='strip')
1600 if qpp:
1600 if qpp:
1601 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1601 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1602 (short(qpp[0]), short(qpp[1])))
1602 (short(qpp[0]), short(qpp[1])))
1603 if qupdate:
1603 if qupdate:
1604 self.ui.status(_("updating queue directory\n"))
1604 self.ui.status(_("updating queue directory\n"))
1605 r = self.qrepo()
1605 r = self.qrepo()
1606 if not r:
1606 if not r:
1607 self.ui.warn(_("Unable to load queue repository\n"))
1607 self.ui.warn(_("Unable to load queue repository\n"))
1608 return 1
1608 return 1
1609 hg.clean(r, qpp[0])
1609 hg.clean(r, qpp[0])
1610
1610
1611 def save(self, repo, msg=None):
1611 def save(self, repo, msg=None):
1612 if not self.applied:
1612 if not self.applied:
1613 self.ui.warn(_("save: no patches applied, exiting\n"))
1613 self.ui.warn(_("save: no patches applied, exiting\n"))
1614 return 1
1614 return 1
1615 if self.issaveline(self.applied[-1]):
1615 if self.issaveline(self.applied[-1]):
1616 self.ui.warn(_("status is already saved\n"))
1616 self.ui.warn(_("status is already saved\n"))
1617 return 1
1617 return 1
1618
1618
1619 if not msg:
1619 if not msg:
1620 msg = _("hg patches saved state")
1620 msg = _("hg patches saved state")
1621 else:
1621 else:
1622 msg = "hg patches: " + msg.rstrip('\r\n')
1622 msg = "hg patches: " + msg.rstrip('\r\n')
1623 r = self.qrepo()
1623 r = self.qrepo()
1624 if r:
1624 if r:
1625 pp = r.dirstate.parents()
1625 pp = r.dirstate.parents()
1626 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1626 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1627 msg += "\n\nPatch Data:\n"
1627 msg += "\n\nPatch Data:\n"
1628 msg += ''.join('%s\n' % x for x in self.applied)
1628 msg += ''.join('%s\n' % x for x in self.applied)
1629 msg += ''.join(':%s\n' % x for x in self.full_series)
1629 msg += ''.join(':%s\n' % x for x in self.full_series)
1630 n = repo.commit(msg, force=True)
1630 n = repo.commit(msg, force=True)
1631 if not n:
1631 if not n:
1632 self.ui.warn(_("repo commit failed\n"))
1632 self.ui.warn(_("repo commit failed\n"))
1633 return 1
1633 return 1
1634 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1634 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1635 self.applied_dirty = 1
1635 self.applied_dirty = 1
1636 self.removeundo(repo)
1636 self.removeundo(repo)
1637
1637
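# Editor's sketch (not part of mq.py): the layout of the save-entry commit
# message that save() above builds and restore() parses.  Applied patches are
# written as "<hexnode>:<name>" lines and series entries as ":<entry>" lines,
# so restore() can tell them apart by whether the text before the first colon
# is empty.  A minimal, hypothetical example of assembling that message:
def build_save_message(note, dirstate_parents, applied, full_series):
    msg = "hg patches: " + note if note else "hg patches saved state"
    if dirstate_parents:
        msg += "\nDirstate: %s %s" % dirstate_parents
    msg += "\n\nPatch Data:\n"
    msg += ''.join('%s:%s\n' % (node, name) for node, name in applied)
    msg += ''.join(':%s\n' % entry for entry in full_series)
    return msg

# build_save_message('wip', None, [('1f0e' * 10, 'fix.patch')],
#                    ['fix.patch', 'next.patch'])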
1638 def full_series_end(self):
1638 def full_series_end(self):
1639 if self.applied:
1639 if self.applied:
1640 p = self.applied[-1].name
1640 p = self.applied[-1].name
1641 end = self.find_series(p)
1641 end = self.find_series(p)
1642 if end is None:
1642 if end is None:
1643 return len(self.full_series)
1643 return len(self.full_series)
1644 return end + 1
1644 return end + 1
1645 return 0
1645 return 0
1646
1646
1647 def series_end(self, all_patches=False):
1647 def series_end(self, all_patches=False):
1648 """If all_patches is False, return the index of the next pushable patch
1648 """If all_patches is False, return the index of the next pushable patch
1649 in the series, or the series length. If all_patches is True, return the
1649 in the series, or the series length. If all_patches is True, return the
1650 index of the first patch past the last applied one.
1650 index of the first patch past the last applied one.
1651 """
1651 """
1652 end = 0
1652 end = 0
1653 def next(start):
1653 def next(start):
1654 if all_patches or start >= len(self.series):
1654 if all_patches or start >= len(self.series):
1655 return start
1655 return start
1656 for i in xrange(start, len(self.series)):
1656 for i in xrange(start, len(self.series)):
1657 p, reason = self.pushable(i)
1657 p, reason = self.pushable(i)
1658 if p:
1658 if p:
1659 break
1659 break
1660 self.explain_pushable(i)
1660 self.explain_pushable(i)
1661 return i
1661 return i
1662 if self.applied:
1662 if self.applied:
1663 p = self.applied[-1].name
1663 p = self.applied[-1].name
1664 try:
1664 try:
1665 end = self.series.index(p)
1665 end = self.series.index(p)
1666 except ValueError:
1666 except ValueError:
1667 return 0
1667 return 0
1668 return next(end + 1)
1668 return next(end + 1)
1669 return next(end)
1669 return next(end)
1670
1670
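# Editor's sketch (not part of mq.py): the two behaviours of series_end()
# above.  With a hypothetical series ['A', 'B', 'C', 'D'], patches 'A' and 'B'
# applied and 'C' guarded off, all_patches=True returns 2 (first patch past
# the last applied one) while the default form skips the unpushable 'C' and
# returns 3.  Simplified illustration:
def series_end(series, applied, pushable, all_patches=False):
    start = series.index(applied[-1]) + 1 if applied else 0
    if all_patches:
        return start
    for i in range(start, len(series)):
        if pushable(i):
            return i
    return len(series)

# series_end(['A', 'B', 'C', 'D'], ['A', 'B'], lambda i: i != 2)        -> 3
# series_end(['A', 'B', 'C', 'D'], ['A', 'B'], lambda i: i != 2, True)  -> 2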
1671 def appliedname(self, index):
1671 def appliedname(self, index):
1672 pname = self.applied[index].name
1672 pname = self.applied[index].name
1673 if not self.ui.verbose:
1673 if not self.ui.verbose:
1674 p = pname
1674 p = pname
1675 else:
1675 else:
1676 p = str(self.series.index(pname)) + " " + pname
1676 p = str(self.series.index(pname)) + " " + pname
1677 return p
1677 return p
1678
1678
1679 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1679 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1680 force=None, git=False):
1680 force=None, git=False):
1681 def checkseries(patchname):
1681 def checkseries(patchname):
1682 if patchname in self.series:
1682 if patchname in self.series:
1683 raise util.Abort(_('patch %s is already in the series file')
1683 raise util.Abort(_('patch %s is already in the series file')
1684 % patchname)
1684 % patchname)
1685 def checkfile(patchname):
1685 def checkfile(patchname):
1686 if not force and os.path.exists(self.join(patchname)):
1686 if not force and os.path.exists(self.join(patchname)):
1687 raise util.Abort(_('patch "%s" already exists')
1687 raise util.Abort(_('patch "%s" already exists')
1688 % patchname)
1688 % patchname)
1689
1689
1690 if rev:
1690 if rev:
1691 if files:
1691 if files:
1692 raise util.Abort(_('option "-r" not valid when importing '
1692 raise util.Abort(_('option "-r" not valid when importing '
1693 'files'))
1693 'files'))
1694 rev = cmdutil.revrange(repo, rev)
1694 rev = cmdutil.revrange(repo, rev)
1695 rev.sort(reverse=True)
1695 rev.sort(reverse=True)
1696 if (len(files) > 1 or len(rev) > 1) and patchname:
1696 if (len(files) > 1 or len(rev) > 1) and patchname:
1697 raise util.Abort(_('option "-n" not valid when importing multiple '
1697 raise util.Abort(_('option "-n" not valid when importing multiple '
1698 'patches'))
1698 'patches'))
1699 if rev:
1699 if rev:
1700 # If mq patches are applied, we can only import revisions
1700 # If mq patches are applied, we can only import revisions
1701 # that form a linear path to qbase.
1701 # that form a linear path to qbase.
1702 # Otherwise, they should form a linear path to a head.
1702 # Otherwise, they should form a linear path to a head.
1703 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1703 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1704 if len(heads) > 1:
1704 if len(heads) > 1:
1705 raise util.Abort(_('revision %d is the root of more than one '
1705 raise util.Abort(_('revision %d is the root of more than one '
1706 'branch') % rev[-1])
1706 'branch') % rev[-1])
1707 if self.applied:
1707 if self.applied:
1708 base = repo.changelog.node(rev[0])
1708 base = repo.changelog.node(rev[0])
1709 if base in [n.node for n in self.applied]:
1709 if base in [n.node for n in self.applied]:
1710 raise util.Abort(_('revision %d is already managed')
1710 raise util.Abort(_('revision %d is already managed')
1711 % rev[0])
1711 % rev[0])
1712 if heads != [self.applied[-1].node]:
1712 if heads != [self.applied[-1].node]:
1713 raise util.Abort(_('revision %d is not the parent of '
1713 raise util.Abort(_('revision %d is not the parent of '
1714 'the queue') % rev[0])
1714 'the queue') % rev[0])
1715 base = repo.changelog.rev(self.applied[0].node)
1715 base = repo.changelog.rev(self.applied[0].node)
1716 lastparent = repo.changelog.parentrevs(base)[0]
1716 lastparent = repo.changelog.parentrevs(base)[0]
1717 else:
1717 else:
1718 if heads != [repo.changelog.node(rev[0])]:
1718 if heads != [repo.changelog.node(rev[0])]:
1719 raise util.Abort(_('revision %d has unmanaged children')
1719 raise util.Abort(_('revision %d has unmanaged children')
1720 % rev[0])
1720 % rev[0])
1721 lastparent = None
1721 lastparent = None
1722
1722
1723 diffopts = self.diffopts({'git': git})
1723 diffopts = self.diffopts({'git': git})
1724 for r in rev:
1724 for r in rev:
1725 p1, p2 = repo.changelog.parentrevs(r)
1725 p1, p2 = repo.changelog.parentrevs(r)
1726 n = repo.changelog.node(r)
1726 n = repo.changelog.node(r)
1727 if p2 != nullrev:
1727 if p2 != nullrev:
1728 raise util.Abort(_('cannot import merge revision %d') % r)
1728 raise util.Abort(_('cannot import merge revision %d') % r)
1729 if lastparent and lastparent != r:
1729 if lastparent and lastparent != r:
1730 raise util.Abort(_('revision %d is not the parent of %d')
1730 raise util.Abort(_('revision %d is not the parent of %d')
1731 % (r, lastparent))
1731 % (r, lastparent))
1732 lastparent = p1
1732 lastparent = p1
1733
1733
1734 if not patchname:
1734 if not patchname:
1735 patchname = normname('%d.diff' % r)
1735 patchname = normname('%d.diff' % r)
1736 self.check_reserved_name(patchname)
1736 self.check_reserved_name(patchname)
1737 checkseries(patchname)
1737 checkseries(patchname)
1738 checkfile(patchname)
1738 checkfile(patchname)
1739 self.full_series.insert(0, patchname)
1739 self.full_series.insert(0, patchname)
1740
1740
1741 patchf = self.opener(patchname, "w")
1741 patchf = self.opener(patchname, "w")
1742 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1742 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1743 patchf.close()
1743 patchf.close()
1744
1744
1745 se = statusentry(n, patchname)
1745 se = statusentry(n, patchname)
1746 self.applied.insert(0, se)
1746 self.applied.insert(0, se)
1747
1747
1748 self.added.append(patchname)
1748 self.added.append(patchname)
1749 patchname = None
1749 patchname = None
1750 self.parse_series()
1750 self.parse_series()
1751 self.applied_dirty = 1
1751 self.applied_dirty = 1
1752 self.series_dirty = True
1752 self.series_dirty = True
1753
1753
1754 for i, filename in enumerate(files):
1754 for i, filename in enumerate(files):
1755 if existing:
1755 if existing:
1756 if filename == '-':
1756 if filename == '-':
1757 raise util.Abort(_('-e is incompatible with import from -'))
1757 raise util.Abort(_('-e is incompatible with import from -'))
1758 filename = normname(filename)
1758 filename = normname(filename)
1759 self.check_reserved_name(filename)
1759 self.check_reserved_name(filename)
1760 originpath = self.join(filename)
1760 originpath = self.join(filename)
1761 if not os.path.isfile(originpath):
1761 if not os.path.isfile(originpath):
1762 raise util.Abort(_("patch %s does not exist") % filename)
1762 raise util.Abort(_("patch %s does not exist") % filename)
1763
1763
1764 if patchname:
1764 if patchname:
1765 self.check_reserved_name(patchname)
1765 self.check_reserved_name(patchname)
1766 checkfile(patchname)
1766 checkfile(patchname)
1767
1767
1768 self.ui.write(_('renaming %s to %s\n')
1768 self.ui.write(_('renaming %s to %s\n')
1769 % (filename, patchname))
1769 % (filename, patchname))
1770 util.rename(originpath, self.join(patchname))
1770 util.rename(originpath, self.join(patchname))
1771 else:
1771 else:
1772 patchname = filename
1772 patchname = filename
1773
1773
1774 else:
1774 else:
1775 try:
1775 try:
1776 if filename == '-':
1776 if filename == '-':
1777 if not patchname:
1777 if not patchname:
1778 raise util.Abort(
1778 raise util.Abort(
1779 _('need --name to import a patch from -'))
1779 _('need --name to import a patch from -'))
1780 text = sys.stdin.read()
1780 text = sys.stdin.read()
1781 else:
1781 else:
1782 text = url.open(self.ui, filename).read()
1782 text = url.open(self.ui, filename).read()
1783 except (OSError, IOError):
1783 except (OSError, IOError):
1784 raise util.Abort(_("unable to read file %s") % filename)
1784 raise util.Abort(_("unable to read file %s") % filename)
1785 if not patchname:
1785 if not patchname:
1786 patchname = normname(os.path.basename(filename))
1786 patchname = normname(os.path.basename(filename))
1787 self.check_reserved_name(patchname)
1787 self.check_reserved_name(patchname)
1788 checkfile(patchname)
1788 checkfile(patchname)
1789 patchf = self.opener(patchname, "w")
1789 patchf = self.opener(patchname, "w")
1790 patchf.write(text)
1790 patchf.write(text)
1791 if not force:
1791 if not force:
1792 checkseries(patchname)
1792 checkseries(patchname)
1793 if patchname not in self.series:
1793 if patchname not in self.series:
1794 index = self.full_series_end() + i
1794 index = self.full_series_end() + i
1795 self.full_series[index:index] = [patchname]
1795 self.full_series[index:index] = [patchname]
1796 self.parse_series()
1796 self.parse_series()
1797 self.series_dirty = True
1797 self.series_dirty = True
1798 self.ui.warn(_("adding %s to series file\n") % patchname)
1798 self.ui.warn(_("adding %s to series file\n") % patchname)
1799 self.added.append(patchname)
1799 self.added.append(patchname)
1800 patchname = None
1800 patchname = None
1801
1801
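# Editor's sketch (not part of mq.py): the linearity check qimport() above
# applies to "qimport --rev".  Revisions are walked newest-first, each one
# must be the first parent of the previously imported revision, and merge
# revisions are rejected.  Simplified stand-alone version over a hypothetical
# {rev: (p1, p2)} parent map:
def check_linear(parentrevs, revs_newest_first, nullrev=-1):
    lastparent = None
    for r in revs_newest_first:
        p1, p2 = parentrevs[r]
        if p2 != nullrev:
            raise ValueError('cannot import merge revision %d' % r)
        if lastparent is not None and lastparent != r:
            raise ValueError('revision %d is not the parent of %d'
                             % (r, lastparent))
        lastparent = p1
    return True

# check_linear({5: (4, -1), 4: (3, -1)}, [5, 4])  -> True
# check_linear({5: (3, -1), 4: (2, -1)}, [5, 4])  -> raises ValueError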
1802 def delete(ui, repo, *patches, **opts):
1802 def delete(ui, repo, *patches, **opts):
1803 """remove patches from queue
1803 """remove patches from queue
1804
1804
1805 The patches must not be applied, and at least one patch is required. With
1805 The patches must not be applied, and at least one patch is required. With
1806 -k/--keep, the patch files are preserved in the patch directory.
1806 -k/--keep, the patch files are preserved in the patch directory.
1807
1807
1808 To stop managing a patch and move it into permanent history,
1808 To stop managing a patch and move it into permanent history,
1809 use the :hg:`qfinish` command."""
1809 use the :hg:`qfinish` command."""
1810 q = repo.mq
1810 q = repo.mq
1811 q.delete(repo, patches, opts)
1811 q.delete(repo, patches, opts)
1812 q.save_dirty()
1812 q.save_dirty()
1813 return 0
1813 return 0
1814
1814
1815 def applied(ui, repo, patch=None, **opts):
1815 def applied(ui, repo, patch=None, **opts):
1816 """print the patches already applied
1816 """print the patches already applied
1817
1817
1818 Returns 0 on success."""
1818 Returns 0 on success."""
1819
1819
1820 q = repo.mq
1820 q = repo.mq
1821
1821
1822 if patch:
1822 if patch:
1823 if patch not in q.series:
1823 if patch not in q.series:
1824 raise util.Abort(_("patch %s is not in series file") % patch)
1824 raise util.Abort(_("patch %s is not in series file") % patch)
1825 end = q.series.index(patch) + 1
1825 end = q.series.index(patch) + 1
1826 else:
1826 else:
1827 end = q.series_end(True)
1827 end = q.series_end(True)
1828
1828
1829 if opts.get('last') and not end:
1829 if opts.get('last') and not end:
1830 ui.write(_("no patches applied\n"))
1830 ui.write(_("no patches applied\n"))
1831 return 1
1831 return 1
1832 elif opts.get('last') and end == 1:
1832 elif opts.get('last') and end == 1:
1833 ui.write(_("only one patch applied\n"))
1833 ui.write(_("only one patch applied\n"))
1834 return 1
1834 return 1
1835 elif opts.get('last'):
1835 elif opts.get('last'):
1836 start = end - 2
1836 start = end - 2
1837 end = 1
1837 end = 1
1838 else:
1838 else:
1839 start = 0
1839 start = 0
1840
1840
1841 q.qseries(repo, length=end, start=start, status='A',
1841 q.qseries(repo, length=end, start=start, status='A',
1842 summary=opts.get('summary'))
1842 summary=opts.get('summary'))
1843
1843
1844
1844
1845 def unapplied(ui, repo, patch=None, **opts):
1845 def unapplied(ui, repo, patch=None, **opts):
1846 """print the patches not yet applied
1846 """print the patches not yet applied
1847
1847
1848 Returns 0 on success."""
1848 Returns 0 on success."""
1849
1849
1850 q = repo.mq
1850 q = repo.mq
1851 if patch:
1851 if patch:
1852 if patch not in q.series:
1852 if patch not in q.series:
1853 raise util.Abort(_("patch %s is not in series file") % patch)
1853 raise util.Abort(_("patch %s is not in series file") % patch)
1854 start = q.series.index(patch) + 1
1854 start = q.series.index(patch) + 1
1855 else:
1855 else:
1856 start = q.series_end(True)
1856 start = q.series_end(True)
1857
1857
1858 if start == len(q.series) and opts.get('first'):
1858 if start == len(q.series) and opts.get('first'):
1859 ui.write(_("all patches applied\n"))
1859 ui.write(_("all patches applied\n"))
1860 return 1
1860 return 1
1861
1861
1862 length = opts.get('first') and 1 or None
1862 length = opts.get('first') and 1 or None
1863 q.qseries(repo, start=start, length=length, status='U',
1863 q.qseries(repo, start=start, length=length, status='U',
1864 summary=opts.get('summary'))
1864 summary=opts.get('summary'))
1865
1865
1866 def qimport(ui, repo, *filename, **opts):
1866 def qimport(ui, repo, *filename, **opts):
1867 """import a patch
1867 """import a patch
1868
1868
1869 The patch is inserted into the series after the last applied
1869 The patch is inserted into the series after the last applied
1870 patch. If no patches have been applied, qimport prepends the patch
1870 patch. If no patches have been applied, qimport prepends the patch
1871 to the series.
1871 to the series.
1872
1872
1873 The patch will have the same name as its source file unless you
1873 The patch will have the same name as its source file unless you
1874 give it a new one with -n/--name.
1874 give it a new one with -n/--name.
1875
1875
1876 You can register an existing patch inside the patch directory with
1876 You can register an existing patch inside the patch directory with
1877 the -e/--existing flag.
1877 the -e/--existing flag.
1878
1878
1879 With -f/--force, an existing patch of the same name will be
1879 With -f/--force, an existing patch of the same name will be
1880 overwritten.
1880 overwritten.
1881
1881
1882 An existing changeset may be placed under mq control with -r/--rev
1882 An existing changeset may be placed under mq control with -r/--rev
1883 (e.g. qimport --rev tip -n patch will place tip under mq control).
1883 (e.g. qimport --rev tip -n patch will place tip under mq control).
1884 With -g/--git, patches imported with --rev will use the git diff
1884 With -g/--git, patches imported with --rev will use the git diff
1885 format. See the diffs help topic for information on why this is
1885 format. See the diffs help topic for information on why this is
1886 important for preserving rename/copy information and permission
1886 important for preserving rename/copy information and permission
1887 changes.
1887 changes.
1888
1888
1889 To import a patch from standard input, pass - as the patch file.
1889 To import a patch from standard input, pass - as the patch file.
1890 When importing from standard input, a patch name must be specified
1890 When importing from standard input, a patch name must be specified
1891 using the --name flag.
1891 using the --name flag.
1892
1892
1893 To import an existing patch while renaming it::
1893 To import an existing patch while renaming it::
1894
1894
1895 hg qimport -e existing-patch -n new-name
1895 hg qimport -e existing-patch -n new-name
1896
1896
1897 Returns 0 if import succeeded.
1897 Returns 0 if import succeeded.
1898 """
1898 """
1899 q = repo.mq
1899 q = repo.mq
1900 try:
1900 try:
1901 q.qimport(repo, filename, patchname=opts.get('name'),
1901 q.qimport(repo, filename, patchname=opts.get('name'),
1902 existing=opts.get('existing'), force=opts.get('force'),
1902 existing=opts.get('existing'), force=opts.get('force'),
1903 rev=opts.get('rev'), git=opts.get('git'))
1903 rev=opts.get('rev'), git=opts.get('git'))
1904 finally:
1904 finally:
1905 q.save_dirty()
1905 q.save_dirty()
1906
1906
1907 if opts.get('push') and not opts.get('rev'):
1907 if opts.get('push') and not opts.get('rev'):
1908 return q.push(repo, None)
1908 return q.push(repo, None)
1909 return 0
1909 return 0
1910
1910
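# Usage sketch for the stdin form described above (illustrative command
# lines, not taken from this file): a name is mandatory when the patch is
# read from standard input, e.g.
#   hg diff > fix.diff; hg qimport -n fix fix.diff
# or, piping directly:
#   hg diff | hg qimport -n fix -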
1911 def qinit(ui, repo, create):
1911 def qinit(ui, repo, create):
1912 """initialize a new queue repository
1912 """initialize a new queue repository
1913
1913
1914 This command also creates a series file for ordering patches, and
1914 This command also creates a series file for ordering patches, and
1915 an mq-specific .hgignore file in the queue repository, to exclude
1915 an mq-specific .hgignore file in the queue repository, to exclude
1916 the status and guards files (these contain mostly transient state).
1916 the status and guards files (these contain mostly transient state).
1917
1917
1918 Returns 0 if initialization succeeded."""
1918 Returns 0 if initialization succeeded."""
1919 q = repo.mq
1919 q = repo.mq
1920 r = q.init(repo, create)
1920 r = q.init(repo, create)
1921 q.save_dirty()
1921 q.save_dirty()
1922 if r:
1922 if r:
1923 if not os.path.exists(r.wjoin('.hgignore')):
1923 if not os.path.exists(r.wjoin('.hgignore')):
1924 fp = r.wopener('.hgignore', 'w')
1924 fp = r.wopener('.hgignore', 'w')
1925 fp.write('^\\.hg\n')
1925 fp.write('^\\.hg\n')
1926 fp.write('^\\.mq\n')
1926 fp.write('^\\.mq\n')
1927 fp.write('syntax: glob\n')
1927 fp.write('syntax: glob\n')
1928 fp.write('status\n')
1928 fp.write('status\n')
1929 fp.write('guards\n')
1929 fp.write('guards\n')
1930 fp.close()
1930 fp.close()
1931 if not os.path.exists(r.wjoin('series')):
1931 if not os.path.exists(r.wjoin('series')):
1932 r.wopener('series', 'w').close()
1932 r.wopener('series', 'w').close()
1933 r[None].add(['.hgignore', 'series'])
1933 r[None].add(['.hgignore', 'series'])
1934 commands.add(ui, r)
1934 commands.add(ui, r)
1935 return 0
1935 return 0
1936
1936
1937 def init(ui, repo, **opts):
1937 def init(ui, repo, **opts):
1938 """init a new queue repository (DEPRECATED)
1938 """init a new queue repository (DEPRECATED)
1939
1939
1940 The queue repository is unversioned by default. If
1940 The queue repository is unversioned by default. If
1941 -c/--create-repo is specified, qinit will create a separate nested
1941 -c/--create-repo is specified, qinit will create a separate nested
1942 repository for patches (qinit -c may also be run later to convert
1942 repository for patches (qinit -c may also be run later to convert
1943 an unversioned patch repository into a versioned one). You can use
1943 an unversioned patch repository into a versioned one). You can use
1944 qcommit to commit changes to this queue repository.
1944 qcommit to commit changes to this queue repository.
1945
1945
1946 This command is deprecated. Without -c, it's implied by other relevant
1946 This command is deprecated. Without -c, it's implied by other relevant
1947 commands. With -c, use :hg:`init --mq` instead."""
1947 commands. With -c, use :hg:`init --mq` instead."""
1948 return qinit(ui, repo, create=opts.get('create_repo'))
1948 return qinit(ui, repo, create=opts.get('create_repo'))
1949
1949
1950 def clone(ui, source, dest=None, **opts):
1950 def clone(ui, source, dest=None, **opts):
1951 '''clone main and patch repository at the same time
1951 '''clone main and patch repository at the same time
1952
1952
1953 If the source is local, the destination will have no patches
1953 If the source is local, the destination will have no patches
1954 applied. If the source is remote, this command cannot check
1954 applied. If the source is remote, this command cannot check
1955 whether patches are applied there, and so cannot guarantee that
1955 whether patches are applied there, and so cannot guarantee that
1956 no patches are applied in the destination. If you clone a remote
1956 no patches are applied in the destination. If you clone a remote
1957 repository, make sure beforehand that it has no patches applied.
1957 repository, make sure beforehand that it has no patches applied.
1958
1958
1959 The source patch repository is looked for in <src>/.hg/patches by
1959 The source patch repository is looked for in <src>/.hg/patches by
1960 default. Use -p <url> to change this.
1960 default. Use -p <url> to change this.
1961
1961
1962 The patch directory must be a nested Mercurial repository, as
1962 The patch directory must be a nested Mercurial repository, as
1963 would be created by :hg:`init --mq`.
1963 would be created by :hg:`init --mq`.
1964
1964
1965 Return 0 on success.
1965 Return 0 on success.
1966 '''
1966 '''
1967 def patchdir(repo):
1967 def patchdir(repo):
1968 url = repo.url()
1968 url = repo.url()
1969 if url.endswith('/'):
1969 if url.endswith('/'):
1970 url = url[:-1]
1970 url = url[:-1]
1971 return url + '/.hg/patches'
1971 return url + '/.hg/patches'
1972 if dest is None:
1972 if dest is None:
1973 dest = hg.defaultdest(source)
1973 dest = hg.defaultdest(source)
1974 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
1974 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
1975 if opts.get('patches'):
1975 if opts.get('patches'):
1976 patchespath = ui.expandpath(opts.get('patches'))
1976 patchespath = ui.expandpath(opts.get('patches'))
1977 else:
1977 else:
1978 patchespath = patchdir(sr)
1978 patchespath = patchdir(sr)
1979 try:
1979 try:
1980 hg.repository(ui, patchespath)
1980 hg.repository(ui, patchespath)
1981 except error.RepoError:
1981 except error.RepoError:
1982 raise util.Abort(_('versioned patch repository not found'
1982 raise util.Abort(_('versioned patch repository not found'
1983 ' (see init --mq)'))
1983 ' (see init --mq)'))
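# When the source is local and has applied mq patches, qbase is the first
# applied changeset; for a non-local destination, destrev collects the heads
# that are not descendants of qbase plus qbase's first parent, so the mq
# changesets themselves are excluded from the main clone. For remote sources
# the 'qbase' name is looked up over the wire if the server supports lookup.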
1984 qbase, destrev = None, None
1984 qbase, destrev = None, None
1985 if sr.local():
1985 if sr.local():
1986 if sr.mq.applied:
1986 if sr.mq.applied:
1987 qbase = sr.mq.applied[0].node
1987 qbase = sr.mq.applied[0].node
1988 if not hg.islocal(dest):
1988 if not hg.islocal(dest):
1989 heads = set(sr.heads())
1989 heads = set(sr.heads())
1990 destrev = list(heads.difference(sr.heads(qbase)))
1990 destrev = list(heads.difference(sr.heads(qbase)))
1991 destrev.append(sr.changelog.parents(qbase)[0])
1991 destrev.append(sr.changelog.parents(qbase)[0])
1992 elif sr.capable('lookup'):
1992 elif sr.capable('lookup'):
1993 try:
1993 try:
1994 qbase = sr.lookup('qbase')
1994 qbase = sr.lookup('qbase')
1995 except error.RepoError:
1995 except error.RepoError:
1996 pass
1996 pass
1997 ui.note(_('cloning main repository\n'))
1997 ui.note(_('cloning main repository\n'))
1998 sr, dr = hg.clone(ui, sr.url(), dest,
1998 sr, dr = hg.clone(ui, sr.url(), dest,
1999 pull=opts.get('pull'),
1999 pull=opts.get('pull'),
2000 rev=destrev,
2000 rev=destrev,
2001 update=False,
2001 update=False,
2002 stream=opts.get('uncompressed'))
2002 stream=opts.get('uncompressed'))
2003 ui.note(_('cloning patch repository\n'))
2003 ui.note(_('cloning patch repository\n'))
2004 hg.clone(ui, opts.get('patches') or patchdir(sr), patchdir(dr),
2004 hg.clone(ui, opts.get('patches') or patchdir(sr), patchdir(dr),
2005 pull=opts.get('pull'), update=not opts.get('noupdate'),
2005 pull=opts.get('pull'), update=not opts.get('noupdate'),
2006 stream=opts.get('uncompressed'))
2006 stream=opts.get('uncompressed'))
2007 if dr.local():
2007 if dr.local():
2008 if qbase:
2008 if qbase:
2009 ui.note(_('stripping applied patches from destination '
2009 ui.note(_('stripping applied patches from destination '
2010 'repository\n'))
2010 'repository\n'))
2011 dr.mq.strip(dr, [qbase], update=False, backup=None)
2011 dr.mq.strip(dr, [qbase], update=False, backup=None)
2012 if not opts.get('noupdate'):
2012 if not opts.get('noupdate'):
2013 ui.note(_('updating destination repository\n'))
2013 ui.note(_('updating destination repository\n'))
2014 hg.update(dr, dr.changelog.tip())
2014 hg.update(dr, dr.changelog.tip())
2015
2015
2016 def commit(ui, repo, *pats, **opts):
2016 def commit(ui, repo, *pats, **opts):
2017 """commit changes in the queue repository (DEPRECATED)
2017 """commit changes in the queue repository (DEPRECATED)
2018
2018
2019 This command is deprecated; use :hg:`commit --mq` instead."""
2019 This command is deprecated; use :hg:`commit --mq` instead."""
2020 q = repo.mq
2020 q = repo.mq
2021 r = q.qrepo()
2021 r = q.qrepo()
2022 if not r:
2022 if not r:
2023 raise util.Abort('no queue repository')
2023 raise util.Abort('no queue repository')
2024 commands.commit(r.ui, r, *pats, **opts)
2024 commands.commit(r.ui, r, *pats, **opts)
2025
2025
2026 def series(ui, repo, **opts):
2026 def series(ui, repo, **opts):
2027 """print the entire series file
2027 """print the entire series file
2028
2028
2029 Returns 0 on success."""
2029 Returns 0 on success."""
2030 repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
2030 repo.mq.qseries(repo, missing=opts.get('missing'), summary=opts.get('summary'))
2031 return 0
2031 return 0
2032
2032
2033 def top(ui, repo, **opts):
2033 def top(ui, repo, **opts):
2034 """print the name of the current patch
2034 """print the name of the current patch
2035
2035
2036 Returns 0 on success."""
2036 Returns 0 on success."""
2037 q = repo.mq
2037 q = repo.mq
2038 t = q.applied and q.series_end(True) or 0
2038 t = q.applied and q.series_end(True) or 0
2039 if t:
2039 if t:
2040 q.qseries(repo, start=t - 1, length=1, status='A',
2040 q.qseries(repo, start=t - 1, length=1, status='A',
2041 summary=opts.get('summary'))
2041 summary=opts.get('summary'))
2042 else:
2042 else:
2043 ui.write(_("no patches applied\n"))
2043 ui.write(_("no patches applied\n"))
2044 return 1
2044 return 1
2045
2045
2046 def next(ui, repo, **opts):
2046 def next(ui, repo, **opts):
2047 """print the name of the next patch
2047 """print the name of the next patch
2048
2048
2049 Returns 0 on success."""
2049 Returns 0 on success."""
2050 q = repo.mq
2050 q = repo.mq
2051 end = q.series_end()
2051 end = q.series_end()
2052 if end == len(q.series):
2052 if end == len(q.series):
2053 ui.write(_("all patches applied\n"))
2053 ui.write(_("all patches applied\n"))
2054 return 1
2054 return 1
2055 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2055 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2056
2056
2057 def prev(ui, repo, **opts):
2057 def prev(ui, repo, **opts):
2058 """print the name of the previous patch
2058 """print the name of the previous patch
2059
2059
2060 Returns 0 on success."""
2060 Returns 0 on success."""
2061 q = repo.mq
2061 q = repo.mq
2062 l = len(q.applied)
2062 l = len(q.applied)
2063 if l == 1:
2063 if l == 1:
2064 ui.write(_("only one patch applied\n"))
2064 ui.write(_("only one patch applied\n"))
2065 return 1
2065 return 1
2066 if not l:
2066 if not l:
2067 ui.write(_("no patches applied\n"))
2067 ui.write(_("no patches applied\n"))
2068 return 1
2068 return 1
2069 q.qseries(repo, start=l - 2, length=1, status='A',
2069 q.qseries(repo, start=l - 2, length=1, status='A',
2070 summary=opts.get('summary'))
2070 summary=opts.get('summary'))
2071
2071
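# setupheaderopts: when -U/--currentuser or -D/--currentdate is given
# without an explicit -u/--user or -d/--date, fill in the current username
# and time so that qnew and qrefresh record them in the patch header.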
2072 def setupheaderopts(ui, opts):
2072 def setupheaderopts(ui, opts):
2073 if not opts.get('user') and opts.get('currentuser'):
2073 if not opts.get('user') and opts.get('currentuser'):
2074 opts['user'] = ui.username()
2074 opts['user'] = ui.username()
2075 if not opts.get('date') and opts.get('currentdate'):
2075 if not opts.get('date') and opts.get('currentdate'):
2076 opts['date'] = "%d %d" % util.makedate()
2076 opts['date'] = "%d %d" % util.makedate()
2077
2077
2078 def new(ui, repo, patch, *args, **opts):
2078 def new(ui, repo, patch, *args, **opts):
2079 """create a new patch
2079 """create a new patch
2080
2080
2081 qnew creates a new patch on top of the currently-applied patch (if
2081 qnew creates a new patch on top of the currently-applied patch (if
2082 any). The patch will be initialized with any outstanding changes
2082 any). The patch will be initialized with any outstanding changes
2083 in the working directory. You may also use -I/--include,
2083 in the working directory. You may also use -I/--include,
2084 -X/--exclude, and/or a list of files after the patch name to add
2084 -X/--exclude, and/or a list of files after the patch name to add
2085 only changes to matching files to the new patch, leaving the rest
2085 only changes to matching files to the new patch, leaving the rest
2086 as uncommitted modifications.
2086 as uncommitted modifications.
2087
2087
2088 -u/--user and -d/--date can be used to set the (given) user and
2088 -u/--user and -d/--date can be used to set the (given) user and
2089 date, respectively. -U/--currentuser and -D/--currentdate set user
2089 date, respectively. -U/--currentuser and -D/--currentdate set user
2090 to current user and date to current date.
2090 to current user and date to current date.
2091
2091
2092 -e/--edit, -m/--message or -l/--logfile set the patch header as
2092 -e/--edit, -m/--message or -l/--logfile set the patch header as
2093 well as the commit message. If none is specified, the header is
2093 well as the commit message. If none is specified, the header is
2094 empty and the commit message is '[mq]: PATCH'.
2094 empty and the commit message is '[mq]: PATCH'.
2095
2095
2096 Use the -g/--git option to keep the patch in the git extended diff
2096 Use the -g/--git option to keep the patch in the git extended diff
2097 format. Read the diffs help topic for more information on why this
2097 format. Read the diffs help topic for more information on why this
2098 is important for preserving permission changes and copy/rename
2098 is important for preserving permission changes and copy/rename
2099 information.
2099 information.
2100
2100
2101 Returns 0 on successful creation of a new patch.
2101 Returns 0 on successful creation of a new patch.
2102 """
2102 """
2103 msg = cmdutil.logmessage(opts)
2103 msg = cmdutil.logmessage(opts)
2104 def getmsg():
2104 def getmsg():
2105 return ui.edit(msg, opts.get('user') or ui.username())
2105 return ui.edit(msg, opts.get('user') or ui.username())
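# With -e/--edit the callable getmsg is stored in opts['msg'] instead of a
# plain string (see below), so the editor is only started when q.new
# actually creates the patch; q.new is assumed to call the message argument
# if it turns out to be callable.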
2106 q = repo.mq
2106 q = repo.mq
2107 opts['msg'] = msg
2107 opts['msg'] = msg
2108 if opts.get('edit'):
2108 if opts.get('edit'):
2109 opts['msg'] = getmsg
2109 opts['msg'] = getmsg
2110 else:
2110 else:
2111 opts['msg'] = msg
2111 opts['msg'] = msg
2112 setupheaderopts(ui, opts)
2112 setupheaderopts(ui, opts)
2113 q.new(repo, patch, *args, **opts)
2113 q.new(repo, patch, *args, **opts)
2114 q.save_dirty()
2114 q.save_dirty()
2115 return 0
2115 return 0
2116
2116
2117 def refresh(ui, repo, *pats, **opts):
2117 def refresh(ui, repo, *pats, **opts):
2118 """update the current patch
2118 """update the current patch
2119
2119
2120 If any file patterns are provided, the refreshed patch will
2120 If any file patterns are provided, the refreshed patch will
2121 contain only the modifications that match those patterns; the
2121 contain only the modifications that match those patterns; the
2122 remaining modifications will remain in the working directory.
2122 remaining modifications will remain in the working directory.
2123
2123
2124 If -s/--short is specified, files currently included in the patch
2124 If -s/--short is specified, files currently included in the patch
2125 will be refreshed just like matched files and remain in the patch.
2125 will be refreshed just like matched files and remain in the patch.
2126
2126
2127 If -e/--edit is specified, Mercurial will start your configured editor for
2127 If -e/--edit is specified, Mercurial will start your configured editor for
2128 you to enter a message. In case qrefresh fails, you will find a backup of
2128 you to enter a message. In case qrefresh fails, you will find a backup of
2129 your message in ``.hg/last-message.txt``.
2129 your message in ``.hg/last-message.txt``.
2130
2130
2131 hg add/remove/copy/rename work as usual, though you might want to
2131 hg add/remove/copy/rename work as usual, though you might want to
2132 use git-style patches (-g/--git or [diff] git=1) to track copies
2132 use git-style patches (-g/--git or [diff] git=1) to track copies
2133 and renames. See the diffs help topic for more information on the
2133 and renames. See the diffs help topic for more information on the
2134 git diff format.
2134 git diff format.
2135
2135
2136 Returns 0 on success.
2136 Returns 0 on success.
2137 """
2137 """
2138 q = repo.mq
2138 q = repo.mq
2139 message = cmdutil.logmessage(opts)
2139 message = cmdutil.logmessage(opts)
2140 if opts.get('edit'):
2140 if opts.get('edit'):
2141 if not q.applied:
2141 if not q.applied:
2142 ui.write(_("no patches applied\n"))
2142 ui.write(_("no patches applied\n"))
2143 return 1
2143 return 1
2144 if message:
2144 if message:
2145 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2145 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2146 patch = q.applied[-1].name
2146 patch = q.applied[-1].name
2147 ph = patchheader(q.join(patch), q.plainmode)
2147 ph = patchheader(q.join(patch), q.plainmode)
2148 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2148 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2149 # We don't want to lose the patch message if qrefresh fails (issue2062)
2149 # We don't want to lose the patch message if qrefresh fails (issue2062)
2150 msgfile = repo.opener('last-message.txt', 'wb')
2150 msgfile = repo.opener('last-message.txt', 'wb')
2151 msgfile.write(message)
2151 msgfile.write(message)
2152 msgfile.close()
2152 msgfile.close()
2153 setupheaderopts(ui, opts)
2153 setupheaderopts(ui, opts)
2154 ret = q.refresh(repo, pats, msg=message, **opts)
2154 ret = q.refresh(repo, pats, msg=message, **opts)
2155 q.save_dirty()
2155 q.save_dirty()
2156 return ret
2156 return ret
2157
2157
2158 def diff(ui, repo, *pats, **opts):
2158 def diff(ui, repo, *pats, **opts):
2159 """diff of the current patch and subsequent modifications
2159 """diff of the current patch and subsequent modifications
2160
2160
2161 Shows a diff which includes the current patch as well as any
2161 Shows a diff which includes the current patch as well as any
2162 changes which have been made in the working directory since the
2162 changes which have been made in the working directory since the
2163 last refresh (thus showing what the current patch would become
2163 last refresh (thus showing what the current patch would become
2164 after a qrefresh).
2164 after a qrefresh).
2165
2165
2166 Use :hg:`diff` if you only want to see the changes made since the
2166 Use :hg:`diff` if you only want to see the changes made since the
2167 last qrefresh, or :hg:`export qtip` if you want to see changes
2167 last qrefresh, or :hg:`export qtip` if you want to see changes
2168 made by the current patch without including changes made since the
2168 made by the current patch without including changes made since the
2169 qrefresh.
2169 qrefresh.
2170
2170
2171 Returns 0 on success.
2171 Returns 0 on success.
2172 """
2172 """
2173 repo.mq.diff(repo, pats, opts)
2173 repo.mq.diff(repo, pats, opts)
2174 return 0
2174 return 0
2175
2175
2176 def fold(ui, repo, *files, **opts):
2176 def fold(ui, repo, *files, **opts):
2177 """fold the named patches into the current patch
2177 """fold the named patches into the current patch
2178
2178
2179 Patches must not yet be applied. Each patch will be successively
2179 Patches must not yet be applied. Each patch will be successively
2180 applied to the current patch in the order given. If all the
2180 applied to the current patch in the order given. If all the
2181 patches apply successfully, the current patch will be refreshed
2181 patches apply successfully, the current patch will be refreshed
2182 with the new cumulative patch, and the folded patches will be
2182 with the new cumulative patch, and the folded patches will be
2183 deleted. With -k/--keep, the folded patch files will not be
2183 deleted. With -k/--keep, the folded patch files will not be
2184 removed afterwards.
2184 removed afterwards.
2185
2185
2186 The header for each folded patch will be concatenated with the
2186 The header for each folded patch will be concatenated with the
2187 current patch header, separated by a line of ``* * *``.
2187 current patch header, separated by a line of ``* * *``.
2188
2188
2189 Returns 0 on success."""
2189 Returns 0 on success."""
2190
2190
2191 q = repo.mq
2191 q = repo.mq
2192
2192
2193 if not files:
2193 if not files:
2194 raise util.Abort(_('qfold requires at least one patch name'))
2194 raise util.Abort(_('qfold requires at least one patch name'))
2195 if not q.check_toppatch(repo)[0]:
2195 if not q.check_toppatch(repo)[0]:
2196 raise util.Abort(_('no patches applied'))
2196 raise util.Abort(_('no patches applied'))
2197 q.check_localchanges(repo)
2197 q.check_localchanges(repo)
2198
2198
2199 message = cmdutil.logmessage(opts)
2199 message = cmdutil.logmessage(opts)
2200 if opts.get('edit'):
2200 if opts.get('edit'):
2201 if message:
2201 if message:
2202 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2202 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2203
2203
2204 parent = q.lookup('qtip')
2204 parent = q.lookup('qtip')
2205 patches = []
2205 patches = []
2206 messages = []
2206 messages = []
2207 for f in files:
2207 for f in files:
2208 p = q.lookup(f)
2208 p = q.lookup(f)
2209 if p in patches or p == parent:
2209 if p in patches or p == parent:
2210 ui.warn(_('Skipping already folded patch %s\n') % p)
2210 ui.warn(_('Skipping already folded patch %s\n') % p)
2211 if q.isapplied(p):
2211 if q.isapplied(p):
2212 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2212 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2213 patches.append(p)
2213 patches.append(p)
2214
2214
2215 for p in patches:
2215 for p in patches:
2216 if not message:
2216 if not message:
2217 ph = patchheader(q.join(p), q.plainmode)
2217 ph = patchheader(q.join(p), q.plainmode)
2218 if ph.message:
2218 if ph.message:
2219 messages.append(ph.message)
2219 messages.append(ph.message)
2220 pf = q.join(p)
2220 pf = q.join(p)
2221 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2221 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2222 if not patchsuccess:
2222 if not patchsuccess:
2223 raise util.Abort(_('error folding patch %s') % p)
2223 raise util.Abort(_('error folding patch %s') % p)
2224 cmdutil.updatedir(ui, repo, files)
2224 cmdutil.updatedir(ui, repo, files)
2225
2225
2226 if not message:
2226 if not message:
2227 ph = patchheader(q.join(parent), q.plainmode)
2227 ph = patchheader(q.join(parent), q.plainmode)
2228 message, user = ph.message, ph.user
2228 message, user = ph.message, ph.user
2229 for msg in messages:
2229 for msg in messages:
2230 message.append('* * *')
2230 message.append('* * *')
2231 message.extend(msg)
2231 message.extend(msg)
2232 message = '\n'.join(message)
2232 message = '\n'.join(message)
2233
2233
2234 if opts.get('edit'):
2234 if opts.get('edit'):
2235 message = ui.edit(message, user or ui.username())
2235 message = ui.edit(message, user or ui.username())
2236
2236
2237 diffopts = q.patchopts(q.diffopts(), *patches)
2237 diffopts = q.patchopts(q.diffopts(), *patches)
2238 q.refresh(repo, msg=message, git=diffopts.git)
2238 q.refresh(repo, msg=message, git=diffopts.git)
2239 q.delete(repo, patches, opts)
2239 q.delete(repo, patches, opts)
2240 q.save_dirty()
2240 q.save_dirty()
2241
2241
2242 def goto(ui, repo, patch, **opts):
2242 def goto(ui, repo, patch, **opts):
2243 '''push or pop patches until named patch is at top of stack
2243 '''push or pop patches until named patch is at top of stack
2244
2244
2245 Returns 0 on success.'''
2245 Returns 0 on success.'''
2246 q = repo.mq
2246 q = repo.mq
2247 patch = q.lookup(patch)
2247 patch = q.lookup(patch)
2248 if q.isapplied(patch):
2248 if q.isapplied(patch):
2249 ret = q.pop(repo, patch, force=opts.get('force'))
2249 ret = q.pop(repo, patch, force=opts.get('force'))
2250 else:
2250 else:
2251 ret = q.push(repo, patch, force=opts.get('force'))
2251 ret = q.push(repo, patch, force=opts.get('force'))
2252 q.save_dirty()
2252 q.save_dirty()
2253 return ret
2253 return ret
2254
2254
2255 def guard(ui, repo, *args, **opts):
2255 def guard(ui, repo, *args, **opts):
2256 '''set or print guards for a patch
2256 '''set or print guards for a patch
2257
2257
2258 Guards control whether a patch can be pushed. A patch with no
2258 Guards control whether a patch can be pushed. A patch with no
2259 guards is always pushed. A patch with a positive guard ("+foo") is
2259 guards is always pushed. A patch with a positive guard ("+foo") is
2260 pushed only if the :hg:`qselect` command has activated it. A patch with
2260 pushed only if the :hg:`qselect` command has activated it. A patch with
2261 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2261 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2262 has activated it.
2262 has activated it.
2263
2263
2264 With no arguments, print the currently active guards.
2264 With no arguments, print the currently active guards.
2265 With arguments, set guards for the named patch.
2265 With arguments, set guards for the named patch.
2266
2266
2267 .. note::
2267 .. note::
2268 Specifying negative guards now requires '--'.
2268 Specifying negative guards now requires '--'.
2269
2269
2270 To set guards on another patch::
2270 To set guards on another patch::
2271
2271
2272 hg qguard other.patch -- +2.6.17 -stable
2272 hg qguard other.patch -- +2.6.17 -stable
2273
2273
2274 Returns 0 on success.
2274 Returns 0 on success.
2275 '''
2275 '''
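# status(idx) prints the series entry at index idx: the patch name is
# labelled according to whether it is applied, unapplied (pushable) or
# guarded, and each guard is written with a positive, negative or
# unguarded label.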
2276 def status(idx):
2276 def status(idx):
2277 guards = q.series_guards[idx] or ['unguarded']
2277 guards = q.series_guards[idx] or ['unguarded']
2278 if q.series[idx] in applied:
2278 if q.series[idx] in applied:
2279 state = 'applied'
2279 state = 'applied'
2280 elif q.pushable(idx)[0]:
2280 elif q.pushable(idx)[0]:
2281 state = 'unapplied'
2281 state = 'unapplied'
2282 else:
2282 else:
2283 state = 'guarded'
2283 state = 'guarded'
2284 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2284 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2285 ui.write('%s: ' % ui.label(q.series[idx], label))
2285 ui.write('%s: ' % ui.label(q.series[idx], label))
2286
2286
2287 for i, guard in enumerate(guards):
2287 for i, guard in enumerate(guards):
2288 if guard.startswith('+'):
2288 if guard.startswith('+'):
2289 ui.write(guard, label='qguard.positive')
2289 ui.write(guard, label='qguard.positive')
2290 elif guard.startswith('-'):
2290 elif guard.startswith('-'):
2291 ui.write(guard, label='qguard.negative')
2291 ui.write(guard, label='qguard.negative')
2292 else:
2292 else:
2293 ui.write(guard, label='qguard.unguarded')
2293 ui.write(guard, label='qguard.unguarded')
2294 if i != len(guards) - 1:
2294 if i != len(guards) - 1:
2295 ui.write(' ')
2295 ui.write(' ')
2296 ui.write('\n')
2296 ui.write('\n')
2297 q = repo.mq
2297 q = repo.mq
2298 applied = set(p.name for p in q.applied)
2298 applied = set(p.name for p in q.applied)
2299 patch = None
2299 patch = None
2300 args = list(args)
2300 args = list(args)
2301 if opts.get('list'):
2301 if opts.get('list'):
2302 if args or opts.get('none'):
2302 if args or opts.get('none'):
2303 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2303 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2304 for i in xrange(len(q.series)):
2304 for i in xrange(len(q.series)):
2305 status(i)
2305 status(i)
2306 return
2306 return
2307 if not args or args[0][0:1] in '-+':
2307 if not args or args[0][0:1] in '-+':
2308 if not q.applied:
2308 if not q.applied:
2309 raise util.Abort(_('no patches applied'))
2309 raise util.Abort(_('no patches applied'))
2310 patch = q.applied[-1].name
2310 patch = q.applied[-1].name
2311 if patch is None and args[0][0:1] not in '-+':
2311 if patch is None and args[0][0:1] not in '-+':
2312 patch = args.pop(0)
2312 patch = args.pop(0)
2313 if patch is None:
2313 if patch is None:
2314 raise util.Abort(_('no patch to work with'))
2314 raise util.Abort(_('no patch to work with'))
2315 if args or opts.get('none'):
2315 if args or opts.get('none'):
2316 idx = q.find_series(patch)
2316 idx = q.find_series(patch)
2317 if idx is None:
2317 if idx is None:
2318 raise util.Abort(_('no patch named %s') % patch)
2318 raise util.Abort(_('no patch named %s') % patch)
2319 q.set_guards(idx, args)
2319 q.set_guards(idx, args)
2320 q.save_dirty()
2320 q.save_dirty()
2321 else:
2321 else:
2322 status(q.series.index(q.lookup(patch)))
2322 status(q.series.index(q.lookup(patch)))
2323
2323
2324 def header(ui, repo, patch=None):
2324 def header(ui, repo, patch=None):
2325 """print the header of the topmost or specified patch
2325 """print the header of the topmost or specified patch
2326
2326
2327 Returns 0 on success."""
2327 Returns 0 on success."""
2328 q = repo.mq
2328 q = repo.mq
2329
2329
2330 if patch:
2330 if patch:
2331 patch = q.lookup(patch)
2331 patch = q.lookup(patch)
2332 else:
2332 else:
2333 if not q.applied:
2333 if not q.applied:
2334 ui.write(_('no patches applied\n'))
2334 ui.write(_('no patches applied\n'))
2335 return 1
2335 return 1
2336 patch = q.lookup('qtip')
2336 patch = q.lookup('qtip')
2337 ph = patchheader(q.join(patch), q.plainmode)
2337 ph = patchheader(q.join(patch), q.plainmode)
2338
2338
2339 ui.write('\n'.join(ph.message) + '\n')
2339 ui.write('\n'.join(ph.message) + '\n')
2340
2340
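# Saved-queue helpers: lastsavename(path) scans the directory of path for
# entries named "<base>.<N>" and returns the one with the highest numeric
# suffix along with that index, or (None, None) if there are none;
# savename(path) builds the next free name, "<path>.<N+1>".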
2341 def lastsavename(path):
2341 def lastsavename(path):
2342 (directory, base) = os.path.split(path)
2342 (directory, base) = os.path.split(path)
2343 names = os.listdir(directory)
2343 names = os.listdir(directory)
2344 namere = re.compile("%s.([0-9]+)" % base)
2344 namere = re.compile("%s.([0-9]+)" % base)
2345 maxindex = None
2345 maxindex = None
2346 maxname = None
2346 maxname = None
2347 for f in names:
2347 for f in names:
2348 m = namere.match(f)
2348 m = namere.match(f)
2349 if m:
2349 if m:
2350 index = int(m.group(1))
2350 index = int(m.group(1))
2351 if maxindex is None or index > maxindex:
2351 if maxindex is None or index > maxindex:
2352 maxindex = index
2352 maxindex = index
2353 maxname = f
2353 maxname = f
2354 if maxname:
2354 if maxname:
2355 return (os.path.join(directory, maxname), maxindex)
2355 return (os.path.join(directory, maxname), maxindex)
2356 return (None, None)
2356 return (None, None)
2357
2357
2358 def savename(path):
2358 def savename(path):
2359 (last, index) = lastsavename(path)
2359 (last, index) = lastsavename(path)
2360 if last is None:
2360 if last is None:
2361 index = 0
2361 index = 0
2362 newpath = path + ".%d" % (index + 1)
2362 newpath = path + ".%d" % (index + 1)
2363 return newpath
2363 return newpath
2364
2364
2365 def push(ui, repo, patch=None, **opts):
2365 def push(ui, repo, patch=None, **opts):
2366 """push the next patch onto the stack
2366 """push the next patch onto the stack
2367
2367
2368 When -f/--force is given, all local changes in patched files
2368 When -f/--force is given, all local changes in patched files
2369 will be lost.
2369 will be lost.
2370
2370
2371 Return 0 on success.
2371 Return 0 on success.
2372 """
2372 """
2373 q = repo.mq
2373 q = repo.mq
2374 mergeq = None
2374 mergeq = None
2375
2375
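# --merge pushes against a previously saved queue: with -n/--name the saved
# queue directory is taken from that name, otherwise the most recently
# saved one (found via lastsavename) is used, and if none exists the command
# warns and returns 1.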
2376 if opts.get('merge'):
2376 if opts.get('merge'):
2377 if opts.get('name'):
2377 if opts.get('name'):
2378 newpath = repo.join(opts.get('name'))
2378 newpath = repo.join(opts.get('name'))
2379 else:
2379 else:
2380 newpath, i = lastsavename(q.path)
2380 newpath, i = lastsavename(q.path)
2381 if not newpath:
2381 if not newpath:
2382 ui.warn(_("no saved queues found, please use -n\n"))
2382 ui.warn(_("no saved queues found, please use -n\n"))
2383 return 1
2383 return 1
2384 mergeq = queue(ui, repo.join(""), newpath)
2384 mergeq = queue(ui, repo.join(""), newpath)
2385 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2385 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2386 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2386 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2387 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2387 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2388 exact=opts.get('exact'))
2388 exact=opts.get('exact'))
2389 return ret
2389 return ret
2390
2390
2391 def pop(ui, repo, patch=None, **opts):
2391 def pop(ui, repo, patch=None, **opts):
2392 """pop the current patch off the stack
2392 """pop the current patch off the stack
2393
2393
2394 By default, pops off the top of the patch stack. If given a patch
2394 By default, pops off the top of the patch stack. If given a patch
2395 name, keeps popping off patches until the named patch is at the
2395 name, keeps popping off patches until the named patch is at the
2396 top of the stack.
2396 top of the stack.
2397
2397
2398 Return 0 on success.
2398 Return 0 on success.
2399 """
2399 """
2400 localupdate = True
2400 localupdate = True
2401 if opts.get('name'):
2401 if opts.get('name'):
2402 q = queue(ui, repo.join(""), repo.join(opts.get('name')))
2402 q = queue(ui, repo.join(""), repo.join(opts.get('name')))
2403 ui.warn(_('using patch queue: %s\n') % q.path)
2403 ui.warn(_('using patch queue: %s\n') % q.path)
2404 localupdate = False
2404 localupdate = False
2405 else:
2405 else:
2406 q = repo.mq
2406 q = repo.mq
2407 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2407 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2408 all=opts.get('all'))
2408 all=opts.get('all'))
2409 q.save_dirty()
2409 q.save_dirty()
2410 return ret
2410 return ret
2411
2411
2412 def rename(ui, repo, patch, name=None, **opts):
2412 def rename(ui, repo, patch, name=None, **opts):
2413 """rename a patch
2413 """rename a patch
2414
2414
2415 With one argument, renames the current patch to PATCH1.
2415 With one argument, renames the current patch to PATCH1.
2416 With two arguments, renames PATCH1 to PATCH2.
2416 With two arguments, renames PATCH1 to PATCH2.
2417
2417
2418 Returns 0 on success."""
2418 Returns 0 on success."""
2419
2419
2420 q = repo.mq
2420 q = repo.mq
2421
2421
2422 if not name:
2422 if not name:
2423 name = patch
2423 name = patch
2424 patch = None
2424 patch = None
2425
2425
2426 if patch:
2426 if patch:
2427 patch = q.lookup(patch)
2427 patch = q.lookup(patch)
2428 else:
2428 else:
2429 if not q.applied:
2429 if not q.applied:
2430 ui.write(_('no patches applied\n'))
2430 ui.write(_('no patches applied\n'))
2431 return
2431 return
2432 patch = q.lookup('qtip')
2432 patch = q.lookup('qtip')
2433 absdest = q.join(name)
2433 absdest = q.join(name)
2434 if os.path.isdir(absdest):
2434 if os.path.isdir(absdest):
2435 name = normname(os.path.join(name, os.path.basename(patch)))
2435 name = normname(os.path.join(name, os.path.basename(patch)))
2436 absdest = q.join(name)
2436 absdest = q.join(name)
2437 if os.path.exists(absdest):
2437 if os.path.exists(absdest):
2438 raise util.Abort(_('%s already exists') % absdest)
2438 raise util.Abort(_('%s already exists') % absdest)
2439
2439
2440 if name in q.series:
2440 if name in q.series:
2441 raise util.Abort(
2441 raise util.Abort(
2442 _('A patch named %s already exists in the series file') % name)
2442 _('A patch named %s already exists in the series file') % name)
2443
2443
2444 ui.note(_('renaming %s to %s\n') % (patch, name))
2444 ui.note(_('renaming %s to %s\n') % (patch, name))
2445 i = q.find_series(patch)
2445 i = q.find_series(patch)
2446 guards = q.guard_re.findall(q.full_series[i])
2446 guards = q.guard_re.findall(q.full_series[i])
2447 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2447 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2448 q.parse_series()
2448 q.parse_series()
2449 q.series_dirty = 1
2449 q.series_dirty = 1
2450
2450
2451 info = q.isapplied(patch)
2451 info = q.isapplied(patch)
2452 if info:
2452 if info:
2453 q.applied[info[0]] = statusentry(info[1], name)
2453 q.applied[info[0]] = statusentry(info[1], name)
2454 q.applied_dirty = 1
2454 q.applied_dirty = 1
2455
2455
2456 destdir = os.path.dirname(absdest)
2456 destdir = os.path.dirname(absdest)
2457 if not os.path.isdir(destdir):
2457 if not os.path.isdir(destdir):
2458 os.makedirs(destdir)
2458 os.makedirs(destdir)
2459 util.rename(q.join(patch), absdest)
2459 util.rename(q.join(patch), absdest)
2460 r = q.qrepo()
2460 r = q.qrepo()
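# If the patch file is tracked in a versioned patch repository, mirror the
# rename there: a file that was only added is forgotten and re-added under
# the new name, otherwise the rename is recorded as a copy plus removal
# (undeleting the target first if it was scheduled for removal).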
2461 if r and patch in r.dirstate:
2461 if r and patch in r.dirstate:
2462 wctx = r[None]
2462 wctx = r[None]
2463 wlock = r.wlock()
2463 wlock = r.wlock()
2464 try:
2464 try:
2465 if r.dirstate[patch] == 'a':
2465 if r.dirstate[patch] == 'a':
2466 r.dirstate.forget(patch)
2466 r.dirstate.forget(patch)
2467 r.dirstate.add(name)
2467 r.dirstate.add(name)
2468 else:
2468 else:
2469 if r.dirstate[name] == 'r':
2469 if r.dirstate[name] == 'r':
2470 wctx.undelete([name])
2470 wctx.undelete([name])
2471 wctx.copy(patch, name)
2471 wctx.copy(patch, name)
2472 wctx.remove([patch], False)
2472 wctx.remove([patch], False)
2473 finally:
2473 finally:
2474 wlock.release()
2474 wlock.release()
2475
2475
2476 q.save_dirty()
2476 q.save_dirty()
2477
2477
2478 def restore(ui, repo, rev, **opts):
2478 def restore(ui, repo, rev, **opts):
2479 """restore the queue state saved by a revision (DEPRECATED)
2479 """restore the queue state saved by a revision (DEPRECATED)
2480
2480
2481 This command is deprecated, use :hg:`rebase` instead."""
2481 This command is deprecated, use :hg:`rebase` instead."""
2482 rev = repo.lookup(rev)
2482 rev = repo.lookup(rev)
2483 q = repo.mq
2483 q = repo.mq
2484 q.restore(repo, rev, delete=opts.get('delete'),
2484 q.restore(repo, rev, delete=opts.get('delete'),
2485 qupdate=opts.get('update'))
2485 qupdate=opts.get('update'))
2486 q.save_dirty()
2486 q.save_dirty()
2487 return 0
2487 return 0
2488
2488
2489 def save(ui, repo, **opts):
2489 def save(ui, repo, **opts):
2490 """save current queue state (DEPRECATED)
2490 """save current queue state (DEPRECATED)
2491
2491
2492 This command is deprecated, use :hg:`rebase` instead."""
2492 This command is deprecated, use :hg:`rebase` instead."""
2493 q = repo.mq
2493 q = repo.mq
2494 message = cmdutil.logmessage(opts)
2494 message = cmdutil.logmessage(opts)
2495 ret = q.save(repo, msg=message)
2495 ret = q.save(repo, msg=message)
2496 if ret:
2496 if ret:
2497 return ret
2497 return ret
2498 q.save_dirty()
2498 q.save_dirty()
2499 if opts.get('copy'):
2499 if opts.get('copy'):
2500 path = q.path
2500 path = q.path
2501 if opts.get('name'):
2501 if opts.get('name'):
2502 newpath = os.path.join(q.basepath, opts.get('name'))
2502 newpath = os.path.join(q.basepath, opts.get('name'))
2503 if os.path.exists(newpath):
2503 if os.path.exists(newpath):
2504 if not os.path.isdir(newpath):
2504 if not os.path.isdir(newpath):
2505 raise util.Abort(_('destination %s exists and is not '
2505 raise util.Abort(_('destination %s exists and is not '
2506 'a directory') % newpath)
2506 'a directory') % newpath)
2507 if not opts.get('force'):
2507 if not opts.get('force'):
2508 raise util.Abort(_('destination %s exists, '
2508 raise util.Abort(_('destination %s exists, '
2509 'use -f to force') % newpath)
2509 'use -f to force') % newpath)
2510 else:
2510 else:
2511 newpath = savename(path)
2511 newpath = savename(path)
2512 ui.warn(_("copy %s to %s\n") % (path, newpath))
2512 ui.warn(_("copy %s to %s\n") % (path, newpath))
2513 util.copyfiles(path, newpath)
2513 util.copyfiles(path, newpath)
2514 if opts.get('empty'):
2514 if opts.get('empty'):
2515 try:
2515 try:
2516 os.unlink(q.join(q.status_path))
2516 os.unlink(q.join(q.status_path))
2517 except OSError:
2517 except OSError:
2518 pass
2518 pass
2519 return 0
2519 return 0
2520
2520
2521 def strip(ui, repo, *revs, **opts):
2521 def strip(ui, repo, *revs, **opts):
2522 """strip changesets and all their descendants from the repository
2522 """strip changesets and all their descendants from the repository
2523
2523
2524 The strip command removes the specified changesets and all their
2524 The strip command removes the specified changesets and all their
2525 descendants. If the working directory has uncommitted changes,
2525 descendants. If the working directory has uncommitted changes,
2526 the operation is aborted unless the --force flag is supplied.
2526 the operation is aborted unless the --force flag is supplied.
2527
2527
2528 If a parent of the working directory is stripped, then the working
2528 If a parent of the working directory is stripped, then the working
2529 directory will automatically be updated to the most recent
2529 directory will automatically be updated to the most recent
2530 available ancestor of the stripped parent after the operation
2530 available ancestor of the stripped parent after the operation
2531 completes.
2531 completes.
2532
2532
2533 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2533 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2534 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2534 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2535 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2535 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2536 where BUNDLE is the bundle file created by the strip. Note that
2536 where BUNDLE is the bundle file created by the strip. Note that
2537 the local revision numbers will in general be different after the
2537 the local revision numbers will in general be different after the
2538 restore.
2538 restore.
2539
2539
2540 Use the --no-backup option to discard the backup bundle once the
2540 Use the --no-backup option to discard the backup bundle once the
2541 operation completes.
2541 operation completes.
2542
2542
2543 Return 0 on success.
2543 Return 0 on success.
2544 """
2544 """
2545 backup = 'all'
2545 backup = 'all'
2546 if opts.get('backup'):
2546 if opts.get('backup'):
2547 backup = 'strip'
2547 backup = 'strip'
2548 elif opts.get('no_backup') or opts.get('nobackup'):
2548 elif opts.get('no_backup') or opts.get('nobackup'):
2549 backup = 'none'
2549 backup = 'none'
2550
2550
2551 cl = repo.changelog
2551 cl = repo.changelog
2552 revs = set(cmdutil.revrange(repo, revs))
2552 revs = set(cmdutil.revrange(repo, revs))
2553 if not revs:
2553 if not revs:
2554 raise util.Abort(_('empty revision set'))
2554 raise util.Abort(_('empty revision set'))
2555
2555
2556 descendants = set(cl.descendants(*revs))
2556 descendants = set(cl.descendants(*revs))
2557 strippedrevs = revs.union(descendants)
2557 strippedrevs = revs.union(descendants)
2558 roots = revs.difference(descendants)
2558 roots = revs.difference(descendants)
2559
2559
2560 update = False
2560 update = False
2561 # if one of the wdir parent is stripped we'll need
2561 # if one of the wdir parent is stripped we'll need
2562 # to update away to an earlier revision
2562 # to update away to an earlier revision
2563 for p in repo.dirstate.parents():
2563 for p in repo.dirstate.parents():
2564 if p != nullid and cl.rev(p) in strippedrevs:
2564 if p != nullid and cl.rev(p) in strippedrevs:
2565 update = True
2565 update = True
2566 break
2566 break
2567
2567
2568 rootnodes = set(cl.node(r) for r in roots)
2568 rootnodes = set(cl.node(r) for r in roots)
2569
2569
2570 q = repo.mq
2570 q = repo.mq
2571 if q.applied:
2571 if q.applied:
2572 # refresh queue state if we're about to strip
2572 # refresh queue state if we're about to strip
2573 # applied patches
2573 # applied patches
2574 if cl.rev(repo.lookup('qtip')) in strippedrevs:
2574 if cl.rev(repo.lookup('qtip')) in strippedrevs:
2575 q.applied_dirty = True
2575 q.applied_dirty = True
2576 start = 0
2576 start = 0
2577 end = len(q.applied)
2577 end = len(q.applied)
2578 for i, statusentry in enumerate(q.applied):
2578 for i, statusentry in enumerate(q.applied):
2579 if statusentry.node in rootnodes:
2579 if statusentry.node in rootnodes:
2580 # if one of the stripped roots is an applied
2580 # if one of the stripped roots is an applied
2581 # patch, only part of the queue is stripped
2581 # patch, only part of the queue is stripped
2582 start = i
2582 start = i
2583 break
2583 break
2584 del q.applied[start:end]
2584 del q.applied[start:end]
2585 q.save_dirty()
2585 q.save_dirty()
2586
2586
2587 revs = list(rootnodes)
2587 revs = list(rootnodes)
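# With --keep the working directory is not updated away from the stripped
# revisions; instead the dirstate is rebuilt at the revision that becomes
# the new parent, so the stripped changes remain as local modifications.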
2588 if update and opts.get('keep'):
2588 if update and opts.get('keep'):
2589 wlock = repo.wlock()
2589 wlock = repo.wlock()
2590 try:
2590 try:
2591 urev = repo.mq.qparents(repo, revs[0])
2591 urev = repo.mq.qparents(repo, revs[0])
2592 repo.dirstate.rebuild(urev, repo[urev].manifest())
2592 repo.dirstate.rebuild(urev, repo[urev].manifest())
2593 repo.dirstate.write()
2593 repo.dirstate.write()
2594 update = False
2594 update = False
2595 finally:
2595 finally:
2596 wlock.release()
2596 wlock.release()
2597
2597
2598 repo.mq.strip(repo, revs, backup=backup, update=update,
2598 repo.mq.strip(repo, revs, backup=backup, update=update,
2599 force=opts.get('force'))
2599 force=opts.get('force'))
2600 return 0
2600 return 0
2601
2601
2602 def select(ui, repo, *args, **opts):
2602 def select(ui, repo, *args, **opts):
2603 '''set or print guarded patches to push
2603 '''set or print guarded patches to push
2604
2604
2605 Use the :hg:`qguard` command to set or print guards on patch, then use
2605 Use the :hg:`qguard` command to set or print guards on patch, then use
2606 qselect to tell mq which guards to use. A patch will be pushed if
2606 qselect to tell mq which guards to use. A patch will be pushed if
2607 it has no guards or any positive guards match the currently
2607 it has no guards or any positive guards match the currently
2608 selected guard, but will not be pushed if any negative guards
2608 selected guard, but will not be pushed if any negative guards
2609 match the current guard. For example::
2609 match the current guard. For example::
2610
2610
2611 qguard foo.patch -stable (negative guard)
2611 qguard foo.patch -stable (negative guard)
2612 qguard bar.patch +stable (positive guard)
2612 qguard bar.patch +stable (positive guard)
2613 qselect stable
2613 qselect stable
2614
2614
2615 This activates the "stable" guard. mq will skip foo.patch (because
2615 This activates the "stable" guard. mq will skip foo.patch (because
2616 it has a negative match) but push bar.patch (because it has a
2616 it has a negative match) but push bar.patch (because it has a
2617 positive match).
2617 positive match).
2618
2618
2619 With no arguments, prints the currently active guards.
2619 With no arguments, prints the currently active guards.
2620 With one argument, sets the active guard.
2620 With one argument, sets the active guard.
2621
2621
2622 Use -n/--none to deactivate guards (no other arguments needed).
2622 Use -n/--none to deactivate guards (no other arguments needed).
2623 When no guards are active, patches with positive guards are
2623 When no guards are active, patches with positive guards are
2624 skipped and patches with negative guards are pushed.
2624 skipped and patches with negative guards are pushed.
2625
2625
2626 qselect can change the guards on applied patches. It does not pop
2626 qselect can change the guards on applied patches. It does not pop
2627 guarded patches by default. Use --pop to pop back to the last
2627 guarded patches by default. Use --pop to pop back to the last
2628 applied patch that is not guarded. Use --reapply (which implies
2628 applied patch that is not guarded. Use --reapply (which implies
2629 --pop) to push back to the current patch afterwards, but skip
2629 --pop) to push back to the current patch afterwards, but skip
2630 guarded patches.
2630 guarded patches.
2631
2631
2632 Use -s/--series to print a list of all guards in the series file
2632 Use -s/--series to print a list of all guards in the series file
2633 (no other arguments needed). Use -v for more information.
2633 (no other arguments needed). Use -v for more information.
2634
2634
2635 Returns 0 on success.'''
2635 Returns 0 on success.'''
2636
2636
2637 q = repo.mq
2637 q = repo.mq
2638 guards = q.active()
2638 guards = q.active()
2639 if args or opts.get('none'):
2639 if args or opts.get('none'):
2640 old_unapplied = q.unapplied(repo)
2640 old_unapplied = q.unapplied(repo)
2641 old_guarded = [i for i in xrange(len(q.applied)) if
2641 old_guarded = [i for i in xrange(len(q.applied)) if
2642 not q.pushable(i)[0]]
2642 not q.pushable(i)[0]]
2643 q.set_active(args)
2643 q.set_active(args)
2644 q.save_dirty()
2644 q.save_dirty()
2645 if not args:
2645 if not args:
2646 ui.status(_('guards deactivated\n'))
2646 ui.status(_('guards deactivated\n'))
2647 if not opts.get('pop') and not opts.get('reapply'):
2647 if not opts.get('pop') and not opts.get('reapply'):
2648 unapplied = q.unapplied(repo)
2648 unapplied = q.unapplied(repo)
2649 guarded = [i for i in xrange(len(q.applied))
2649 guarded = [i for i in xrange(len(q.applied))
2650 if not q.pushable(i)[0]]
2650 if not q.pushable(i)[0]]
2651 if len(unapplied) != len(old_unapplied):
2651 if len(unapplied) != len(old_unapplied):
2652 ui.status(_('number of unguarded, unapplied patches has '
2652 ui.status(_('number of unguarded, unapplied patches has '
2653 'changed from %d to %d\n') %
2653 'changed from %d to %d\n') %
2654 (len(old_unapplied), len(unapplied)))
2654 (len(old_unapplied), len(unapplied)))
2655 if len(guarded) != len(old_guarded):
2655 if len(guarded) != len(old_guarded):
2656 ui.status(_('number of guarded, applied patches has changed '
2656 ui.status(_('number of guarded, applied patches has changed '
2657 'from %d to %d\n') %
2657 'from %d to %d\n') %
2658 (len(old_guarded), len(guarded)))
2658 (len(old_guarded), len(guarded)))
2659 elif opts.get('series'):
2659 elif opts.get('series'):
2660 guards = {}
2660 guards = {}
2661 noguards = 0
2661 noguards = 0
2662 for gs in q.series_guards:
2662 for gs in q.series_guards:
2663 if not gs:
2663 if not gs:
2664 noguards += 1
2664 noguards += 1
2665 for g in gs:
2665 for g in gs:
2666 guards.setdefault(g, 0)
2666 guards.setdefault(g, 0)
2667 guards[g] += 1
2667 guards[g] += 1
2668 if ui.verbose:
2668 if ui.verbose:
2669 guards['NONE'] = noguards
2669 guards['NONE'] = noguards
2670 guards = guards.items()
2670 guards = guards.items()
2671 guards.sort(key=lambda x: x[0][1:])
2671 guards.sort(key=lambda x: x[0][1:])
2672 if guards:
2672 if guards:
2673 ui.note(_('guards in series file:\n'))
2673 ui.note(_('guards in series file:\n'))
2674 for guard, count in guards:
2674 for guard, count in guards:
2675 ui.note('%2d ' % count)
2675 ui.note('%2d ' % count)
2676 ui.write(guard, '\n')
2676 ui.write(guard, '\n')
2677 else:
2677 else:
2678 ui.note(_('no guards in series file\n'))
2678 ui.note(_('no guards in series file\n'))
2679 else:
2679 else:
2680 if guards:
2680 if guards:
2681 ui.note(_('active guards:\n'))
2681 ui.note(_('active guards:\n'))
2682 for g in guards:
2682 for g in guards:
2683 ui.write(g, '\n')
2683 ui.write(g, '\n')
2684 else:
2684 else:
2685 ui.write(_('no active guards\n'))
2685 ui.write(_('no active guards\n'))
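# --pop/--reapply: find the first applied patch that is no longer pushable
# under the newly selected guards and pop down to just below it; with
# --reapply, push back up to the patch that was previously on top, skipping
# patches that are now guarded.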
2686 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
2686 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
2687 popped = False
2687 popped = False
2688 if opts.get('pop') or opts.get('reapply'):
2688 if opts.get('pop') or opts.get('reapply'):
2689 for i in xrange(len(q.applied)):
2689 for i in xrange(len(q.applied)):
2690 pushable, reason = q.pushable(i)
2690 pushable, reason = q.pushable(i)
2691 if not pushable:
2691 if not pushable:
2692 ui.status(_('popping guarded patches\n'))
2692 ui.status(_('popping guarded patches\n'))
2693 popped = True
2693 popped = True
2694 if i == 0:
2694 if i == 0:
2695 q.pop(repo, all=True)
2695 q.pop(repo, all=True)
2696 else:
2696 else:
2697 q.pop(repo, i - 1)
2697 q.pop(repo, i - 1)
2698 break
2698 break
2699 if popped:
2699 if popped:
2700 try:
2700 try:
2701 if reapply:
2701 if reapply:
2702 ui.status(_('reapplying unguarded patches\n'))
2702 ui.status(_('reapplying unguarded patches\n'))
2703 q.push(repo, reapply)
2703 q.push(repo, reapply)
2704 finally:
2704 finally:
2705 q.save_dirty()
2705 q.save_dirty()
2706
2706
2707 def finish(ui, repo, *revrange, **opts):
2707 def finish(ui, repo, *revrange, **opts):
2708 """move applied patches into repository history
2708 """move applied patches into repository history
2709
2709
2710 Finishes the specified revisions (corresponding to applied
2710 Finishes the specified revisions (corresponding to applied
2711 patches) by moving them out of mq control into regular repository
2711 patches) by moving them out of mq control into regular repository
2712 history.
2712 history.
2713
2713
2714 Accepts a revision range or the -a/--applied option. If --applied
2714 Accepts a revision range or the -a/--applied option. If --applied
2715 is specified, all applied mq revisions are removed from mq
2715 is specified, all applied mq revisions are removed from mq
2716 control. Otherwise, the given revisions must be at the base of the
2716 control. Otherwise, the given revisions must be at the base of the
2717 stack of applied patches.
2717 stack of applied patches.
2718
2718
2719 This can be especially useful if your changes have been applied to
2719 This can be especially useful if your changes have been applied to
2720 an upstream repository, or if you are about to push your changes
2720 an upstream repository, or if you are about to push your changes
2721 to upstream.
2721 to upstream.
2722
2722
2723 Returns 0 on success.
2723 Returns 0 on success.
2724 """
2724 """
2725 if not opts.get('applied') and not revrange:
2725 if not opts.get('applied') and not revrange:
2726 raise util.Abort(_('no revisions specified'))
2726 raise util.Abort(_('no revisions specified'))
2727 elif opts.get('applied'):
2727 elif opts.get('applied'):
2728 revrange = ('qbase::qtip',) + revrange
2728 revrange = ('qbase::qtip',) + revrange
2729
2729
2730 q = repo.mq
2730 q = repo.mq
2731 if not q.applied:
2731 if not q.applied:
2732 ui.status(_('no patches applied\n'))
2732 ui.status(_('no patches applied\n'))
2733 return 0
2733 return 0
2734
2734
2735 revs = cmdutil.revrange(repo, revrange)
2735 revs = cmdutil.revrange(repo, revrange)
2736 q.finish(repo, revs)
2736 q.finish(repo, revs)
2737 q.save_dirty()
2737 q.save_dirty()
2738 return 0
2738 return 0
2739
2739
2740 def qqueue(ui, repo, name=None, **opts):
2740 def qqueue(ui, repo, name=None, **opts):
2741 '''manage multiple patch queues
2741 '''manage multiple patch queues
2742
2742
2743 Supports switching between different patch queues, as well as creating
2743 Supports switching between different patch queues, as well as creating
2744 new patch queues and deleting existing ones.
2744 new patch queues and deleting existing ones.
2745
2745
2746 Omitting a queue name or specifying -l/--list will show you the registered
2746 Omitting a queue name or specifying -l/--list will show you the registered
2747 queues - by default the "normal" patches queue is registered. The currently
2747 queues - by default the "normal" patches queue is registered. The currently
2748 active queue will be marked with "(active)".
2748 active queue will be marked with "(active)".
2749
2749
2750 To create a new queue, use -c/--create. The queue is automatically made
2750 To create a new queue, use -c/--create. The queue is automatically made
2751 active, unless there are applied patches from the currently active queue
2751 active, unless there are applied patches from the currently active queue
2752 in the repository; in that case the new queue is only created, and
2752 in the repository; in that case the new queue is only created, and
2753 switching to it fails.
2753 switching to it fails.
2754
2754
2755 To delete an existing queue, use --delete. You cannot delete the currently
2755 To delete an existing queue, use --delete. You cannot delete the currently
2756 active queue.
2756 active queue.
2757
2757
2758 Returns 0 on success.
2758 Returns 0 on success.
2759 '''
2759 '''
2760
2760
2761 q = repo.mq
2761 q = repo.mq
2762
2762
2763 _defaultqueue = 'patches'
2763 _defaultqueue = 'patches'
2764 _allqueues = 'patches.queues'
2764 _allqueues = 'patches.queues'
2765 _activequeue = 'patches.queue'
2765 _activequeue = 'patches.queue'
2766
2766
2767 def _getcurrent():
2767 def _getcurrent():
2768 cur = os.path.basename(q.path)
2768 cur = os.path.basename(q.path)
2769 if cur.startswith('patches-'):
2769 if cur.startswith('patches-'):
2770 cur = cur[8:]
2770 cur = cur[8:]
2771 return cur
2771 return cur
2772
2772
2773 def _noqueues():
2773 def _noqueues():
2774 try:
2774 try:
2775 fh = repo.opener(_allqueues, 'r')
2775 fh = repo.opener(_allqueues, 'r')
2776 fh.close()
2776 fh.close()
2777 except IOError:
2777 except IOError:
2778 return True
2778 return True
2779
2779
2780 return False
2780 return False
2781
2781
2782 def _getqueues():
2782 def _getqueues():
2783 current = _getcurrent()
2783 current = _getcurrent()
2784
2784
2785 try:
2785 try:
2786 fh = repo.opener(_allqueues, 'r')
2786 fh = repo.opener(_allqueues, 'r')
2787 queues = [queue.strip() for queue in fh if queue.strip()]
2787 queues = [queue.strip() for queue in fh if queue.strip()]
2788 if current not in queues:
2788 if current not in queues:
2789 queues.append(current)
2789 queues.append(current)
2790 except IOError:
2790 except IOError:
2791 queues = [_defaultqueue]
2791 queues = [_defaultqueue]
2792
2792
2793 return sorted(queues)
2793 return sorted(queues)
2794
2794
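The helpers above keep the queue registry in two plain text files under .hg: patches.queues lists one queue name per line, and patches.queue records the active queue (written empty for the default 'patches' queue). A small sketch of that parsing, with file handling reduced to in-memory strings; the real _getcurrent derives the active name from the on-disk directory instead:

def parse_queues(all_queues_text, active_text, default='patches'):
    """Parse the registry the way _getqueues does above: drop blank lines,
    fall back to the default queue, and make sure the active queue is
    always listed."""
    queues = [q.strip() for q in all_queues_text.splitlines() if q.strip()]
    if not queues:
        queues = [default]
    active = active_text.strip() or default
    if active not in queues:
        queues.append(active)
    return sorted(queues), active

assert parse_queues('patches\nfixes\n', 'fixes') == (['fixes', 'patches'], 'fixes')
assert parse_queues('', '') == (['patches'], 'patches')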
2795 def _setactive(name):
2795 def _setactive(name):
2796 if q.applied:
2796 if q.applied:
2797 raise util.Abort(_('patches applied - cannot set new queue active'))
2797 raise util.Abort(_('patches applied - cannot set new queue active'))
2798 _setactivenocheck(name)
2798 _setactivenocheck(name)
2799
2799
2800 def _setactivenocheck(name):
2800 def _setactivenocheck(name):
2801 fh = repo.opener(_activequeue, 'w')
2801 fh = repo.opener(_activequeue, 'w')
2802 if name != 'patches':
2802 if name != 'patches':
2803 fh.write(name)
2803 fh.write(name)
2804 fh.close()
2804 fh.close()
2805
2805
2806 def _addqueue(name):
2806 def _addqueue(name):
2807 fh = repo.opener(_allqueues, 'a')
2807 fh = repo.opener(_allqueues, 'a')
2808 fh.write('%s\n' % (name,))
2808 fh.write('%s\n' % (name,))
2809 fh.close()
2809 fh.close()
2810
2810
2811 def _queuedir(name):
2811 def _queuedir(name):
2812 if name == 'patches':
2812 if name == 'patches':
2813 return repo.join('patches')
2813 return repo.join('patches')
2814 else:
2814 else:
2815 return repo.join('patches-' + name)
2815 return repo.join('patches-' + name)
2816
2816
2817 def _validname(name):
2817 def _validname(name):
2818 for n in name:
2818 for n in name:
2819 if n in ':\\/.':
2819 if n in ':\\/.':
2820 return False
2820 return False
2821 return True
2821 return True
2822
2822
2823 def _delete(name):
2823 def _delete(name):
2824 if name not in existing:
2824 if name not in existing:
2825 raise util.Abort(_('cannot delete queue that does not exist'))
2825 raise util.Abort(_('cannot delete queue that does not exist'))
2826
2826
2827 current = _getcurrent()
2827 current = _getcurrent()
2828
2828
2829 if name == current:
2829 if name == current:
2830 raise util.Abort(_('cannot delete currently active queue'))
2830 raise util.Abort(_('cannot delete currently active queue'))
2831
2831
2832 fh = repo.opener('patches.queues.new', 'w')
2832 fh = repo.opener('patches.queues.new', 'w')
2833 for queue in existing:
2833 for queue in existing:
2834 if queue == name:
2834 if queue == name:
2835 continue
2835 continue
2836 fh.write('%s\n' % (queue,))
2836 fh.write('%s\n' % (queue,))
2837 fh.close()
2837 fh.close()
2838 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2838 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2839
2839
2840 if not name or opts.get('list'):
2840 if not name or opts.get('list'):
2841 current = _getcurrent()
2841 current = _getcurrent()
2842 for queue in _getqueues():
2842 for queue in _getqueues():
2843 ui.write('%s' % (queue,))
2843 ui.write('%s' % (queue,))
2844 if queue == current and not ui.quiet:
2844 if queue == current and not ui.quiet:
2845 ui.write(_(' (active)\n'))
2845 ui.write(_(' (active)\n'))
2846 else:
2846 else:
2847 ui.write('\n')
2847 ui.write('\n')
2848 return
2848 return
2849
2849
2850 if not _validname(name):
2850 if not _validname(name):
2851 raise util.Abort(
2851 raise util.Abort(
2852 _('invalid queue name, may not contain the characters ":\\/."'))
2852 _('invalid queue name, may not contain the characters ":\\/."'))
2853
2853
2854 existing = _getqueues()
2854 existing = _getqueues()
2855
2855
2856 if opts.get('create'):
2856 if opts.get('create'):
2857 if name in existing:
2857 if name in existing:
2858 raise util.Abort(_('queue "%s" already exists') % name)
2858 raise util.Abort(_('queue "%s" already exists') % name)
2859 if _noqueues():
2859 if _noqueues():
2860 _addqueue(_defaultqueue)
2860 _addqueue(_defaultqueue)
2861 _addqueue(name)
2861 _addqueue(name)
2862 _setactive(name)
2862 _setactive(name)
2863 elif opts.get('rename'):
2863 elif opts.get('rename'):
2864 current = _getcurrent()
2864 current = _getcurrent()
2865 if name == current:
2865 if name == current:
2866 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
2866 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
2867 if name in existing:
2867 if name in existing:
2868 raise util.Abort(_('queue "%s" already exists') % name)
2868 raise util.Abort(_('queue "%s" already exists') % name)
2869
2869
2870 olddir = _queuedir(current)
2870 olddir = _queuedir(current)
2871 newdir = _queuedir(name)
2871 newdir = _queuedir(name)
2872
2872
2873 if os.path.exists(newdir):
2873 if os.path.exists(newdir):
2874 raise util.Abort(_('non-queue directory "%s" already exists') %
2874 raise util.Abort(_('non-queue directory "%s" already exists') %
2875 newdir)
2875 newdir)
2876
2876
2877 fh = repo.opener('patches.queues.new', 'w')
2877 fh = repo.opener('patches.queues.new', 'w')
2878 for queue in existing:
2878 for queue in existing:
2879 if queue == current:
2879 if queue == current:
2880 fh.write('%s\n' % (name,))
2880 fh.write('%s\n' % (name,))
2881 if os.path.exists(olddir):
2881 if os.path.exists(olddir):
2882 util.rename(olddir, newdir)
2882 util.rename(olddir, newdir)
2883 else:
2883 else:
2884 fh.write('%s\n' % (queue,))
2884 fh.write('%s\n' % (queue,))
2885 fh.close()
2885 fh.close()
2886 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2886 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
2887 _setactivenocheck(name)
2887 _setactivenocheck(name)
2888 elif opts.get('delete'):
2888 elif opts.get('delete'):
2889 _delete(name)
2889 _delete(name)
2890 elif opts.get('purge'):
2890 elif opts.get('purge'):
2891 if name in existing:
2891 if name in existing:
2892 _delete(name)
2892 _delete(name)
2893 qdir = _queuedir(name)
2893 qdir = _queuedir(name)
2894 if os.path.exists(qdir):
2894 if os.path.exists(qdir):
2895 shutil.rmtree(qdir)
2895 shutil.rmtree(qdir)
2896 else:
2896 else:
2897 if name not in existing:
2897 if name not in existing:
2898 raise util.Abort(_('use --create to create a new queue'))
2898 raise util.Abort(_('use --create to create a new queue'))
2899 _setactive(name)
2899 _setactive(name)
2900
2900
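Both the --delete and --rename branches above rewrite the registry by writing a complete 'patches.queues.new' file and then renaming it over patches.queues, so an interrupted write never leaves a truncated registry behind. A generic sketch of that write-then-rename pattern (paths are illustrative):

import os

def rewrite_atomically(path, lines):
    """Write `lines` to `path` by staging a sibling file and renaming it
    into place, mirroring the patches.queues.new dance above."""
    tmp = path + '.new'
    with open(tmp, 'w') as fh:
        for line in lines:
            fh.write('%s\n' % line)
    # the rename makes the swap atomic on POSIX filesystems
    os.rename(tmp, path)

# Example: drop one queue name from the registry file.
# rewrite_atomically('.hg/patches.queues',
#                    [q for q in ('patches', 'fixes') if q != 'fixes'])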
2901 def reposetup(ui, repo):
2901 def reposetup(ui, repo):
2902 class mqrepo(repo.__class__):
2902 class mqrepo(repo.__class__):
2903 @util.propertycache
2903 @util.propertycache
2904 def mq(self):
2904 def mq(self):
2905 return queue(self.ui, self.join(""))
2905 return queue(self.ui, self.join(""))
2906
2906
2907 def abort_if_wdir_patched(self, errmsg, force=False):
2907 def abort_if_wdir_patched(self, errmsg, force=False):
2908 if self.mq.applied and not force:
2908 if self.mq.applied and not force:
2909 parent = self.dirstate.parents()[0]
2909 parent = self.dirstate.parents()[0]
2910 if parent in [s.node for s in self.mq.applied]:
2910 if parent in [s.node for s in self.mq.applied]:
2911 raise util.Abort(errmsg)
2911 raise util.Abort(errmsg)
2912
2912
2913 def commit(self, text="", user=None, date=None, match=None,
2913 def commit(self, text="", user=None, date=None, match=None,
2914 force=False, editor=False, extra={}):
2914 force=False, editor=False, extra={}):
2915 self.abort_if_wdir_patched(
2915 self.abort_if_wdir_patched(
2916 _('cannot commit over an applied mq patch'),
2916 _('cannot commit over an applied mq patch'),
2917 force)
2917 force)
2918
2918
2919 return super(mqrepo, self).commit(text, user, date, match, force,
2919 return super(mqrepo, self).commit(text, user, date, match, force,
2920 editor, extra)
2920 editor, extra)
2921
2921
2922 def push(self, remote, force=False, revs=None, newbranch=False):
2922 def checkpush(self, force, revs):
2923 if self.mq.applied and not force:
2923 if self.mq.applied and not force:
2924 haspatches = True
2924 haspatches = True
2925 if revs:
2925 if revs:
2926 # Assume applied patches have no non-patch descendants
2926 # Assume applied patches have no non-patch descendants
2927 # and are not on remote already. If they appear in the
2927 # and are not on remote already. If they appear in the
2928 # set of resolved 'revs', bail out.
2928 # set of resolved 'revs', bail out.
2929 applied = set(e.node for e in self.mq.applied)
2929 applied = set(e.node for e in self.mq.applied)
2930 haspatches = bool([n for n in revs if n in applied])
2930 haspatches = bool([n for n in revs if n in applied])
2931 if haspatches:
2931 if haspatches:
2932 raise util.Abort(_('source has mq patches applied'))
2932 raise util.Abort(_('source has mq patches applied'))
2933 return super(mqrepo, self).push(remote, force, revs, newbranch)
2933 super(mqrepo, self).checkpush(force, revs)
2934
2934
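This hunk is the core of the changeset: instead of overriding localrepo.push wholesale, mqrepo now only overrides a checkpush(force, revs) hook, and the push logic itself is assumed to live in localrepo, which calls the hook before doing any work. A toy sketch of that assumed cooperation (class names and the simplified guard are illustrative only):

class BaseRepo(object):
    def checkpush(self, force, revs):
        """Extension hook: raise to veto a push. The base implementation
        accepts everything."""

    def push(self, remote, force=False, revs=None):
        # push conditions are checked once, up front, for every subclass
        self.checkpush(force, revs)
        return self._dopush(remote, revs)

    def _dopush(self, remote, revs):
        return 'pushed %r to %r' % (revs, remote)

class MqRepo(BaseRepo):
    applied = ['a.patch']          # pretend one mq patch is applied

    def checkpush(self, force, revs):
        if self.applied and not force:
            raise RuntimeError('source has mq patches applied')
        super(MqRepo, self).checkpush(force, revs)

repo = MqRepo()
assert repo.push('remote', force=True) == "pushed None to 'remote'"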
2935 def _findtags(self):
2935 def _findtags(self):
2936 '''augment tags from base class with patch tags'''
2936 '''augment tags from base class with patch tags'''
2937 result = super(mqrepo, self)._findtags()
2937 result = super(mqrepo, self)._findtags()
2938
2938
2939 q = self.mq
2939 q = self.mq
2940 if not q.applied:
2940 if not q.applied:
2941 return result
2941 return result
2942
2942
2943 mqtags = [(patch.node, patch.name) for patch in q.applied]
2943 mqtags = [(patch.node, patch.name) for patch in q.applied]
2944
2944
2945 if mqtags[-1][0] not in self:
2945 if mqtags[-1][0] not in self:
2946 self.ui.warn(_('mq status file refers to unknown node %s\n')
2946 self.ui.warn(_('mq status file refers to unknown node %s\n')
2947 % short(mqtags[-1][0]))
2947 % short(mqtags[-1][0]))
2948 return result
2948 return result
2949
2949
2950 mqtags.append((mqtags[-1][0], 'qtip'))
2950 mqtags.append((mqtags[-1][0], 'qtip'))
2951 mqtags.append((mqtags[0][0], 'qbase'))
2951 mqtags.append((mqtags[0][0], 'qbase'))
2952 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2952 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2953 tags = result[0]
2953 tags = result[0]
2954 for patch in mqtags:
2954 for patch in mqtags:
2955 if patch[1] in tags:
2955 if patch[1] in tags:
2956 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2956 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2957 % patch[1])
2957 % patch[1])
2958 else:
2958 else:
2959 tags[patch[1]] = patch[0]
2959 tags[patch[1]] = patch[0]
2960
2960
2961 return result
2961 return result
2962
2962
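_findtags above exposes every applied patch as a tag and then adds the synthetic qtip, qbase and qparent names: the newest applied patch, the oldest one, and the parent the stack sits on. A standalone sketch of that derivation, with plain strings standing in for binary changelog nodes:

def mq_tags(applied, parent_of_first):
    """Build the extra tag mapping mq layers on top of regular tags.

    `applied` is an ordered list of (node, patchname) pairs, oldest first;
    `parent_of_first` is the changeset the patch stack sits on."""
    tags = [(node, name) for node, name in applied]
    tags.append((applied[-1][0], 'qtip'))      # newest applied patch
    tags.append((applied[0][0], 'qbase'))      # oldest applied patch
    tags.append((parent_of_first, 'qparent'))  # changeset below the stack
    return dict((name, node) for node, name in tags)

t = mq_tags([('n1', 'a.patch'), ('n2', 'b.patch')], 'n0')
assert t['qbase'] == 'n1' and t['qtip'] == 'n2' and t['qparent'] == 'n0'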
2963 def _branchtags(self, partial, lrev):
2963 def _branchtags(self, partial, lrev):
2964 q = self.mq
2964 q = self.mq
2965 if not q.applied:
2965 if not q.applied:
2966 return super(mqrepo, self)._branchtags(partial, lrev)
2966 return super(mqrepo, self)._branchtags(partial, lrev)
2967
2967
2968 cl = self.changelog
2968 cl = self.changelog
2969 qbasenode = q.applied[0].node
2969 qbasenode = q.applied[0].node
2970 if qbasenode not in self:
2970 if qbasenode not in self:
2971 self.ui.warn(_('mq status file refers to unknown node %s\n')
2971 self.ui.warn(_('mq status file refers to unknown node %s\n')
2972 % short(qbasenode))
2972 % short(qbasenode))
2973 return super(mqrepo, self)._branchtags(partial, lrev)
2973 return super(mqrepo, self)._branchtags(partial, lrev)
2974
2974
2975 qbase = cl.rev(qbasenode)
2975 qbase = cl.rev(qbasenode)
2976 start = lrev + 1
2976 start = lrev + 1
2977 if start < qbase:
2977 if start < qbase:
2978 # update the cache (excluding the patches) and save it
2978 # update the cache (excluding the patches) and save it
2979 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2979 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2980 self._updatebranchcache(partial, ctxgen)
2980 self._updatebranchcache(partial, ctxgen)
2981 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2981 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2982 start = qbase
2982 start = qbase
2983 # if start = qbase, the cache is as updated as it should be.
2983 # if start = qbase, the cache is as updated as it should be.
2984 # if start > qbase, the cache includes (part of) the patches.
2984 # if start > qbase, the cache includes (part of) the patches.
2985 # we might as well use it, but we won't save it.
2985 # we might as well use it, but we won't save it.
2986
2986
2987 # update the cache up to the tip
2987 # update the cache up to the tip
2988 ctxgen = (self[r] for r in xrange(start, len(cl)))
2988 ctxgen = (self[r] for r in xrange(start, len(cl)))
2989 self._updatebranchcache(partial, ctxgen)
2989 self._updatebranchcache(partial, ctxgen)
2990
2990
2991 return partial
2991 return partial
2992
2992
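The override above keeps mq changesets out of the persisted branch cache: it writes the cache only up to the revision just below qbase and then extends the in-memory copy to the tip without saving it. A small sketch of that boundary arithmetic, where lrev is the last cached revision and qbase the revision of the first applied patch:

def branchcache_plan(lrev, qbase, tiprev):
    """Return (revs_to_cache_and_write, revs_to_use_only_in_memory),
    mirroring the start/qbase arithmetic above."""
    start = lrev + 1
    persisted = []
    if start < qbase:
        persisted = list(range(start, qbase))        # safe to write: below qbase
        start = qbase
    in_memory_only = list(range(start, tiprev + 1))  # includes the mq patches
    return persisted, in_memory_only

assert branchcache_plan(lrev=3, qbase=6, tiprev=8) == ([4, 5], [6, 7, 8])
assert branchcache_plan(lrev=7, qbase=6, tiprev=8) == ([], [8])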
2993 if repo.local():
2993 if repo.local():
2994 repo.__class__ = mqrepo
2994 repo.__class__ = mqrepo
2995
2995
2996 def mqimport(orig, ui, repo, *args, **kwargs):
2996 def mqimport(orig, ui, repo, *args, **kwargs):
2997 if (hasattr(repo, 'abort_if_wdir_patched')
2997 if (hasattr(repo, 'abort_if_wdir_patched')
2998 and not kwargs.get('no_commit', False)):
2998 and not kwargs.get('no_commit', False)):
2999 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2999 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
3000 kwargs.get('force'))
3000 kwargs.get('force'))
3001 return orig(ui, repo, *args, **kwargs)
3001 return orig(ui, repo, *args, **kwargs)
3002
3002
3003 def mqinit(orig, ui, *args, **kwargs):
3003 def mqinit(orig, ui, *args, **kwargs):
3004 mq = kwargs.pop('mq', None)
3004 mq = kwargs.pop('mq', None)
3005
3005
3006 if not mq:
3006 if not mq:
3007 return orig(ui, *args, **kwargs)
3007 return orig(ui, *args, **kwargs)
3008
3008
3009 if args:
3009 if args:
3010 repopath = args[0]
3010 repopath = args[0]
3011 if not hg.islocal(repopath):
3011 if not hg.islocal(repopath):
3012 raise util.Abort(_('only a local queue repository '
3012 raise util.Abort(_('only a local queue repository '
3013 'may be initialized'))
3013 'may be initialized'))
3014 else:
3014 else:
3015 repopath = cmdutil.findrepo(os.getcwd())
3015 repopath = cmdutil.findrepo(os.getcwd())
3016 if not repopath:
3016 if not repopath:
3017 raise util.Abort(_('there is no Mercurial repository here '
3017 raise util.Abort(_('there is no Mercurial repository here '
3018 '(.hg not found)'))
3018 '(.hg not found)'))
3019 repo = hg.repository(ui, repopath)
3019 repo = hg.repository(ui, repopath)
3020 return qinit(ui, repo, True)
3020 return qinit(ui, repo, True)
3021
3021
3022 def mqcommand(orig, ui, repo, *args, **kwargs):
3022 def mqcommand(orig, ui, repo, *args, **kwargs):
3023 """Add --mq option to operate on patch repository instead of main"""
3023 """Add --mq option to operate on patch repository instead of main"""
3024
3024
3025 # some commands do not like getting unknown options
3025 # some commands do not like getting unknown options
3026 mq = kwargs.pop('mq', None)
3026 mq = kwargs.pop('mq', None)
3027
3027
3028 if not mq:
3028 if not mq:
3029 return orig(ui, repo, *args, **kwargs)
3029 return orig(ui, repo, *args, **kwargs)
3030
3030
3031 q = repo.mq
3031 q = repo.mq
3032 r = q.qrepo()
3032 r = q.qrepo()
3033 if not r:
3033 if not r:
3034 raise util.Abort(_('no queue repository'))
3034 raise util.Abort(_('no queue repository'))
3035 return orig(r.ui, r, *args, **kwargs)
3035 return orig(r.ui, r, *args, **kwargs)
3036
3036
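mqcommand above is what makes the global --mq flag work: the option is popped before the wrapped command sees it, and when set the command is re-run against the patch queue's own repository (q.qrepo()). A toy sketch of that wrapper shape; the patchrepo attribute is illustrative only:

def with_mq_redirect(orig):
    """Wrap a command so an mq=True keyword redirects it to the patch repo."""
    def wrapped(ui, repo, *args, **kwargs):
        use_mq = kwargs.pop('mq', None)       # hide the flag from `orig`
        if not use_mq:
            return orig(ui, repo, *args, **kwargs)
        patchrepo = getattr(repo, 'patchrepo', None)
        if patchrepo is None:
            raise RuntimeError('no queue repository')
        return orig(ui, patchrepo, *args, **kwargs)
    return wrapped

# Example: a command that just reports which repository it ran in.
class Repo(object):
    def __init__(self, name, patchrepo=None):
        self.name, self.patchrepo = name, patchrepo

status = with_mq_redirect(lambda ui, repo: repo.name)
main = Repo('main', patchrepo=Repo('patches'))
assert status(None, main) == 'main'
assert status(None, main, mq=True) == 'patches'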
3037 def summary(orig, ui, repo, *args, **kwargs):
3037 def summary(orig, ui, repo, *args, **kwargs):
3038 r = orig(ui, repo, *args, **kwargs)
3038 r = orig(ui, repo, *args, **kwargs)
3039 q = repo.mq
3039 q = repo.mq
3040 m = []
3040 m = []
3041 a, u = len(q.applied), len(q.unapplied(repo))
3041 a, u = len(q.applied), len(q.unapplied(repo))
3042 if a:
3042 if a:
3043 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3043 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3044 if u:
3044 if u:
3045 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3045 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3046 if m:
3046 if m:
3047 ui.write("mq: %s\n" % ', '.join(m))
3047 ui.write("mq: %s\n" % ', '.join(m))
3048 else:
3048 else:
3049 ui.note(_("mq: (empty queue)\n"))
3049 ui.note(_("mq: (empty queue)\n"))
3050 return r
3050 return r
3051
3051
3052 def uisetup(ui):
3052 def uisetup(ui):
3053 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3053 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3054
3054
3055 extensions.wrapcommand(commands.table, 'import', mqimport)
3055 extensions.wrapcommand(commands.table, 'import', mqimport)
3056 extensions.wrapcommand(commands.table, 'summary', summary)
3056 extensions.wrapcommand(commands.table, 'summary', summary)
3057
3057
3058 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3058 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3059 entry[1].extend(mqopt)
3059 entry[1].extend(mqopt)
3060
3060
3061 nowrap = set(commands.norepo.split(" ") + ['qrecord'])
3061 nowrap = set(commands.norepo.split(" ") + ['qrecord'])
3062
3062
3063 def dotable(cmdtable):
3063 def dotable(cmdtable):
3064 for cmd in cmdtable.keys():
3064 for cmd in cmdtable.keys():
3065 cmd = cmdutil.parsealiases(cmd)[0]
3065 cmd = cmdutil.parsealiases(cmd)[0]
3066 if cmd in nowrap:
3066 if cmd in nowrap:
3067 continue
3067 continue
3068 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3068 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3069 entry[1].extend(mqopt)
3069 entry[1].extend(mqopt)
3070
3070
3071 dotable(commands.table)
3071 dotable(commands.table)
3072
3072
3073 for extname, extmodule in extensions.extensions():
3073 for extname, extmodule in extensions.extensions():
3074 if extmodule.__file__ != __file__:
3074 if extmodule.__file__ != __file__:
3075 dotable(getattr(extmodule, 'cmdtable', {}))
3075 dotable(getattr(extmodule, 'cmdtable', {}))
3076
3076
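uisetup above injects the --mq option into nearly every command by wrapping each table entry with mqcommand and appending mqopt to its options; commands that run without a repository, plus qrecord, are skipped. A reduced sketch of that table walk, assuming entries of the form name -> (function, options, synopsis) and ignoring alias parsing:

def add_option_everywhere(cmdtable, extra_opt, skip):
    """Append `extra_opt` to every command's option list except those in
    `skip`, mirroring the dotable() loop above."""
    for name, (func, opts, synopsis) in list(cmdtable.items()):
        primary = name.lstrip('^').split('|')[0]
        if primary in skip:
            continue
        cmdtable[name] = (func, opts + [extra_opt], synopsis)

table = {'^qpush': (lambda: None, [('f', 'force', None, 'force')], 'hg qpush'),
         'qrecord': (lambda: None, [], 'hg qrecord')}
add_option_everywhere(table, ('', 'mq', None, 'operate on patch repository'),
                      skip={'qrecord'})
assert len(table['^qpush'][1]) == 2 and len(table['qrecord'][1]) == 0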
3077 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3077 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
3078
3078
3079 cmdtable = {
3079 cmdtable = {
3080 "qapplied":
3080 "qapplied":
3081 (applied,
3081 (applied,
3082 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
3082 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
3083 _('hg qapplied [-1] [-s] [PATCH]')),
3083 _('hg qapplied [-1] [-s] [PATCH]')),
3084 "qclone":
3084 "qclone":
3085 (clone,
3085 (clone,
3086 [('', 'pull', None, _('use pull protocol to copy metadata')),
3086 [('', 'pull', None, _('use pull protocol to copy metadata')),
3087 ('U', 'noupdate', None, _('do not update the new working directories')),
3087 ('U', 'noupdate', None, _('do not update the new working directories')),
3088 ('', 'uncompressed', None,
3088 ('', 'uncompressed', None,
3089 _('use uncompressed transfer (fast over LAN)')),
3089 _('use uncompressed transfer (fast over LAN)')),
3090 ('p', 'patches', '',
3090 ('p', 'patches', '',
3091 _('location of source patch repository'), _('REPO')),
3091 _('location of source patch repository'), _('REPO')),
3092 ] + commands.remoteopts,
3092 ] + commands.remoteopts,
3093 _('hg qclone [OPTION]... SOURCE [DEST]')),
3093 _('hg qclone [OPTION]... SOURCE [DEST]')),
3094 "qcommit|qci":
3094 "qcommit|qci":
3095 (commit,
3095 (commit,
3096 commands.table["^commit|ci"][1],
3096 commands.table["^commit|ci"][1],
3097 _('hg qcommit [OPTION]... [FILE]...')),
3097 _('hg qcommit [OPTION]... [FILE]...')),
3098 "^qdiff":
3098 "^qdiff":
3099 (diff,
3099 (diff,
3100 commands.diffopts + commands.diffopts2 + commands.walkopts,
3100 commands.diffopts + commands.diffopts2 + commands.walkopts,
3101 _('hg qdiff [OPTION]... [FILE]...')),
3101 _('hg qdiff [OPTION]... [FILE]...')),
3102 "qdelete|qremove|qrm":
3102 "qdelete|qremove|qrm":
3103 (delete,
3103 (delete,
3104 [('k', 'keep', None, _('keep patch file')),
3104 [('k', 'keep', None, _('keep patch file')),
3105 ('r', 'rev', [],
3105 ('r', 'rev', [],
3106 _('stop managing a revision (DEPRECATED)'), _('REV'))],
3106 _('stop managing a revision (DEPRECATED)'), _('REV'))],
3107 _('hg qdelete [-k] [PATCH]...')),
3107 _('hg qdelete [-k] [PATCH]...')),
3108 'qfold':
3108 'qfold':
3109 (fold,
3109 (fold,
3110 [('e', 'edit', None, _('edit patch header')),
3110 [('e', 'edit', None, _('edit patch header')),
3111 ('k', 'keep', None, _('keep folded patch files')),
3111 ('k', 'keep', None, _('keep folded patch files')),
3112 ] + commands.commitopts,
3112 ] + commands.commitopts,
3113 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
3113 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
3114 'qgoto':
3114 'qgoto':
3115 (goto,
3115 (goto,
3116 [('f', 'force', None, _('overwrite any local changes'))],
3116 [('f', 'force', None, _('overwrite any local changes'))],
3117 _('hg qgoto [OPTION]... PATCH')),
3117 _('hg qgoto [OPTION]... PATCH')),
3118 'qguard':
3118 'qguard':
3119 (guard,
3119 (guard,
3120 [('l', 'list', None, _('list all patches and guards')),
3120 [('l', 'list', None, _('list all patches and guards')),
3121 ('n', 'none', None, _('drop all guards'))],
3121 ('n', 'none', None, _('drop all guards'))],
3122 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
3122 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
3123 'qheader': (header, [], _('hg qheader [PATCH]')),
3123 'qheader': (header, [], _('hg qheader [PATCH]')),
3124 "qimport":
3124 "qimport":
3125 (qimport,
3125 (qimport,
3126 [('e', 'existing', None, _('import file in patch directory')),
3126 [('e', 'existing', None, _('import file in patch directory')),
3127 ('n', 'name', '',
3127 ('n', 'name', '',
3128 _('name of patch file'), _('NAME')),
3128 _('name of patch file'), _('NAME')),
3129 ('f', 'force', None, _('overwrite existing files')),
3129 ('f', 'force', None, _('overwrite existing files')),
3130 ('r', 'rev', [],
3130 ('r', 'rev', [],
3131 _('place existing revisions under mq control'), _('REV')),
3131 _('place existing revisions under mq control'), _('REV')),
3132 ('g', 'git', None, _('use git extended diff format')),
3132 ('g', 'git', None, _('use git extended diff format')),
3133 ('P', 'push', None, _('qpush after importing'))],
3133 ('P', 'push', None, _('qpush after importing'))],
3134 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
3134 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
3135 "^qinit":
3135 "^qinit":
3136 (init,
3136 (init,
3137 [('c', 'create-repo', None, _('create queue repository'))],
3137 [('c', 'create-repo', None, _('create queue repository'))],
3138 _('hg qinit [-c]')),
3138 _('hg qinit [-c]')),
3139 "^qnew":
3139 "^qnew":
3140 (new,
3140 (new,
3141 [('e', 'edit', None, _('edit commit message')),
3141 [('e', 'edit', None, _('edit commit message')),
3142 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
3142 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
3143 ('g', 'git', None, _('use git extended diff format')),
3143 ('g', 'git', None, _('use git extended diff format')),
3144 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
3144 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
3145 ('u', 'user', '',
3145 ('u', 'user', '',
3146 _('add "From: <USER>" to patch'), _('USER')),
3146 _('add "From: <USER>" to patch'), _('USER')),
3147 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
3147 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
3148 ('d', 'date', '',
3148 ('d', 'date', '',
3149 _('add "Date: <DATE>" to patch'), _('DATE'))
3149 _('add "Date: <DATE>" to patch'), _('DATE'))
3150 ] + commands.walkopts + commands.commitopts,
3150 ] + commands.walkopts + commands.commitopts,
3151 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
3151 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
3152 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
3152 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
3153 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
3153 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
3154 "^qpop":
3154 "^qpop":
3155 (pop,
3155 (pop,
3156 [('a', 'all', None, _('pop all patches')),
3156 [('a', 'all', None, _('pop all patches')),
3157 ('n', 'name', '',
3157 ('n', 'name', '',
3158 _('queue name to pop (DEPRECATED)'), _('NAME')),
3158 _('queue name to pop (DEPRECATED)'), _('NAME')),
3159 ('f', 'force', None, _('forget any local changes to patched files'))],
3159 ('f', 'force', None, _('forget any local changes to patched files'))],
3160 _('hg qpop [-a] [-f] [PATCH | INDEX]')),
3160 _('hg qpop [-a] [-f] [PATCH | INDEX]')),
3161 "^qpush":
3161 "^qpush":
3162 (push,
3162 (push,
3163 [('f', 'force', None, _('apply on top of local changes')),
3163 [('f', 'force', None, _('apply on top of local changes')),
3164 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
3164 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
3165 ('l', 'list', None, _('list patch name in commit text')),
3165 ('l', 'list', None, _('list patch name in commit text')),
3166 ('a', 'all', None, _('apply all patches')),
3166 ('a', 'all', None, _('apply all patches')),
3167 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
3167 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
3168 ('n', 'name', '',
3168 ('n', 'name', '',
3169 _('merge queue name (DEPRECATED)'), _('NAME')),
3169 _('merge queue name (DEPRECATED)'), _('NAME')),
3170 ('', 'move', None, _('reorder patch series and apply only the patch'))],
3170 ('', 'move', None, _('reorder patch series and apply only the patch'))],
3171 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
3171 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]')),
3172 "^qrefresh":
3172 "^qrefresh":
3173 (refresh,
3173 (refresh,
3174 [('e', 'edit', None, _('edit commit message')),
3174 [('e', 'edit', None, _('edit commit message')),
3175 ('g', 'git', None, _('use git extended diff format')),
3175 ('g', 'git', None, _('use git extended diff format')),
3176 ('s', 'short', None,
3176 ('s', 'short', None,
3177 _('refresh only files already in the patch and specified files')),
3177 _('refresh only files already in the patch and specified files')),
3178 ('U', 'currentuser', None,
3178 ('U', 'currentuser', None,
3179 _('add/update author field in patch with current user')),
3179 _('add/update author field in patch with current user')),
3180 ('u', 'user', '',
3180 ('u', 'user', '',
3181 _('add/update author field in patch with given user'), _('USER')),
3181 _('add/update author field in patch with given user'), _('USER')),
3182 ('D', 'currentdate', None,
3182 ('D', 'currentdate', None,
3183 _('add/update date field in patch with current date')),
3183 _('add/update date field in patch with current date')),
3184 ('d', 'date', '',
3184 ('d', 'date', '',
3185 _('add/update date field in patch with given date'), _('DATE'))
3185 _('add/update date field in patch with given date'), _('DATE'))
3186 ] + commands.walkopts + commands.commitopts,
3186 ] + commands.walkopts + commands.commitopts,
3187 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
3187 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
3188 'qrename|qmv':
3188 'qrename|qmv':
3189 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
3189 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
3190 "qrestore":
3190 "qrestore":
3191 (restore,
3191 (restore,
3192 [('d', 'delete', None, _('delete save entry')),
3192 [('d', 'delete', None, _('delete save entry')),
3193 ('u', 'update', None, _('update queue working directory'))],
3193 ('u', 'update', None, _('update queue working directory'))],
3194 _('hg qrestore [-d] [-u] REV')),
3194 _('hg qrestore [-d] [-u] REV')),
3195 "qsave":
3195 "qsave":
3196 (save,
3196 (save,
3197 [('c', 'copy', None, _('copy patch directory')),
3197 [('c', 'copy', None, _('copy patch directory')),
3198 ('n', 'name', '',
3198 ('n', 'name', '',
3199 _('copy directory name'), _('NAME')),
3199 _('copy directory name'), _('NAME')),
3200 ('e', 'empty', None, _('clear queue status file')),
3200 ('e', 'empty', None, _('clear queue status file')),
3201 ('f', 'force', None, _('force copy'))] + commands.commitopts,
3201 ('f', 'force', None, _('force copy'))] + commands.commitopts,
3202 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
3202 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
3203 "qselect":
3203 "qselect":
3204 (select,
3204 (select,
3205 [('n', 'none', None, _('disable all guards')),
3205 [('n', 'none', None, _('disable all guards')),
3206 ('s', 'series', None, _('list all guards in series file')),
3206 ('s', 'series', None, _('list all guards in series file')),
3207 ('', 'pop', None, _('pop to before first guarded applied patch')),
3207 ('', 'pop', None, _('pop to before first guarded applied patch')),
3208 ('', 'reapply', None, _('pop, then reapply patches'))],
3208 ('', 'reapply', None, _('pop, then reapply patches'))],
3209 _('hg qselect [OPTION]... [GUARD]...')),
3209 _('hg qselect [OPTION]... [GUARD]...')),
3210 "qseries":
3210 "qseries":
3211 (series,
3211 (series,
3212 [('m', 'missing', None, _('print patches not in series')),
3212 [('m', 'missing', None, _('print patches not in series')),
3213 ] + seriesopts,
3213 ] + seriesopts,
3214 _('hg qseries [-ms]')),
3214 _('hg qseries [-ms]')),
3215 "strip":
3215 "strip":
3216 (strip,
3216 (strip,
3217 [('f', 'force', None, _('force removal of changesets even if the '
3217 [('f', 'force', None, _('force removal of changesets even if the '
3218 'working directory has uncommitted changes')),
3218 'working directory has uncommitted changes')),
3219 ('b', 'backup', None, _('bundle only changesets with local revision'
3219 ('b', 'backup', None, _('bundle only changesets with local revision'
3220 ' number greater than REV which are not'
3220 ' number greater than REV which are not'
3221 ' descendants of REV (DEPRECATED)')),
3221 ' descendants of REV (DEPRECATED)')),
3222 ('n', 'no-backup', None, _('no backups')),
3222 ('n', 'no-backup', None, _('no backups')),
3223 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
3223 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
3224 ('k', 'keep', None, _("do not modify working copy during strip"))],
3224 ('k', 'keep', None, _("do not modify working copy during strip"))],
3225 _('hg strip [-k] [-f] [-n] REV...')),
3225 _('hg strip [-k] [-f] [-n] REV...')),
3226 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3226 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
3227 "qunapplied":
3227 "qunapplied":
3228 (unapplied,
3228 (unapplied,
3229 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3229 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
3230 _('hg qunapplied [-1] [-s] [PATCH]')),
3230 _('hg qunapplied [-1] [-s] [PATCH]')),
3231 "qfinish":
3231 "qfinish":
3232 (finish,
3232 (finish,
3233 [('a', 'applied', None, _('finish all applied changesets'))],
3233 [('a', 'applied', None, _('finish all applied changesets'))],
3234 _('hg qfinish [-a] [REV]...')),
3234 _('hg qfinish [-a] [REV]...')),
3235 'qqueue':
3235 'qqueue':
3236 (qqueue,
3236 (qqueue,
3237 [
3237 [
3238 ('l', 'list', False, _('list all available queues')),
3238 ('l', 'list', False, _('list all available queues')),
3239 ('c', 'create', False, _('create new queue')),
3239 ('c', 'create', False, _('create new queue')),
3240 ('', 'rename', False, _('rename active queue')),
3240 ('', 'rename', False, _('rename active queue')),
3241 ('', 'delete', False, _('delete reference to queue')),
3241 ('', 'delete', False, _('delete reference to queue')),
3242 ('', 'purge', False, _('delete queue, and remove patch dir')),
3242 ('', 'purge', False, _('delete queue, and remove patch dir')),
3243 ],
3243 ],
3244 _('[OPTION] [QUEUE]')),
3244 _('[OPTION] [QUEUE]')),
3245 }
3245 }
3246
3246
3247 colortable = {'qguard.negative': 'red',
3247 colortable = {'qguard.negative': 'red',
3248 'qguard.positive': 'yellow',
3248 'qguard.positive': 'yellow',
3249 'qguard.unguarded': 'green',
3249 'qguard.unguarded': 'green',
3250 'qseries.applied': 'blue bold underline',
3250 'qseries.applied': 'blue bold underline',
3251 'qseries.guarded': 'black bold',
3251 'qseries.guarded': 'black bold',
3252 'qseries.missing': 'red bold',
3252 'qseries.missing': 'red bold',
3253 'qseries.unapplied': 'black bold'}
3253 'qseries.unapplied': 'black bold'}
@@ -1,1938 +1,1946 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164
164
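_checknested above walks the candidate path from its longest prefix downwards, looking for a prefix that is a registered subrepository of the working copy before delegating the remainder to that subrepo. A simplified sketch of the prefix walk; substate is reduced to a set of subrepo paths and nested subrepos are not recursed into:

def is_nested(subpath, substate):
    """Return True if `subpath` is, or lives under, a registered subrepo."""
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            # exact match means it is the subrepo itself; longer paths would
            # be handed off to that subrepo's own checknested() in the real code
            return True
        parts.pop()
    return False

assert is_nested('sub', {'sub'}) is True
assert is_nested('sub/dir/file.txt', {'sub'}) is True
assert is_nested('other/file.txt', {'sub'}) is False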
165 @propertycache
165 @propertycache
166 def changelog(self):
166 def changelog(self):
167 c = changelog.changelog(self.sopener)
167 c = changelog.changelog(self.sopener)
168 if 'HG_PENDING' in os.environ:
168 if 'HG_PENDING' in os.environ:
169 p = os.environ['HG_PENDING']
169 p = os.environ['HG_PENDING']
170 if p.startswith(self.root):
170 if p.startswith(self.root):
171 c.readpending('00changelog.i.a')
171 c.readpending('00changelog.i.a')
172 self.sopener.options['defversion'] = c.version
172 self.sopener.options['defversion'] = c.version
173 return c
173 return c
174
174
175 @propertycache
175 @propertycache
176 def manifest(self):
176 def manifest(self):
177 return manifest.manifest(self.sopener)
177 return manifest.manifest(self.sopener)
178
178
179 @propertycache
179 @propertycache
180 def dirstate(self):
180 def dirstate(self):
181 warned = [0]
181 warned = [0]
182 def validate(node):
182 def validate(node):
183 try:
183 try:
184 r = self.changelog.rev(node)
184 r = self.changelog.rev(node)
185 return node
185 return node
186 except error.LookupError:
186 except error.LookupError:
187 if not warned[0]:
187 if not warned[0]:
188 warned[0] = True
188 warned[0] = True
189 self.ui.warn(_("warning: ignoring unknown"
189 self.ui.warn(_("warning: ignoring unknown"
190 " working parent %s!\n") % short(node))
190 " working parent %s!\n") % short(node))
191 return nullid
191 return nullid
192
192
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
193 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
194
194
195 def __getitem__(self, changeid):
195 def __getitem__(self, changeid):
196 if changeid is None:
196 if changeid is None:
197 return context.workingctx(self)
197 return context.workingctx(self)
198 return context.changectx(self, changeid)
198 return context.changectx(self, changeid)
199
199
200 def __contains__(self, changeid):
200 def __contains__(self, changeid):
201 try:
201 try:
202 return bool(self.lookup(changeid))
202 return bool(self.lookup(changeid))
203 except error.RepoLookupError:
203 except error.RepoLookupError:
204 return False
204 return False
205
205
206 def __nonzero__(self):
206 def __nonzero__(self):
207 return True
207 return True
208
208
209 def __len__(self):
209 def __len__(self):
210 return len(self.changelog)
210 return len(self.changelog)
211
211
212 def __iter__(self):
212 def __iter__(self):
213 for i in xrange(len(self)):
213 for i in xrange(len(self)):
214 yield i
214 yield i
215
215
216 def url(self):
216 def url(self):
217 return 'file:' + self.root
217 return 'file:' + self.root
218
218
219 def hook(self, name, throw=False, **args):
219 def hook(self, name, throw=False, **args):
220 return hook.hook(self.ui, self, name, throw, **args)
220 return hook.hook(self.ui, self, name, throw, **args)
221
221
222 tag_disallowed = ':\r\n'
222 tag_disallowed = ':\r\n'
223
223
224 def _tag(self, names, node, message, local, user, date, extra={}):
224 def _tag(self, names, node, message, local, user, date, extra={}):
225 if isinstance(names, str):
225 if isinstance(names, str):
226 allchars = names
226 allchars = names
227 names = (names,)
227 names = (names,)
228 else:
228 else:
229 allchars = ''.join(names)
229 allchars = ''.join(names)
230 for c in self.tag_disallowed:
230 for c in self.tag_disallowed:
231 if c in allchars:
231 if c in allchars:
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
232 raise util.Abort(_('%r cannot be used in a tag name') % c)
233
233
234 branches = self.branchmap()
234 branches = self.branchmap()
235 for name in names:
235 for name in names:
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
236 self.hook('pretag', throw=True, node=hex(node), tag=name,
237 local=local)
237 local=local)
238 if name in branches:
238 if name in branches:
239 self.ui.warn(_("warning: tag %s conflicts with existing"
239 self.ui.warn(_("warning: tag %s conflicts with existing"
240 " branch name\n") % name)
240 " branch name\n") % name)
241
241
242 def writetags(fp, names, munge, prevtags):
242 def writetags(fp, names, munge, prevtags):
243 fp.seek(0, 2)
243 fp.seek(0, 2)
244 if prevtags and prevtags[-1] != '\n':
244 if prevtags and prevtags[-1] != '\n':
245 fp.write('\n')
245 fp.write('\n')
246 for name in names:
246 for name in names:
247 m = munge and munge(name) or name
247 m = munge and munge(name) or name
248 if self._tagtypes and name in self._tagtypes:
248 if self._tagtypes and name in self._tagtypes:
249 old = self._tags.get(name, nullid)
249 old = self._tags.get(name, nullid)
250 fp.write('%s %s\n' % (hex(old), m))
250 fp.write('%s %s\n' % (hex(old), m))
251 fp.write('%s %s\n' % (hex(node), m))
251 fp.write('%s %s\n' % (hex(node), m))
252 fp.close()
252 fp.close()
253
253
254 prevtags = ''
254 prevtags = ''
255 if local:
255 if local:
256 try:
256 try:
257 fp = self.opener('localtags', 'r+')
257 fp = self.opener('localtags', 'r+')
258 except IOError:
258 except IOError:
259 fp = self.opener('localtags', 'a')
259 fp = self.opener('localtags', 'a')
260 else:
260 else:
261 prevtags = fp.read()
261 prevtags = fp.read()
262
262
263 # local tags are stored in the current charset
263 # local tags are stored in the current charset
264 writetags(fp, names, None, prevtags)
264 writetags(fp, names, None, prevtags)
265 for name in names:
265 for name in names:
266 self.hook('tag', node=hex(node), tag=name, local=local)
266 self.hook('tag', node=hex(node), tag=name, local=local)
267 return
267 return
268
268
269 try:
269 try:
270 fp = self.wfile('.hgtags', 'rb+')
270 fp = self.wfile('.hgtags', 'rb+')
271 except IOError:
271 except IOError:
272 fp = self.wfile('.hgtags', 'ab')
272 fp = self.wfile('.hgtags', 'ab')
273 else:
273 else:
274 prevtags = fp.read()
274 prevtags = fp.read()
275
275
276 # committed tags are stored in UTF-8
276 # committed tags are stored in UTF-8
277 writetags(fp, names, encoding.fromlocal, prevtags)
277 writetags(fp, names, encoding.fromlocal, prevtags)
278
278
279 if '.hgtags' not in self.dirstate:
279 if '.hgtags' not in self.dirstate:
280 self[None].add(['.hgtags'])
280 self[None].add(['.hgtags'])
281
281
282 m = matchmod.exact(self.root, '', ['.hgtags'])
282 m = matchmod.exact(self.root, '', ['.hgtags'])
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
283 tagnode = self.commit(message, user, date, extra=extra, match=m)
284
284
285 for name in names:
285 for name in names:
286 self.hook('tag', node=hex(node), tag=name, local=local)
286 self.hook('tag', node=hex(node), tag=name, local=local)
287
287
288 return tagnode
288 return tagnode
289
289
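writetags above shows the on-disk format shared by localtags and .hgtags: one '<hex node> <tag name>' pair per line, appended to the end of the file, and when a tag already exists its previous node is re-emitted on the line before the new one. A small sketch of that append, with short strings standing in for hex nodes:

def append_tag_lines(existing_text, names, new_node, old_nodes):
    """Return the text to append when tagging `new_node` with `names`.

    `old_nodes` maps an already-known tag to its previous node; that old
    line is re-emitted before the new one, as writetags() does above."""
    out = []
    if existing_text and not existing_text.endswith('\n'):
        out.append('\n')                      # make sure we start on a fresh line
    for name in names:
        if name in old_nodes:
            out.append('%s %s\n' % (old_nodes[name], name))
        out.append('%s %s\n' % (new_node, name))
    return ''.join(out)

text = append_tag_lines('aaaa release-1.0', ['release-1.0'], 'cccc',
                        {'release-1.0': 'aaaa'})
assert text == '\naaaa release-1.0\ncccc release-1.0\n'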
290 def tag(self, names, node, message, local, user, date):
290 def tag(self, names, node, message, local, user, date):
291 '''tag a revision with one or more symbolic names.
291 '''tag a revision with one or more symbolic names.
292
292
293 names is a list of strings or, when adding a single tag, names may be a
293 names is a list of strings or, when adding a single tag, names may be a
294 string.
294 string.
295
295
296 if local is True, the tags are stored in a per-repository file.
296 if local is True, the tags are stored in a per-repository file.
297 otherwise, they are stored in the .hgtags file, and a new
297 otherwise, they are stored in the .hgtags file, and a new
298 changeset is committed with the change.
298 changeset is committed with the change.
299
299
300 keyword arguments:
300 keyword arguments:
301
301
302 local: whether to store tags in non-version-controlled file
302 local: whether to store tags in non-version-controlled file
303 (default False)
303 (default False)
304
304
305 message: commit message to use if committing
305 message: commit message to use if committing
306
306
307 user: name of user to use if committing
307 user: name of user to use if committing
308
308
309 date: date tuple to use if committing'''
309 date: date tuple to use if committing'''
310
310
311 if not local:
311 if not local:
312 for x in self.status()[:5]:
312 for x in self.status()[:5]:
313 if '.hgtags' in x:
313 if '.hgtags' in x:
314 raise util.Abort(_('working copy of .hgtags is changed '
314 raise util.Abort(_('working copy of .hgtags is changed '
315 '(please commit .hgtags manually)'))
315 '(please commit .hgtags manually)'))
316
316
317 self.tags() # instantiate the cache
317 self.tags() # instantiate the cache
318 self._tag(names, node, message, local, user, date)
318 self._tag(names, node, message, local, user, date)
319
319
320 def tags(self):
320 def tags(self):
321 '''return a mapping of tag to node'''
321 '''return a mapping of tag to node'''
322 if self._tags is None:
322 if self._tags is None:
323 (self._tags, self._tagtypes) = self._findtags()
323 (self._tags, self._tagtypes) = self._findtags()
324
324
325 return self._tags
325 return self._tags
326
326
327 def _findtags(self):
327 def _findtags(self):
328 '''Do the hard work of finding tags. Return a pair of dicts
328 '''Do the hard work of finding tags. Return a pair of dicts
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
329 (tags, tagtypes) where tags maps tag name to node, and tagtypes
330 maps tag name to a string like \'global\' or \'local\'.
330 maps tag name to a string like \'global\' or \'local\'.
331 Subclasses or extensions are free to add their own tags, but
331 Subclasses or extensions are free to add their own tags, but
332 should be aware that the returned dicts will be retained for the
332 should be aware that the returned dicts will be retained for the
333 duration of the localrepo object.'''
333 duration of the localrepo object.'''
334
334
335 # XXX what tagtype should subclasses/extensions use? Currently
335 # XXX what tagtype should subclasses/extensions use? Currently
336 # mq and bookmarks add tags, but do not set the tagtype at all.
336 # mq and bookmarks add tags, but do not set the tagtype at all.
337 # Should each extension invent its own tag type? Should there
337 # Should each extension invent its own tag type? Should there
338 # be one tagtype for all such "virtual" tags? Or is the status
338 # be one tagtype for all such "virtual" tags? Or is the status
339 # quo fine?
339 # quo fine?
340
340
341 alltags = {} # map tag name to (node, hist)
341 alltags = {} # map tag name to (node, hist)
342 tagtypes = {}
342 tagtypes = {}
343
343
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
344 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
345 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
346
346
347 # Build the return dicts. Have to re-encode tag names because
347 # Build the return dicts. Have to re-encode tag names because
348 # the tags module always uses UTF-8 (in order not to lose info
348 # the tags module always uses UTF-8 (in order not to lose info
349 # writing to the cache), but the rest of Mercurial wants them in
349 # writing to the cache), but the rest of Mercurial wants them in
350 # local encoding.
350 # local encoding.
351 tags = {}
351 tags = {}
352 for (name, (node, hist)) in alltags.iteritems():
352 for (name, (node, hist)) in alltags.iteritems():
353 if node != nullid:
353 if node != nullid:
354 tags[encoding.tolocal(name)] = node
354 tags[encoding.tolocal(name)] = node
355 tags['tip'] = self.changelog.tip()
355 tags['tip'] = self.changelog.tip()
356 tagtypes = dict([(encoding.tolocal(name), value)
356 tagtypes = dict([(encoding.tolocal(name), value)
357 for (name, value) in tagtypes.iteritems()])
357 for (name, value) in tagtypes.iteritems()])
358 return (tags, tagtypes)
358 return (tags, tagtypes)
359
359
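# --- illustrative sketch, not part of this file ---
# The docstring above notes that subclasses and extensions may contribute
# their own tags via _findtags().  A hypothetical extension (the names and
# the 'auto/tip' tag below are invented for the example) could wrap the
# method roughly like this:

def reposetup(ui, repo):
    if not repo.local():
        return

    class taggingrepo(repo.__class__):
        def _findtags(self):
            tags, tagtypes = super(taggingrepo, self)._findtags()
            # advertise the current tip under an extra, extension-defined name
            tags['auto/tip'] = self.changelog.tip()
            tagtypes['auto/tip'] = 'local'  # see the XXX note above on tagtypes
            return tags, tagtypes

    repo.__class__ = taggingrepo
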
360 def tagtype(self, tagname):
360 def tagtype(self, tagname):
361 '''
361 '''
362 return the type of the given tag. The result can be:
362 return the type of the given tag. The result can be:
363
363
364 'local' : a local tag
364 'local' : a local tag
365 'global' : a global tag
365 'global' : a global tag
366 None : tag does not exist
366 None : tag does not exist
367 '''
367 '''
368
368
369 self.tags()
369 self.tags()
370
370
371 return self._tagtypes.get(tagname)
371 return self._tagtypes.get(tagname)
372
372
373 def tagslist(self):
373 def tagslist(self):
374 '''return a list of tags ordered by revision'''
374 '''return a list of tags ordered by revision'''
375 l = []
375 l = []
376 for t, n in self.tags().iteritems():
376 for t, n in self.tags().iteritems():
377 try:
377 try:
378 r = self.changelog.rev(n)
378 r = self.changelog.rev(n)
379 except:
379 except:
380 r = -2 # sort to the beginning of the list if unknown
380 r = -2 # sort to the beginning of the list if unknown
381 l.append((r, t, n))
381 l.append((r, t, n))
382 return [(t, n) for r, t, n in sorted(l)]
382 return [(t, n) for r, t, n in sorted(l)]
383
383
384 def nodetags(self, node):
384 def nodetags(self, node):
385 '''return the tags associated with a node'''
385 '''return the tags associated with a node'''
386 if not self.nodetagscache:
386 if not self.nodetagscache:
387 self.nodetagscache = {}
387 self.nodetagscache = {}
388 for t, n in self.tags().iteritems():
388 for t, n in self.tags().iteritems():
389 self.nodetagscache.setdefault(n, []).append(t)
389 self.nodetagscache.setdefault(n, []).append(t)
390 for tags in self.nodetagscache.itervalues():
390 for tags in self.nodetagscache.itervalues():
391 tags.sort()
391 tags.sort()
392 return self.nodetagscache.get(node, [])
392 return self.nodetagscache.get(node, [])
393
393
394 def _branchtags(self, partial, lrev):
394 def _branchtags(self, partial, lrev):
395 # TODO: rename this function?
395 # TODO: rename this function?
396 tiprev = len(self) - 1
396 tiprev = len(self) - 1
397 if lrev != tiprev:
397 if lrev != tiprev:
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
398 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
399 self._updatebranchcache(partial, ctxgen)
399 self._updatebranchcache(partial, ctxgen)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
400 self._writebranchcache(partial, self.changelog.tip(), tiprev)
401
401
402 return partial
402 return partial
403
403
404 def updatebranchcache(self):
404 def updatebranchcache(self):
405 tip = self.changelog.tip()
405 tip = self.changelog.tip()
406 if self._branchcache is not None and self._branchcachetip == tip:
406 if self._branchcache is not None and self._branchcachetip == tip:
407 return self._branchcache
407 return self._branchcache
408
408
409 oldtip = self._branchcachetip
409 oldtip = self._branchcachetip
410 self._branchcachetip = tip
410 self._branchcachetip = tip
411 if oldtip is None or oldtip not in self.changelog.nodemap:
411 if oldtip is None or oldtip not in self.changelog.nodemap:
412 partial, last, lrev = self._readbranchcache()
412 partial, last, lrev = self._readbranchcache()
413 else:
413 else:
414 lrev = self.changelog.rev(oldtip)
414 lrev = self.changelog.rev(oldtip)
415 partial = self._branchcache
415 partial = self._branchcache
416
416
417 self._branchtags(partial, lrev)
417 self._branchtags(partial, lrev)
418 # this private cache holds all heads (not just tips)
418 # this private cache holds all heads (not just tips)
419 self._branchcache = partial
419 self._branchcache = partial
420
420
421 def branchmap(self):
421 def branchmap(self):
422 '''returns a dictionary {branch: [branchheads]}'''
422 '''returns a dictionary {branch: [branchheads]}'''
423 self.updatebranchcache()
423 self.updatebranchcache()
424 return self._branchcache
424 return self._branchcache
425
425
426 def branchtags(self):
426 def branchtags(self):
427 '''return a dict where branch names map to the tipmost head of
427 '''return a dict where branch names map to the tipmost head of
428 the branch, open heads come before closed'''
428 the branch, open heads come before closed'''
429 bt = {}
429 bt = {}
430 for bn, heads in self.branchmap().iteritems():
430 for bn, heads in self.branchmap().iteritems():
431 tip = heads[-1]
431 tip = heads[-1]
432 for h in reversed(heads):
432 for h in reversed(heads):
433 if 'close' not in self.changelog.read(h)[5]:
433 if 'close' not in self.changelog.read(h)[5]:
434 tip = h
434 tip = h
435 break
435 break
436 bt[bn] = tip
436 bt[bn] = tip
437 return bt
437 return bt
438
438
439 def _readbranchcache(self):
439 def _readbranchcache(self):
440 partial = {}
440 partial = {}
441 try:
441 try:
442 f = self.opener(os.path.join("cache", "branchheads"))
442 f = self.opener(os.path.join("cache", "branchheads"))
443 lines = f.read().split('\n')
443 lines = f.read().split('\n')
444 f.close()
444 f.close()
445 except (IOError, OSError):
445 except (IOError, OSError):
446 return {}, nullid, nullrev
446 return {}, nullid, nullrev
447
447
448 try:
448 try:
449 last, lrev = lines.pop(0).split(" ", 1)
449 last, lrev = lines.pop(0).split(" ", 1)
450 last, lrev = bin(last), int(lrev)
450 last, lrev = bin(last), int(lrev)
451 if lrev >= len(self) or self[lrev].node() != last:
451 if lrev >= len(self) or self[lrev].node() != last:
452 # invalidate the cache
452 # invalidate the cache
453 raise ValueError('invalidating branch cache (tip differs)')
453 raise ValueError('invalidating branch cache (tip differs)')
454 for l in lines:
454 for l in lines:
455 if not l:
455 if not l:
456 continue
456 continue
457 node, label = l.split(" ", 1)
457 node, label = l.split(" ", 1)
458 label = encoding.tolocal(label.strip())
458 label = encoding.tolocal(label.strip())
459 partial.setdefault(label, []).append(bin(node))
459 partial.setdefault(label, []).append(bin(node))
460 except KeyboardInterrupt:
460 except KeyboardInterrupt:
461 raise
461 raise
462 except Exception, inst:
462 except Exception, inst:
463 if self.ui.debugflag:
463 if self.ui.debugflag:
464 self.ui.warn(str(inst), '\n')
464 self.ui.warn(str(inst), '\n')
465 partial, last, lrev = {}, nullid, nullrev
465 partial, last, lrev = {}, nullid, nullrev
466 return partial, last, lrev
466 return partial, last, lrev
467
467
468 def _writebranchcache(self, branches, tip, tiprev):
468 def _writebranchcache(self, branches, tip, tiprev):
469 try:
469 try:
470 f = self.opener(os.path.join("cache", "branchheads"), "w",
470 f = self.opener(os.path.join("cache", "branchheads"), "w",
471 atomictemp=True)
471 atomictemp=True)
472 f.write("%s %s\n" % (hex(tip), tiprev))
472 f.write("%s %s\n" % (hex(tip), tiprev))
473 for label, nodes in branches.iteritems():
473 for label, nodes in branches.iteritems():
474 for node in nodes:
474 for node in nodes:
475 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
475 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
476 f.rename()
476 f.rename()
477 except (IOError, OSError):
477 except (IOError, OSError):
478 pass
478 pass
479
479
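# --- illustrative sketch, not part of this file ---
# _writebranchcache() above emits .hg/cache/branchheads as plain text: the
# first line is "<tip hex> <tip rev>", every following non-empty line is
# "<node hex> <branch name>".  A minimal standalone reader of that format
# (error handling omitted; assumes a well-formed file):

def readbranchheads(path):
    heads = {}
    f = open(path)
    try:
        tiphex, tiprev = f.readline().split(" ", 1)
        for line in f:
            line = line.rstrip('\n')
            if not line:
                continue
            nodehex, label = line.split(" ", 1)
            heads.setdefault(label, []).append(nodehex)
    finally:
        f.close()
    return tiphex, int(tiprev), heads
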
480 def _updatebranchcache(self, partial, ctxgen):
480 def _updatebranchcache(self, partial, ctxgen):
481 # collect new branch entries
481 # collect new branch entries
482 newbranches = {}
482 newbranches = {}
483 for c in ctxgen:
483 for c in ctxgen:
484 newbranches.setdefault(c.branch(), []).append(c.node())
484 newbranches.setdefault(c.branch(), []).append(c.node())
485 # if older branchheads are reachable from new ones, they aren't
485 # if older branchheads are reachable from new ones, they aren't
486 # really branchheads. Note checking parents is insufficient:
486 # really branchheads. Note checking parents is insufficient:
487 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
487 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
488 for branch, newnodes in newbranches.iteritems():
488 for branch, newnodes in newbranches.iteritems():
489 bheads = partial.setdefault(branch, [])
489 bheads = partial.setdefault(branch, [])
490 bheads.extend(newnodes)
490 bheads.extend(newnodes)
491 if len(bheads) <= 1:
491 if len(bheads) <= 1:
492 continue
492 continue
493 # starting from tip means fewer passes over reachable
493 # starting from tip means fewer passes over reachable
494 while newnodes:
494 while newnodes:
495 latest = newnodes.pop()
495 latest = newnodes.pop()
496 if latest not in bheads:
496 if latest not in bheads:
497 continue
497 continue
498 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
498 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
499 reachable = self.changelog.reachable(latest, minbhrev)
499 reachable = self.changelog.reachable(latest, minbhrev)
500 reachable.remove(latest)
500 reachable.remove(latest)
501 bheads = [b for b in bheads if b not in reachable]
501 bheads = [b for b in bheads if b not in reachable]
502 partial[branch] = bheads
502 partial[branch] = bheads
503
503
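# --- illustrative sketch, not part of this file ---
# The pruning above drops candidate heads that are ancestors of newer
# candidates on the same branch (cf. the "1 -> 2 -> 3" example in the
# comment).  The same idea on a toy linear history, with a plain
# child-to-parent dict standing in for the changelog:

def prune_heads(parents, candidates):
    """Return the candidates that are not ancestors of another candidate."""
    def ancestors(node):
        seen = set()
        while node in parents:
            node = parents[node]
            seen.add(node)
        return seen

    reachable = set()
    for node in candidates:
        reachable.update(ancestors(node))
    return [node for node in candidates if node not in reachable]

# prune_heads({2: 1, 3: 2}, [1, 3]) -> [3]: rev 1 is an ancestor of rev 3,
# so only rev 3 remains a head of its branch.
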
504 def lookup(self, key):
504 def lookup(self, key):
505 if isinstance(key, int):
505 if isinstance(key, int):
506 return self.changelog.node(key)
506 return self.changelog.node(key)
507 elif key == '.':
507 elif key == '.':
508 return self.dirstate.parents()[0]
508 return self.dirstate.parents()[0]
509 elif key == 'null':
509 elif key == 'null':
510 return nullid
510 return nullid
511 elif key == 'tip':
511 elif key == 'tip':
512 return self.changelog.tip()
512 return self.changelog.tip()
513 n = self.changelog._match(key)
513 n = self.changelog._match(key)
514 if n:
514 if n:
515 return n
515 return n
516 if key in self.tags():
516 if key in self.tags():
517 return self.tags()[key]
517 return self.tags()[key]
518 if key in self.branchtags():
518 if key in self.branchtags():
519 return self.branchtags()[key]
519 return self.branchtags()[key]
520 n = self.changelog._partialmatch(key)
520 n = self.changelog._partialmatch(key)
521 if n:
521 if n:
522 return n
522 return n
523
523
524 # can't find key, check if it might have come from damaged dirstate
524 # can't find key, check if it might have come from damaged dirstate
525 if key in self.dirstate.parents():
525 if key in self.dirstate.parents():
526 raise error.Abort(_("working directory has unknown parent '%s'!")
526 raise error.Abort(_("working directory has unknown parent '%s'!")
527 % short(key))
527 % short(key))
528 try:
528 try:
529 if len(key) == 20:
529 if len(key) == 20:
530 key = hex(key)
530 key = hex(key)
531 except:
531 except:
532 pass
532 pass
533 raise error.RepoLookupError(_("unknown revision '%s'") % key)
533 raise error.RepoLookupError(_("unknown revision '%s'") % key)
534
534
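# --- illustrative sketch, not part of this file ---
# lookup() above resolves a key in a fixed order: integer revision, the
# special names '.', 'null' and 'tip', an exact node match, tag names,
# branch names (via branchtags()), and finally an unambiguous node-hex
# prefix; anything else raises RepoLookupError.  Typical use:

from mercurial.node import hex

def resolve(repo, keys):
    """Map symbolic names to full hex nodes, e.g. resolve(repo, ['tip', '.'])."""
    return dict((key, hex(repo.lookup(key))) for key in keys)
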
535 def lookupbranch(self, key, remote=None):
535 def lookupbranch(self, key, remote=None):
536 repo = remote or self
536 repo = remote or self
537 if key in repo.branchmap():
537 if key in repo.branchmap():
538 return key
538 return key
539
539
540 repo = (remote and remote.local()) and remote or self
540 repo = (remote and remote.local()) and remote or self
541 return repo[key].branch()
541 return repo[key].branch()
542
542
543 def local(self):
543 def local(self):
544 return True
544 return True
545
545
546 def join(self, f):
546 def join(self, f):
547 return os.path.join(self.path, f)
547 return os.path.join(self.path, f)
548
548
549 def wjoin(self, f):
549 def wjoin(self, f):
550 return os.path.join(self.root, f)
550 return os.path.join(self.root, f)
551
551
552 def file(self, f):
552 def file(self, f):
553 if f[0] == '/':
553 if f[0] == '/':
554 f = f[1:]
554 f = f[1:]
555 return filelog.filelog(self.sopener, f)
555 return filelog.filelog(self.sopener, f)
556
556
557 def changectx(self, changeid):
557 def changectx(self, changeid):
558 return self[changeid]
558 return self[changeid]
559
559
560 def parents(self, changeid=None):
560 def parents(self, changeid=None):
561 '''get list of changectxs for parents of changeid'''
561 '''get list of changectxs for parents of changeid'''
562 return self[changeid].parents()
562 return self[changeid].parents()
563
563
564 def filectx(self, path, changeid=None, fileid=None):
564 def filectx(self, path, changeid=None, fileid=None):
565 """changeid can be a changeset revision, node, or tag.
565 """changeid can be a changeset revision, node, or tag.
566 fileid can be a file revision or node."""
566 fileid can be a file revision or node."""
567 return context.filectx(self, path, changeid, fileid)
567 return context.filectx(self, path, changeid, fileid)
568
568
569 def getcwd(self):
569 def getcwd(self):
570 return self.dirstate.getcwd()
570 return self.dirstate.getcwd()
571
571
572 def pathto(self, f, cwd=None):
572 def pathto(self, f, cwd=None):
573 return self.dirstate.pathto(f, cwd)
573 return self.dirstate.pathto(f, cwd)
574
574
575 def wfile(self, f, mode='r'):
575 def wfile(self, f, mode='r'):
576 return self.wopener(f, mode)
576 return self.wopener(f, mode)
577
577
578 def _link(self, f):
578 def _link(self, f):
579 return os.path.islink(self.wjoin(f))
579 return os.path.islink(self.wjoin(f))
580
580
581 def _loadfilter(self, filter):
581 def _loadfilter(self, filter):
582 if filter not in self.filterpats:
582 if filter not in self.filterpats:
583 l = []
583 l = []
584 for pat, cmd in self.ui.configitems(filter):
584 for pat, cmd in self.ui.configitems(filter):
585 if cmd == '!':
585 if cmd == '!':
586 continue
586 continue
587 mf = matchmod.match(self.root, '', [pat])
587 mf = matchmod.match(self.root, '', [pat])
588 fn = None
588 fn = None
589 params = cmd
589 params = cmd
590 for name, filterfn in self._datafilters.iteritems():
590 for name, filterfn in self._datafilters.iteritems():
591 if cmd.startswith(name):
591 if cmd.startswith(name):
592 fn = filterfn
592 fn = filterfn
593 params = cmd[len(name):].lstrip()
593 params = cmd[len(name):].lstrip()
594 break
594 break
595 if not fn:
595 if not fn:
596 fn = lambda s, c, **kwargs: util.filter(s, c)
596 fn = lambda s, c, **kwargs: util.filter(s, c)
597 # Wrap old filters not supporting keyword arguments
597 # Wrap old filters not supporting keyword arguments
598 if not inspect.getargspec(fn)[2]:
598 if not inspect.getargspec(fn)[2]:
599 oldfn = fn
599 oldfn = fn
600 fn = lambda s, c, **kwargs: oldfn(s, c)
600 fn = lambda s, c, **kwargs: oldfn(s, c)
601 l.append((mf, fn, params))
601 l.append((mf, fn, params))
602 self.filterpats[filter] = l
602 self.filterpats[filter] = l
603 return self.filterpats[filter]
603 return self.filterpats[filter]
604
604
605 def _filter(self, filterpats, filename, data):
605 def _filter(self, filterpats, filename, data):
606 for mf, fn, cmd in filterpats:
606 for mf, fn, cmd in filterpats:
607 if mf(filename):
607 if mf(filename):
608 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
608 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
609 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
609 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
610 break
610 break
611
611
612 return data
612 return data
613
613
614 @propertycache
614 @propertycache
615 def _encodefilterpats(self):
615 def _encodefilterpats(self):
616 return self._loadfilter('encode')
616 return self._loadfilter('encode')
617
617
618 @propertycache
618 @propertycache
619 def _decodefilterpats(self):
619 def _decodefilterpats(self):
620 return self._loadfilter('decode')
620 return self._loadfilter('decode')
621
621
622 def adddatafilter(self, name, filter):
622 def adddatafilter(self, name, filter):
623 self._datafilters[name] = filter
623 self._datafilters[name] = filter
624
624
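# --- illustrative sketch, not part of this file ---
# _loadfilter()/_filter() above drive the [encode]/[decode] hgrc sections:
# each entry maps a file pattern to either a shell command (run through
# util.filter) or the name of a data filter registered with adddatafilter().
# A hypothetical setup (all names invented for the example):
#
#   [encode]
#   **.txt = tr '[:lower:]' '[:upper:]'
#   [decode]
#   **.dat = mydecode:
#
# where an extension registers the named filter roughly like this:

def toydatafilter(s, cmd, **kwargs):
    # 's' is the file data, 'cmd' is whatever follows the registered name in
    # the config value, kwargs carries ui/repo/filename (cf. _filter above)
    return s.upper()

def reposetup(ui, repo):
    repo.adddatafilter('mydecode:', toydatafilter)
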
625 def wread(self, filename):
625 def wread(self, filename):
626 if self._link(filename):
626 if self._link(filename):
627 data = os.readlink(self.wjoin(filename))
627 data = os.readlink(self.wjoin(filename))
628 else:
628 else:
629 data = self.wopener(filename, 'r').read()
629 data = self.wopener(filename, 'r').read()
630 return self._filter(self._encodefilterpats, filename, data)
630 return self._filter(self._encodefilterpats, filename, data)
631
631
632 def wwrite(self, filename, data, flags):
632 def wwrite(self, filename, data, flags):
633 data = self._filter(self._decodefilterpats, filename, data)
633 data = self._filter(self._decodefilterpats, filename, data)
634 if 'l' in flags:
634 if 'l' in flags:
635 self.wopener.symlink(data, filename)
635 self.wopener.symlink(data, filename)
636 else:
636 else:
637 self.wopener(filename, 'w').write(data)
637 self.wopener(filename, 'w').write(data)
638 if 'x' in flags:
638 if 'x' in flags:
639 util.set_flags(self.wjoin(filename), False, True)
639 util.set_flags(self.wjoin(filename), False, True)
640
640
641 def wwritedata(self, filename, data):
641 def wwritedata(self, filename, data):
642 return self._filter(self._decodefilterpats, filename, data)
642 return self._filter(self._decodefilterpats, filename, data)
643
643
644 def transaction(self, desc):
644 def transaction(self, desc):
645 tr = self._transref and self._transref() or None
645 tr = self._transref and self._transref() or None
646 if tr and tr.running():
646 if tr and tr.running():
647 return tr.nest()
647 return tr.nest()
648
648
649 # abort here if the journal already exists
649 # abort here if the journal already exists
650 if os.path.exists(self.sjoin("journal")):
650 if os.path.exists(self.sjoin("journal")):
651 raise error.RepoError(
651 raise error.RepoError(
652 _("abandoned transaction found - run hg recover"))
652 _("abandoned transaction found - run hg recover"))
653
653
654 # save dirstate for rollback
654 # save dirstate for rollback
655 try:
655 try:
656 ds = self.opener("dirstate").read()
656 ds = self.opener("dirstate").read()
657 except IOError:
657 except IOError:
658 ds = ""
658 ds = ""
659 self.opener("journal.dirstate", "w").write(ds)
659 self.opener("journal.dirstate", "w").write(ds)
660 self.opener("journal.branch", "w").write(
660 self.opener("journal.branch", "w").write(
661 encoding.fromlocal(self.dirstate.branch()))
661 encoding.fromlocal(self.dirstate.branch()))
662 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
662 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
663
663
664 renames = [(self.sjoin("journal"), self.sjoin("undo")),
664 renames = [(self.sjoin("journal"), self.sjoin("undo")),
665 (self.join("journal.dirstate"), self.join("undo.dirstate")),
665 (self.join("journal.dirstate"), self.join("undo.dirstate")),
666 (self.join("journal.branch"), self.join("undo.branch")),
666 (self.join("journal.branch"), self.join("undo.branch")),
667 (self.join("journal.desc"), self.join("undo.desc"))]
667 (self.join("journal.desc"), self.join("undo.desc"))]
668 tr = transaction.transaction(self.ui.warn, self.sopener,
668 tr = transaction.transaction(self.ui.warn, self.sopener,
669 self.sjoin("journal"),
669 self.sjoin("journal"),
670 aftertrans(renames),
670 aftertrans(renames),
671 self.store.createmode)
671 self.store.createmode)
672 self._transref = weakref.ref(tr)
672 self._transref = weakref.ref(tr)
673 return tr
673 return tr
674
674
675 def recover(self):
675 def recover(self):
676 lock = self.lock()
676 lock = self.lock()
677 try:
677 try:
678 if os.path.exists(self.sjoin("journal")):
678 if os.path.exists(self.sjoin("journal")):
679 self.ui.status(_("rolling back interrupted transaction\n"))
679 self.ui.status(_("rolling back interrupted transaction\n"))
680 transaction.rollback(self.sopener, self.sjoin("journal"),
680 transaction.rollback(self.sopener, self.sjoin("journal"),
681 self.ui.warn)
681 self.ui.warn)
682 self.invalidate()
682 self.invalidate()
683 return True
683 return True
684 else:
684 else:
685 self.ui.warn(_("no interrupted transaction available\n"))
685 self.ui.warn(_("no interrupted transaction available\n"))
686 return False
686 return False
687 finally:
687 finally:
688 lock.release()
688 lock.release()
689
689
690 def rollback(self, dryrun=False):
690 def rollback(self, dryrun=False):
691 wlock = lock = None
691 wlock = lock = None
692 try:
692 try:
693 wlock = self.wlock()
693 wlock = self.wlock()
694 lock = self.lock()
694 lock = self.lock()
695 if os.path.exists(self.sjoin("undo")):
695 if os.path.exists(self.sjoin("undo")):
696 try:
696 try:
697 args = self.opener("undo.desc", "r").read().splitlines()
697 args = self.opener("undo.desc", "r").read().splitlines()
698 if len(args) >= 3 and self.ui.verbose:
698 if len(args) >= 3 and self.ui.verbose:
699 desc = _("rolling back to revision %s"
699 desc = _("rolling back to revision %s"
700 " (undo %s: %s)\n") % (
700 " (undo %s: %s)\n") % (
701 int(args[0]) - 1, args[1], args[2])
701 int(args[0]) - 1, args[1], args[2])
702 elif len(args) >= 2:
702 elif len(args) >= 2:
703 desc = _("rolling back to revision %s (undo %s)\n") % (
703 desc = _("rolling back to revision %s (undo %s)\n") % (
704 int(args[0]) - 1, args[1])
704 int(args[0]) - 1, args[1])
705 except IOError:
705 except IOError:
706 desc = _("rolling back unknown transaction\n")
706 desc = _("rolling back unknown transaction\n")
707 self.ui.status(desc)
707 self.ui.status(desc)
708 if dryrun:
708 if dryrun:
709 return
709 return
710 transaction.rollback(self.sopener, self.sjoin("undo"),
710 transaction.rollback(self.sopener, self.sjoin("undo"),
711 self.ui.warn)
711 self.ui.warn)
712 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
712 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
713 try:
713 try:
714 branch = self.opener("undo.branch").read()
714 branch = self.opener("undo.branch").read()
715 self.dirstate.setbranch(branch)
715 self.dirstate.setbranch(branch)
716 except IOError:
716 except IOError:
717 self.ui.warn(_("Named branch could not be reset, "
717 self.ui.warn(_("Named branch could not be reset, "
718 "current branch still is: %s\n")
718 "current branch still is: %s\n")
719 % self.dirstate.branch())
719 % self.dirstate.branch())
720 self.invalidate()
720 self.invalidate()
721 self.dirstate.invalidate()
721 self.dirstate.invalidate()
722 self.destroyed()
722 self.destroyed()
723 else:
723 else:
724 self.ui.warn(_("no rollback information available\n"))
724 self.ui.warn(_("no rollback information available\n"))
725 return 1
725 return 1
726 finally:
726 finally:
727 release(lock, wlock)
727 release(lock, wlock)
728
728
729 def invalidatecaches(self):
729 def invalidatecaches(self):
730 self._tags = None
730 self._tags = None
731 self._tagtypes = None
731 self._tagtypes = None
732 self.nodetagscache = None
732 self.nodetagscache = None
733 self._branchcache = None # in UTF-8
733 self._branchcache = None # in UTF-8
734 self._branchcachetip = None
734 self._branchcachetip = None
735
735
736 def invalidate(self):
736 def invalidate(self):
737 for a in ("changelog", "manifest"):
737 for a in ("changelog", "manifest"):
738 if a in self.__dict__:
738 if a in self.__dict__:
739 delattr(self, a)
739 delattr(self, a)
740 self.invalidatecaches()
740 self.invalidatecaches()
741
741
742 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
742 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
743 try:
743 try:
744 l = lock.lock(lockname, 0, releasefn, desc=desc)
744 l = lock.lock(lockname, 0, releasefn, desc=desc)
745 except error.LockHeld, inst:
745 except error.LockHeld, inst:
746 if not wait:
746 if not wait:
747 raise
747 raise
748 self.ui.warn(_("waiting for lock on %s held by %r\n") %
748 self.ui.warn(_("waiting for lock on %s held by %r\n") %
749 (desc, inst.locker))
749 (desc, inst.locker))
750 # default to 600 seconds timeout
750 # default to 600 seconds timeout
751 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
751 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
752 releasefn, desc=desc)
752 releasefn, desc=desc)
753 if acquirefn:
753 if acquirefn:
754 acquirefn()
754 acquirefn()
755 return l
755 return l
756
756
757 def lock(self, wait=True):
757 def lock(self, wait=True):
758 '''Lock the repository store (.hg/store) and return a weak reference
758 '''Lock the repository store (.hg/store) and return a weak reference
759 to the lock. Use this before modifying the store (e.g. committing or
759 to the lock. Use this before modifying the store (e.g. committing or
760 stripping). If you are opening a transaction, get a lock as well.'''
760 stripping). If you are opening a transaction, get a lock as well.'''
761 l = self._lockref and self._lockref()
761 l = self._lockref and self._lockref()
762 if l is not None and l.held:
762 if l is not None and l.held:
763 l.lock()
763 l.lock()
764 return l
764 return l
765
765
766 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
766 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
767 _('repository %s') % self.origroot)
767 _('repository %s') % self.origroot)
768 self._lockref = weakref.ref(l)
768 self._lockref = weakref.ref(l)
769 return l
769 return l
770
770
771 def wlock(self, wait=True):
771 def wlock(self, wait=True):
772 '''Lock the non-store parts of the repository (everything under
772 '''Lock the non-store parts of the repository (everything under
773 .hg except .hg/store) and return a weak reference to the lock.
773 .hg except .hg/store) and return a weak reference to the lock.
774 Use this before modifying files in .hg.'''
774 Use this before modifying files in .hg.'''
775 l = self._wlockref and self._wlockref()
775 l = self._wlockref and self._wlockref()
776 if l is not None and l.held:
776 if l is not None and l.held:
777 l.lock()
777 l.lock()
778 return l
778 return l
779
779
780 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
780 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
781 self.dirstate.invalidate, _('working directory of %s') %
781 self.dirstate.invalidate, _('working directory of %s') %
782 self.origroot)
782 self.origroot)
783 self._wlockref = weakref.ref(l)
783 self._wlockref = weakref.ref(l)
784 return l
784 return l
785
785
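# --- illustrative sketch, not part of this file ---
# When both locks are needed, code in this file (e.g. rollback() above)
# takes the working-directory lock before the store lock and releases them
# in reverse order.  The same pattern for an external caller:

def locked_operation(repo, operation):
    """Run operation(repo) while holding wlock then lock, like rollback()."""
    wlock = repo.wlock()
    try:
        lock = repo.lock()
        try:
            return operation(repo)
        finally:
            lock.release()
    finally:
        wlock.release()
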
786 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
786 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
787 """
787 """
788 commit an individual file as part of a larger transaction
788 commit an individual file as part of a larger transaction
789 """
789 """
790
790
791 fname = fctx.path()
791 fname = fctx.path()
792 text = fctx.data()
792 text = fctx.data()
793 flog = self.file(fname)
793 flog = self.file(fname)
794 fparent1 = manifest1.get(fname, nullid)
794 fparent1 = manifest1.get(fname, nullid)
795 fparent2 = fparent2o = manifest2.get(fname, nullid)
795 fparent2 = fparent2o = manifest2.get(fname, nullid)
796
796
797 meta = {}
797 meta = {}
798 copy = fctx.renamed()
798 copy = fctx.renamed()
799 if copy and copy[0] != fname:
799 if copy and copy[0] != fname:
800 # Mark the new revision of this file as a copy of another
800 # Mark the new revision of this file as a copy of another
801 # file. This copy data will effectively act as a parent
801 # file. This copy data will effectively act as a parent
802 # of this new revision. If this is a merge, the first
802 # of this new revision. If this is a merge, the first
803 # parent will be the nullid (meaning "look up the copy data")
803 # parent will be the nullid (meaning "look up the copy data")
804 # and the second one will be the other parent. For example:
804 # and the second one will be the other parent. For example:
805 #
805 #
806 # 0 --- 1 --- 3 rev1 changes file foo
806 # 0 --- 1 --- 3 rev1 changes file foo
807 # \ / rev2 renames foo to bar and changes it
807 # \ / rev2 renames foo to bar and changes it
808 # \- 2 -/ rev3 should have bar with all changes and
808 # \- 2 -/ rev3 should have bar with all changes and
809 # should record that bar descends from
809 # should record that bar descends from
810 # bar in rev2 and foo in rev1
810 # bar in rev2 and foo in rev1
811 #
811 #
812 # this allows this merge to succeed:
812 # this allows this merge to succeed:
813 #
813 #
814 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
814 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
815 # \ / merging rev3 and rev4 should use bar@rev2
815 # \ / merging rev3 and rev4 should use bar@rev2
816 # \- 2 --- 4 as the merge base
816 # \- 2 --- 4 as the merge base
817 #
817 #
818
818
819 cfname = copy[0]
819 cfname = copy[0]
820 crev = manifest1.get(cfname)
820 crev = manifest1.get(cfname)
821 newfparent = fparent2
821 newfparent = fparent2
822
822
823 if manifest2: # branch merge
823 if manifest2: # branch merge
824 if fparent2 == nullid or crev is None: # copied on remote side
824 if fparent2 == nullid or crev is None: # copied on remote side
825 if cfname in manifest2:
825 if cfname in manifest2:
826 crev = manifest2[cfname]
826 crev = manifest2[cfname]
827 newfparent = fparent1
827 newfparent = fparent1
828
828
829 # find source in nearest ancestor if we've lost track
829 # find source in nearest ancestor if we've lost track
830 if not crev:
830 if not crev:
831 self.ui.debug(" %s: searching for copy revision for %s\n" %
831 self.ui.debug(" %s: searching for copy revision for %s\n" %
832 (fname, cfname))
832 (fname, cfname))
833 for ancestor in self[None].ancestors():
833 for ancestor in self[None].ancestors():
834 if cfname in ancestor:
834 if cfname in ancestor:
835 crev = ancestor[cfname].filenode()
835 crev = ancestor[cfname].filenode()
836 break
836 break
837
837
838 if crev:
838 if crev:
839 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
839 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
840 meta["copy"] = cfname
840 meta["copy"] = cfname
841 meta["copyrev"] = hex(crev)
841 meta["copyrev"] = hex(crev)
842 fparent1, fparent2 = nullid, newfparent
842 fparent1, fparent2 = nullid, newfparent
843 else:
843 else:
844 self.ui.warn(_("warning: can't find ancestor for '%s' "
844 self.ui.warn(_("warning: can't find ancestor for '%s' "
845 "copied from '%s'!\n") % (fname, cfname))
845 "copied from '%s'!\n") % (fname, cfname))
846
846
847 elif fparent2 != nullid:
847 elif fparent2 != nullid:
848 # is one parent an ancestor of the other?
848 # is one parent an ancestor of the other?
849 fparentancestor = flog.ancestor(fparent1, fparent2)
849 fparentancestor = flog.ancestor(fparent1, fparent2)
850 if fparentancestor == fparent1:
850 if fparentancestor == fparent1:
851 fparent1, fparent2 = fparent2, nullid
851 fparent1, fparent2 = fparent2, nullid
852 elif fparentancestor == fparent2:
852 elif fparentancestor == fparent2:
853 fparent2 = nullid
853 fparent2 = nullid
854
854
855 # is the file changed?
855 # is the file changed?
856 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
856 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
857 changelist.append(fname)
857 changelist.append(fname)
858 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
858 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
859
859
860 # are just the flags changed during merge?
860 # are just the flags changed during merge?
861 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
861 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
862 changelist.append(fname)
862 changelist.append(fname)
863
863
864 return fparent1
864 return fparent1
865
865
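# --- illustrative sketch, not part of this file ---
# For a copy or rename, _filecommit() above does not record the source as a
# file parent: fparent1 is set to nullid ("look up the copy data") and the
# source is stored in the filelog entry's metadata as meta['copy'] (source
# path) and meta['copyrev'] (hex filenode of the source).  Readers get it
# back through the same accessor used at the top of _filecommit():

def copysource(fctx):
    """Return (sourcepath, sourcefilenode) for a copied file, else a false value."""
    return fctx.renamed()
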
866 def commit(self, text="", user=None, date=None, match=None, force=False,
866 def commit(self, text="", user=None, date=None, match=None, force=False,
867 editor=False, extra={}):
867 editor=False, extra={}):
868 """Add a new revision to current repository.
868 """Add a new revision to current repository.
869
869
870 Revision information is gathered from the working directory,
870 Revision information is gathered from the working directory,
871 match can be used to filter the committed files. If editor is
871 match can be used to filter the committed files. If editor is
872 supplied, it is called to get a commit message.
872 supplied, it is called to get a commit message.
873 """
873 """
874
874
875 def fail(f, msg):
875 def fail(f, msg):
876 raise util.Abort('%s: %s' % (f, msg))
876 raise util.Abort('%s: %s' % (f, msg))
877
877
878 if not match:
878 if not match:
879 match = matchmod.always(self.root, '')
879 match = matchmod.always(self.root, '')
880
880
881 if not force:
881 if not force:
882 vdirs = []
882 vdirs = []
883 match.dir = vdirs.append
883 match.dir = vdirs.append
884 match.bad = fail
884 match.bad = fail
885
885
886 wlock = self.wlock()
886 wlock = self.wlock()
887 try:
887 try:
888 wctx = self[None]
888 wctx = self[None]
889 merge = len(wctx.parents()) > 1
889 merge = len(wctx.parents()) > 1
890
890
891 if (not force and merge and match and
891 if (not force and merge and match and
892 (match.files() or match.anypats())):
892 (match.files() or match.anypats())):
893 raise util.Abort(_('cannot partially commit a merge '
893 raise util.Abort(_('cannot partially commit a merge '
894 '(do not specify files or patterns)'))
894 '(do not specify files or patterns)'))
895
895
896 changes = self.status(match=match, clean=force)
896 changes = self.status(match=match, clean=force)
897 if force:
897 if force:
898 changes[0].extend(changes[6]) # mq may commit unchanged files
898 changes[0].extend(changes[6]) # mq may commit unchanged files
899
899
900 # check subrepos
900 # check subrepos
901 subs = []
901 subs = []
902 removedsubs = set()
902 removedsubs = set()
903 for p in wctx.parents():
903 for p in wctx.parents():
904 removedsubs.update(s for s in p.substate if match(s))
904 removedsubs.update(s for s in p.substate if match(s))
905 for s in wctx.substate:
905 for s in wctx.substate:
906 removedsubs.discard(s)
906 removedsubs.discard(s)
907 if match(s) and wctx.sub(s).dirty():
907 if match(s) and wctx.sub(s).dirty():
908 subs.append(s)
908 subs.append(s)
909 if (subs or removedsubs):
909 if (subs or removedsubs):
910 if (not match('.hgsub') and
910 if (not match('.hgsub') and
911 '.hgsub' in (wctx.modified() + wctx.added())):
911 '.hgsub' in (wctx.modified() + wctx.added())):
912 raise util.Abort(_("can't commit subrepos without .hgsub"))
912 raise util.Abort(_("can't commit subrepos without .hgsub"))
913 if '.hgsubstate' not in changes[0]:
913 if '.hgsubstate' not in changes[0]:
914 changes[0].insert(0, '.hgsubstate')
914 changes[0].insert(0, '.hgsubstate')
915
915
916 # make sure all explicit patterns are matched
916 # make sure all explicit patterns are matched
917 if not force and match.files():
917 if not force and match.files():
918 matched = set(changes[0] + changes[1] + changes[2])
918 matched = set(changes[0] + changes[1] + changes[2])
919
919
920 for f in match.files():
920 for f in match.files():
921 if f == '.' or f in matched or f in wctx.substate:
921 if f == '.' or f in matched or f in wctx.substate:
922 continue
922 continue
923 if f in changes[3]: # missing
923 if f in changes[3]: # missing
924 fail(f, _('file not found!'))
924 fail(f, _('file not found!'))
925 if f in vdirs: # visited directory
925 if f in vdirs: # visited directory
926 d = f + '/'
926 d = f + '/'
927 for mf in matched:
927 for mf in matched:
928 if mf.startswith(d):
928 if mf.startswith(d):
929 break
929 break
930 else:
930 else:
931 fail(f, _("no match under directory!"))
931 fail(f, _("no match under directory!"))
932 elif f not in self.dirstate:
932 elif f not in self.dirstate:
933 fail(f, _("file not tracked!"))
933 fail(f, _("file not tracked!"))
934
934
935 if (not force and not extra.get("close") and not merge
935 if (not force and not extra.get("close") and not merge
936 and not (changes[0] or changes[1] or changes[2])
936 and not (changes[0] or changes[1] or changes[2])
937 and wctx.branch() == wctx.p1().branch()):
937 and wctx.branch() == wctx.p1().branch()):
938 return None
938 return None
939
939
940 ms = mergemod.mergestate(self)
940 ms = mergemod.mergestate(self)
941 for f in changes[0]:
941 for f in changes[0]:
942 if f in ms and ms[f] == 'u':
942 if f in ms and ms[f] == 'u':
943 raise util.Abort(_("unresolved merge conflicts "
943 raise util.Abort(_("unresolved merge conflicts "
944 "(see hg resolve)"))
944 "(see hg resolve)"))
945
945
946 cctx = context.workingctx(self, text, user, date, extra, changes)
946 cctx = context.workingctx(self, text, user, date, extra, changes)
947 if editor:
947 if editor:
948 cctx._text = editor(self, cctx, subs)
948 cctx._text = editor(self, cctx, subs)
949 edited = (text != cctx._text)
949 edited = (text != cctx._text)
950
950
951 # commit subs
951 # commit subs
952 if subs or removedsubs:
952 if subs or removedsubs:
953 state = wctx.substate.copy()
953 state = wctx.substate.copy()
954 for s in sorted(subs):
954 for s in sorted(subs):
955 sub = wctx.sub(s)
955 sub = wctx.sub(s)
956 self.ui.status(_('committing subrepository %s\n') %
956 self.ui.status(_('committing subrepository %s\n') %
957 subrepo.subrelpath(sub))
957 subrepo.subrelpath(sub))
958 sr = sub.commit(cctx._text, user, date)
958 sr = sub.commit(cctx._text, user, date)
959 state[s] = (state[s][0], sr)
959 state[s] = (state[s][0], sr)
960 subrepo.writestate(self, state)
960 subrepo.writestate(self, state)
961
961
962 # Save commit message in case this transaction gets rolled back
962 # Save commit message in case this transaction gets rolled back
963 # (e.g. by a pretxncommit hook). Leave the content alone on
963 # (e.g. by a pretxncommit hook). Leave the content alone on
964 # the assumption that the user will use the same editor again.
964 # the assumption that the user will use the same editor again.
965 msgfile = self.opener('last-message.txt', 'wb')
965 msgfile = self.opener('last-message.txt', 'wb')
966 msgfile.write(cctx._text)
966 msgfile.write(cctx._text)
967 msgfile.close()
967 msgfile.close()
968
968
969 p1, p2 = self.dirstate.parents()
969 p1, p2 = self.dirstate.parents()
970 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
970 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
971 try:
971 try:
972 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
972 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
973 ret = self.commitctx(cctx, True)
973 ret = self.commitctx(cctx, True)
974 except:
974 except:
975 if edited:
975 if edited:
976 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
976 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
977 self.ui.write(
977 self.ui.write(
978 _('note: commit message saved in %s\n') % msgfn)
978 _('note: commit message saved in %s\n') % msgfn)
979 raise
979 raise
980
980
981 # update dirstate and mergestate
981 # update dirstate and mergestate
982 for f in changes[0] + changes[1]:
982 for f in changes[0] + changes[1]:
983 self.dirstate.normal(f)
983 self.dirstate.normal(f)
984 for f in changes[2]:
984 for f in changes[2]:
985 self.dirstate.forget(f)
985 self.dirstate.forget(f)
986 self.dirstate.setparents(ret)
986 self.dirstate.setparents(ret)
987 ms.reset()
987 ms.reset()
988 finally:
988 finally:
989 wlock.release()
989 wlock.release()
990
990
991 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
991 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
992 return ret
992 return ret
993
993
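# --- illustrative sketch, not part of this file ---
# commit() above returns the new changeset node, or None when it decides
# there is nothing to commit on the current branch.  A minimal caller,
# assuming an already-opened repository object with tracked changes in the
# working directory:

def quickcommit(repo, message, username):
    node = repo.commit(text=message, user=username)
    if node is None:
        repo.ui.status("nothing changed\n")
    return node
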
994 def commitctx(self, ctx, error=False):
994 def commitctx(self, ctx, error=False):
995 """Add a new revision to current repository.
995 """Add a new revision to current repository.
996 Revision information is passed via the context argument.
996 Revision information is passed via the context argument.
997 """
997 """
998
998
999 tr = lock = None
999 tr = lock = None
1000 removed = list(ctx.removed())
1000 removed = list(ctx.removed())
1001 p1, p2 = ctx.p1(), ctx.p2()
1001 p1, p2 = ctx.p1(), ctx.p2()
1002 m1 = p1.manifest().copy()
1002 m1 = p1.manifest().copy()
1003 m2 = p2.manifest()
1003 m2 = p2.manifest()
1004 user = ctx.user()
1004 user = ctx.user()
1005
1005
1006 lock = self.lock()
1006 lock = self.lock()
1007 try:
1007 try:
1008 tr = self.transaction("commit")
1008 tr = self.transaction("commit")
1009 trp = weakref.proxy(tr)
1009 trp = weakref.proxy(tr)
1010
1010
1011 # check in files
1011 # check in files
1012 new = {}
1012 new = {}
1013 changed = []
1013 changed = []
1014 linkrev = len(self)
1014 linkrev = len(self)
1015 for f in sorted(ctx.modified() + ctx.added()):
1015 for f in sorted(ctx.modified() + ctx.added()):
1016 self.ui.note(f + "\n")
1016 self.ui.note(f + "\n")
1017 try:
1017 try:
1018 fctx = ctx[f]
1018 fctx = ctx[f]
1019 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1019 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1020 changed)
1020 changed)
1021 m1.set(f, fctx.flags())
1021 m1.set(f, fctx.flags())
1022 except OSError, inst:
1022 except OSError, inst:
1023 self.ui.warn(_("trouble committing %s!\n") % f)
1023 self.ui.warn(_("trouble committing %s!\n") % f)
1024 raise
1024 raise
1025 except IOError, inst:
1025 except IOError, inst:
1026 errcode = getattr(inst, 'errno', errno.ENOENT)
1026 errcode = getattr(inst, 'errno', errno.ENOENT)
1027 if error or errcode and errcode != errno.ENOENT:
1027 if error or errcode and errcode != errno.ENOENT:
1028 self.ui.warn(_("trouble committing %s!\n") % f)
1028 self.ui.warn(_("trouble committing %s!\n") % f)
1029 raise
1029 raise
1030 else:
1030 else:
1031 removed.append(f)
1031 removed.append(f)
1032
1032
1033 # update manifest
1033 # update manifest
1034 m1.update(new)
1034 m1.update(new)
1035 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1035 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1036 drop = [f for f in removed if f in m1]
1036 drop = [f for f in removed if f in m1]
1037 for f in drop:
1037 for f in drop:
1038 del m1[f]
1038 del m1[f]
1039 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1039 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1040 p2.manifestnode(), (new, drop))
1040 p2.manifestnode(), (new, drop))
1041
1041
1042 # update changelog
1042 # update changelog
1043 self.changelog.delayupdate()
1043 self.changelog.delayupdate()
1044 n = self.changelog.add(mn, changed + removed, ctx.description(),
1044 n = self.changelog.add(mn, changed + removed, ctx.description(),
1045 trp, p1.node(), p2.node(),
1045 trp, p1.node(), p2.node(),
1046 user, ctx.date(), ctx.extra().copy())
1046 user, ctx.date(), ctx.extra().copy())
1047 p = lambda: self.changelog.writepending() and self.root or ""
1047 p = lambda: self.changelog.writepending() and self.root or ""
1048 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1048 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1049 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1049 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1050 parent2=xp2, pending=p)
1050 parent2=xp2, pending=p)
1051 self.changelog.finalize(trp)
1051 self.changelog.finalize(trp)
1052 tr.close()
1052 tr.close()
1053
1053
1054 if self._branchcache:
1054 if self._branchcache:
1055 self.updatebranchcache()
1055 self.updatebranchcache()
1056 return n
1056 return n
1057 finally:
1057 finally:
1058 if tr:
1058 if tr:
1059 tr.release()
1059 tr.release()
1060 lock.release()
1060 lock.release()
1061
1061
1062 def destroyed(self):
1062 def destroyed(self):
1063 '''Inform the repository that nodes have been destroyed.
1063 '''Inform the repository that nodes have been destroyed.
1064 Intended for use by strip and rollback, so there's a common
1064 Intended for use by strip and rollback, so there's a common
1065 place for anything that has to be done after destroying history.'''
1065 place for anything that has to be done after destroying history.'''
1066 # XXX it might be nice if we could take the list of destroyed
1066 # XXX it might be nice if we could take the list of destroyed
1067 # nodes, but I don't see an easy way for rollback() to do that
1067 # nodes, but I don't see an easy way for rollback() to do that
1068
1068
1069 # Ensure the persistent tag cache is updated. Doing it now
1069 # Ensure the persistent tag cache is updated. Doing it now
1070 # means that the tag cache only has to worry about destroyed
1070 # means that the tag cache only has to worry about destroyed
1071 # heads immediately after a strip/rollback. That in turn
1071 # heads immediately after a strip/rollback. That in turn
1072 # guarantees that "cachetip == currenttip" (comparing both rev
1072 # guarantees that "cachetip == currenttip" (comparing both rev
1073 # and node) always means no nodes have been added or destroyed.
1073 # and node) always means no nodes have been added or destroyed.
1074
1074
1075 # XXX this is suboptimal when qrefresh'ing: we strip the current
1075 # XXX this is suboptimal when qrefresh'ing: we strip the current
1076 # head, refresh the tag cache, then immediately add a new head.
1076 # head, refresh the tag cache, then immediately add a new head.
1077 # But I think doing it this way is necessary for the "instant
1077 # But I think doing it this way is necessary for the "instant
1078 # tag cache retrieval" case to work.
1078 # tag cache retrieval" case to work.
1079 self.invalidatecaches()
1079 self.invalidatecaches()
1080
1080
1081 def walk(self, match, node=None):
1081 def walk(self, match, node=None):
1082 '''
1082 '''
1083 walk recursively through the directory tree or a given
1083 walk recursively through the directory tree or a given
1084 changeset, finding all files matched by the match
1084 changeset, finding all files matched by the match
1085 function
1085 function
1086 '''
1086 '''
1087 return self[node].walk(match)
1087 return self[node].walk(match)
1088
1088
1089 def status(self, node1='.', node2=None, match=None,
1089 def status(self, node1='.', node2=None, match=None,
1090 ignored=False, clean=False, unknown=False,
1090 ignored=False, clean=False, unknown=False,
1091 listsubrepos=False):
1091 listsubrepos=False):
1092 """return status of files between two nodes or node and working directory
1092 """return status of files between two nodes or node and working directory
1093
1093
1094 If node1 is None, use the first dirstate parent instead.
1094 If node1 is None, use the first dirstate parent instead.
1095 If node2 is None, compare node1 with working directory.
1095 If node2 is None, compare node1 with working directory.
1096 """
1096 """
1097
1097
1098 def mfmatches(ctx):
1098 def mfmatches(ctx):
1099 mf = ctx.manifest().copy()
1099 mf = ctx.manifest().copy()
1100 for fn in mf.keys():
1100 for fn in mf.keys():
1101 if not match(fn):
1101 if not match(fn):
1102 del mf[fn]
1102 del mf[fn]
1103 return mf
1103 return mf
1104
1104
1105 if isinstance(node1, context.changectx):
1105 if isinstance(node1, context.changectx):
1106 ctx1 = node1
1106 ctx1 = node1
1107 else:
1107 else:
1108 ctx1 = self[node1]
1108 ctx1 = self[node1]
1109 if isinstance(node2, context.changectx):
1109 if isinstance(node2, context.changectx):
1110 ctx2 = node2
1110 ctx2 = node2
1111 else:
1111 else:
1112 ctx2 = self[node2]
1112 ctx2 = self[node2]
1113
1113
1114 working = ctx2.rev() is None
1114 working = ctx2.rev() is None
1115 parentworking = working and ctx1 == self['.']
1115 parentworking = working and ctx1 == self['.']
1116 match = match or matchmod.always(self.root, self.getcwd())
1116 match = match or matchmod.always(self.root, self.getcwd())
1117 listignored, listclean, listunknown = ignored, clean, unknown
1117 listignored, listclean, listunknown = ignored, clean, unknown
1118
1118
1119 # load earliest manifest first for caching reasons
1119 # load earliest manifest first for caching reasons
1120 if not working and ctx2.rev() < ctx1.rev():
1120 if not working and ctx2.rev() < ctx1.rev():
1121 ctx2.manifest()
1121 ctx2.manifest()
1122
1122
1123 if not parentworking:
1123 if not parentworking:
1124 def bad(f, msg):
1124 def bad(f, msg):
1125 if f not in ctx1:
1125 if f not in ctx1:
1126 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1126 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1127 match.bad = bad
1127 match.bad = bad
1128
1128
1129 if working: # we need to scan the working dir
1129 if working: # we need to scan the working dir
1130 subrepos = []
1130 subrepos = []
1131 if '.hgsub' in self.dirstate:
1131 if '.hgsub' in self.dirstate:
1132 subrepos = ctx1.substate.keys()
1132 subrepos = ctx1.substate.keys()
1133 s = self.dirstate.status(match, subrepos, listignored,
1133 s = self.dirstate.status(match, subrepos, listignored,
1134 listclean, listunknown)
1134 listclean, listunknown)
1135 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1135 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1136
1136
1137 # check for any possibly clean files
1137 # check for any possibly clean files
1138 if parentworking and cmp:
1138 if parentworking and cmp:
1139 fixup = []
1139 fixup = []
1140 # do a full compare of any files that might have changed
1140 # do a full compare of any files that might have changed
1141 for f in sorted(cmp):
1141 for f in sorted(cmp):
1142 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1142 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1143 or ctx1[f].cmp(ctx2[f])):
1143 or ctx1[f].cmp(ctx2[f])):
1144 modified.append(f)
1144 modified.append(f)
1145 else:
1145 else:
1146 fixup.append(f)
1146 fixup.append(f)
1147
1147
1148 # update dirstate for files that are actually clean
1148 # update dirstate for files that are actually clean
1149 if fixup:
1149 if fixup:
1150 if listclean:
1150 if listclean:
1151 clean += fixup
1151 clean += fixup
1152
1152
1153 try:
1153 try:
1154 # updating the dirstate is optional
1154 # updating the dirstate is optional
1155 # so we don't wait on the lock
1155 # so we don't wait on the lock
1156 wlock = self.wlock(False)
1156 wlock = self.wlock(False)
1157 try:
1157 try:
1158 for f in fixup:
1158 for f in fixup:
1159 self.dirstate.normal(f)
1159 self.dirstate.normal(f)
1160 finally:
1160 finally:
1161 wlock.release()
1161 wlock.release()
1162 except error.LockError:
1162 except error.LockError:
1163 pass
1163 pass
1164
1164
1165 if not parentworking:
1165 if not parentworking:
1166 mf1 = mfmatches(ctx1)
1166 mf1 = mfmatches(ctx1)
1167 if working:
1167 if working:
1168 # we are comparing working dir against non-parent
1168 # we are comparing working dir against non-parent
1169 # generate a pseudo-manifest for the working dir
1169 # generate a pseudo-manifest for the working dir
1170 mf2 = mfmatches(self['.'])
1170 mf2 = mfmatches(self['.'])
1171 for f in cmp + modified + added:
1171 for f in cmp + modified + added:
1172 mf2[f] = None
1172 mf2[f] = None
1173 mf2.set(f, ctx2.flags(f))
1173 mf2.set(f, ctx2.flags(f))
1174 for f in removed:
1174 for f in removed:
1175 if f in mf2:
1175 if f in mf2:
1176 del mf2[f]
1176 del mf2[f]
1177 else:
1177 else:
1178 # we are comparing two revisions
1178 # we are comparing two revisions
1179 deleted, unknown, ignored = [], [], []
1179 deleted, unknown, ignored = [], [], []
1180 mf2 = mfmatches(ctx2)
1180 mf2 = mfmatches(ctx2)
1181
1181
1182 modified, added, clean = [], [], []
1182 modified, added, clean = [], [], []
1183 for fn in mf2:
1183 for fn in mf2:
1184 if fn in mf1:
1184 if fn in mf1:
1185 if (mf1.flags(fn) != mf2.flags(fn) or
1185 if (mf1.flags(fn) != mf2.flags(fn) or
1186 (mf1[fn] != mf2[fn] and
1186 (mf1[fn] != mf2[fn] and
1187 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1187 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1188 modified.append(fn)
1188 modified.append(fn)
1189 elif listclean:
1189 elif listclean:
1190 clean.append(fn)
1190 clean.append(fn)
1191 del mf1[fn]
1191 del mf1[fn]
1192 else:
1192 else:
1193 added.append(fn)
1193 added.append(fn)
1194 removed = mf1.keys()
1194 removed = mf1.keys()
1195
1195
1196 r = modified, added, removed, deleted, unknown, ignored, clean
1196 r = modified, added, removed, deleted, unknown, ignored, clean
1197
1197
1198 if listsubrepos:
1198 if listsubrepos:
1199 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1199 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1200 if working:
1200 if working:
1201 rev2 = None
1201 rev2 = None
1202 else:
1202 else:
1203 rev2 = ctx2.substate[subpath][1]
1203 rev2 = ctx2.substate[subpath][1]
1204 try:
1204 try:
1205 submatch = matchmod.narrowmatcher(subpath, match)
1205 submatch = matchmod.narrowmatcher(subpath, match)
1206 s = sub.status(rev2, match=submatch, ignored=listignored,
1206 s = sub.status(rev2, match=submatch, ignored=listignored,
1207 clean=listclean, unknown=listunknown,
1207 clean=listclean, unknown=listunknown,
1208 listsubrepos=True)
1208 listsubrepos=True)
1209 for rfiles, sfiles in zip(r, s):
1209 for rfiles, sfiles in zip(r, s):
1210 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1210 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1211 except error.LookupError:
1211 except error.LookupError:
1212 self.ui.status(_("skipping missing subrepository: %s\n")
1212 self.ui.status(_("skipping missing subrepository: %s\n")
1213 % subpath)
1213 % subpath)
1214
1214
1215 [l.sort() for l in r]
1215 [l.sort() for l in r]
1216 return r
1216 return r
1217
1217
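# --- illustrative sketch, not part of this file ---
# status() above always returns the seven lists in this order; the ignored,
# clean and unknown lists are only populated when the matching flags are
# passed.  A typical caller unpacks the tuple like this:

def summarize(repo):
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True)
    return {
        'modified': len(modified),
        'added': len(added),
        'removed': len(removed),
        'deleted': len(deleted),
        'unknown': len(unknown),
    }
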
1218 def heads(self, start=None):
1218 def heads(self, start=None):
1219 heads = self.changelog.heads(start)
1219 heads = self.changelog.heads(start)
1220 # sort the output in rev descending order
1220 # sort the output in rev descending order
1221 return sorted(heads, key=self.changelog.rev, reverse=True)
1221 return sorted(heads, key=self.changelog.rev, reverse=True)
1222
1222
1223 def branchheads(self, branch=None, start=None, closed=False):
1223 def branchheads(self, branch=None, start=None, closed=False):
1224 '''return a (possibly filtered) list of heads for the given branch
1224 '''return a (possibly filtered) list of heads for the given branch
1225
1225
1226 Heads are returned in topological order, from newest to oldest.
1226 Heads are returned in topological order, from newest to oldest.
1227 If branch is None, use the dirstate branch.
1227 If branch is None, use the dirstate branch.
1228 If start is not None, return only heads reachable from start.
1228 If start is not None, return only heads reachable from start.
1229 If closed is True, return heads that are marked as closed as well.
1229 If closed is True, return heads that are marked as closed as well.
1230 '''
1230 '''
1231 if branch is None:
1231 if branch is None:
1232 branch = self[None].branch()
1232 branch = self[None].branch()
1233 branches = self.branchmap()
1233 branches = self.branchmap()
1234 if branch not in branches:
1234 if branch not in branches:
1235 return []
1235 return []
1236 # the cache returns heads ordered lowest to highest
1236 # the cache returns heads ordered lowest to highest
1237 bheads = list(reversed(branches[branch]))
1237 bheads = list(reversed(branches[branch]))
1238 if start is not None:
1238 if start is not None:
1239 # filter out the heads that cannot be reached from startrev
1239 # filter out the heads that cannot be reached from startrev
1240 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1240 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1241 bheads = [h for h in bheads if h in fbheads]
1241 bheads = [h for h in bheads if h in fbheads]
1242 if not closed:
1242 if not closed:
1243 bheads = [h for h in bheads if
1243 bheads = [h for h in bheads if
1244 ('close' not in self.changelog.read(h)[5])]
1244 ('close' not in self.changelog.read(h)[5])]
1245 return bheads
1245 return bheads
1246
1246
1247 def branches(self, nodes):
1247 def branches(self, nodes):
1248 if not nodes:
1248 if not nodes:
1249 nodes = [self.changelog.tip()]
1249 nodes = [self.changelog.tip()]
1250 b = []
1250 b = []
1251 for n in nodes:
1251 for n in nodes:
1252 t = n
1252 t = n
1253 while 1:
1253 while 1:
1254 p = self.changelog.parents(n)
1254 p = self.changelog.parents(n)
1255 if p[1] != nullid or p[0] == nullid:
1255 if p[1] != nullid or p[0] == nullid:
1256 b.append((t, n, p[0], p[1]))
1256 b.append((t, n, p[0], p[1]))
1257 break
1257 break
1258 n = p[0]
1258 n = p[0]
1259 return b
1259 return b
1260
1260
1261 def between(self, pairs):
1261 def between(self, pairs):
1262 r = []
1262 r = []
1263
1263
1264 for top, bottom in pairs:
1264 for top, bottom in pairs:
1265 n, l, i = top, [], 0
1265 n, l, i = top, [], 0
1266 f = 1
1266 f = 1
1267
1267
1268 while n != bottom and n != nullid:
1268 while n != bottom and n != nullid:
1269 p = self.changelog.parents(n)[0]
1269 p = self.changelog.parents(n)[0]
1270 if i == f:
1270 if i == f:
1271 l.append(n)
1271 l.append(n)
1272 f = f * 2
1272 f = f * 2
1273 n = p
1273 n = p
1274 i += 1
1274 i += 1
1275
1275
1276 r.append(l)
1276 r.append(l)
1277
1277
1278 return r
1278 return r
1279
1279
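# --- illustrative sketch, not part of this file ---
# between() above walks first parents from each 'top' towards 'bottom' and
# keeps the nodes at exponentially growing distances (1, 2, 4, 8, ...); this
# is the kind of sampling the older discovery protocol relies on to probe a
# long chain without transferring it whole.  The same sampling on plain
# integer revisions:

def sample_chain(parent, top, bottom):
    """parent: dict child -> first parent; return revs at distance 1, 2, 4, ..."""
    sampled = []
    n, i, f = top, 0, 1
    while n != bottom and n is not None:
        p = parent.get(n)
        if i == f:
            sampled.append(n)
            f *= 2
        n = p
        i += 1
    return sampled

# sample_chain(dict((r, r - 1) for r in range(1, 11)), 10, 0) -> [9, 8, 6, 2]
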
1280 def pull(self, remote, heads=None, force=False):
1280 def pull(self, remote, heads=None, force=False):
1281 lock = self.lock()
1281 lock = self.lock()
1282 try:
1282 try:
1283 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1283 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1284 force=force)
1284 force=force)
1285 common, fetch, rheads = tmp
1285 common, fetch, rheads = tmp
1286 if not fetch:
1286 if not fetch:
1287 self.ui.status(_("no changes found\n"))
1287 self.ui.status(_("no changes found\n"))
1288 return 0
1288 return 0
1289
1289
1290 if heads is None and fetch == [nullid]:
1290 if heads is None and fetch == [nullid]:
1291 self.ui.status(_("requesting all changes\n"))
1291 self.ui.status(_("requesting all changes\n"))
1292 elif heads is None and remote.capable('changegroupsubset'):
1292 elif heads is None and remote.capable('changegroupsubset'):
1293 # issue1320, avoid a race if remote changed after discovery
1293 # issue1320, avoid a race if remote changed after discovery
1294 heads = rheads
1294 heads = rheads
1295
1295
1296 if heads is None:
1296 if heads is None:
1297 cg = remote.changegroup(fetch, 'pull')
1297 cg = remote.changegroup(fetch, 'pull')
1298 else:
1298 else:
1299 if not remote.capable('changegroupsubset'):
1299 if not remote.capable('changegroupsubset'):
1300 raise util.Abort(_("partial pull cannot be done because "
1300 raise util.Abort(_("partial pull cannot be done because "
1301 "other repository doesn't support "
1301 "other repository doesn't support "
1302 "changegroupsubset."))
1302 "changegroupsubset."))
1303 cg = remote.changegroupsubset(fetch, heads, 'pull')
1303 cg = remote.changegroupsubset(fetch, heads, 'pull')
1304 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1304 return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
1305 finally:
1305 finally:
1306 lock.release()
1306 lock.release()
1307
1307
1308 def checkpush(self, force, revs):
1309 """Extensions can override this function if additional checks have
1310 to be performed before pushing, or call it if they override the
1311 push command.
1312 """
1313 pass
1314
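checkpush() is the extension hook this changeset factors out (mq overrides it to refuse pushing applied patches). A rough, illustrative sketch of overriding it from an extension's reposetup() -- the specific condition checked here is invented for the example, not the real mq logic:

from mercurial import util
from mercurial.i18n import _

def reposetup(ui, repo):
    if not repo.local():
        return

    class checkedrepo(repo.__class__):
        def checkpush(self, force, revs):
            # Example condition only: refuse to push with uncommitted
            # changes unless --force was given.
            if not force and any(self.status()[:4]):
                raise util.Abort(_('uncommitted changes in working directory'))
            super(checkedrepo, self).checkpush(force, revs)

    repo.__class__ = checkedrepo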
1308 def push(self, remote, force=False, revs=None, newbranch=False):
1315 def push(self, remote, force=False, revs=None, newbranch=False):
1309 '''Push outgoing changesets (limited by revs) from the current
1316 '''Push outgoing changesets (limited by revs) from the current
1310 repository to remote. Return an integer:
1317 repository to remote. Return an integer:
1311 - 0 means HTTP error *or* nothing to push
1318 - 0 means HTTP error *or* nothing to push
1312 - 1 means we pushed and remote head count is unchanged *or*
1319 - 1 means we pushed and remote head count is unchanged *or*
1313 we have outgoing changesets but refused to push
1320 we have outgoing changesets but refused to push
1314 - other values as described by addchangegroup()
1321 - other values as described by addchangegroup()
1315 '''
1322 '''
1316 # there are two ways to push to remote repo:
1323 # there are two ways to push to remote repo:
1317 #
1324 #
1318 # addchangegroup assumes local user can lock remote
1325 # addchangegroup assumes local user can lock remote
1319 # repo (local filesystem, old ssh servers).
1326 # repo (local filesystem, old ssh servers).
1320 #
1327 #
1321 # unbundle assumes local user cannot lock remote repo (new ssh
1328 # unbundle assumes local user cannot lock remote repo (new ssh
1322 # servers, http servers).
1329 # servers, http servers).
1323
1330
1331 self.checkpush(force, revs)
1324 lock = None
1332 lock = None
1325 unbundle = remote.capable('unbundle')
1333 unbundle = remote.capable('unbundle')
1326 if not unbundle:
1334 if not unbundle:
1327 lock = remote.lock()
1335 lock = remote.lock()
1328 try:
1336 try:
1329 ret = discovery.prepush(self, remote, force, revs, newbranch)
1337 ret = discovery.prepush(self, remote, force, revs, newbranch)
1330 if ret[0] is None:
1338 if ret[0] is None:
1331 # and here we return 0 for "nothing to push" or 1 for
1339 # and here we return 0 for "nothing to push" or 1 for
1332 # "something to push but I refuse"
1340 # "something to push but I refuse"
1333 return ret[1]
1341 return ret[1]
1334
1342
1335 cg, remote_heads = ret
1343 cg, remote_heads = ret
1336 if unbundle:
1344 if unbundle:
1337 # local repo finds heads on server, finds out what revs it must
1345 # local repo finds heads on server, finds out what revs it must
1338 # push. Once revs are transferred, if server finds it has
1346 # push. Once revs are transferred, if server finds it has
1339 # different heads (someone else won commit/push race), server
1347 # different heads (someone else won commit/push race), server
1340 # aborts.
1348 # aborts.
1341 if force:
1349 if force:
1342 remote_heads = ['force']
1350 remote_heads = ['force']
1343 # ssh: return remote's addchangegroup()
1351 # ssh: return remote's addchangegroup()
1344 # http: return remote's addchangegroup() or 0 for error
1352 # http: return remote's addchangegroup() or 0 for error
1345 return remote.unbundle(cg, remote_heads, 'push')
1353 return remote.unbundle(cg, remote_heads, 'push')
1346 else:
1354 else:
1347 # we return an integer indicating remote head count change
1355 # we return an integer indicating remote head count change
1348 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1356 return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
1349 finally:
1357 finally:
1350 if lock is not None:
1358 if lock is not None:
1351 lock.release()
1359 lock.release()
1352
1360
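The return-value convention documented above maps onto caller code roughly as follows (a sketch; `repo` and `remote` are assumed to be already-open repository objects):

ret = repo.push(remote, force=False, revs=None, newbranch=False)
if ret == 0:
    repo.ui.warn('push failed or there was nothing to push\n')
elif ret == 1:
    repo.ui.status('pushed (or refused); remote head count unchanged\n')
else:
    # other values follow addchangegroup(): ret - 1 heads were added when
    # ret > 1, and -ret - 1 heads were removed when ret < 0
    repo.ui.status('pushed; remote head count changed (code %d)\n' % ret)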
1353 def changegroupinfo(self, nodes, source):
1361 def changegroupinfo(self, nodes, source):
1354 if self.ui.verbose or source == 'bundle':
1362 if self.ui.verbose or source == 'bundle':
1355 self.ui.status(_("%d changesets found\n") % len(nodes))
1363 self.ui.status(_("%d changesets found\n") % len(nodes))
1356 if self.ui.debugflag:
1364 if self.ui.debugflag:
1357 self.ui.debug("list of changesets:\n")
1365 self.ui.debug("list of changesets:\n")
1358 for node in nodes:
1366 for node in nodes:
1359 self.ui.debug("%s\n" % hex(node))
1367 self.ui.debug("%s\n" % hex(node))
1360
1368
1361 def changegroupsubset(self, bases, heads, source, extranodes=None):
1369 def changegroupsubset(self, bases, heads, source, extranodes=None):
1362 """Compute a changegroup consisting of all the nodes that are
1370 """Compute a changegroup consisting of all the nodes that are
1363 descendants of any of the bases and ancestors of any of the heads.
1371 descendants of any of the bases and ancestors of any of the heads.
1364 Return a chunkbuffer object whose read() method will return
1372 Return a chunkbuffer object whose read() method will return
1365 successive changegroup chunks.
1373 successive changegroup chunks.
1366
1374
1367 It is fairly complex as determining which filenodes and which
1375 It is fairly complex as determining which filenodes and which
1368 manifest nodes need to be included for the changeset to be complete
1376 manifest nodes need to be included for the changeset to be complete
1369 is non-trivial.
1377 is non-trivial.
1370
1378
1371 Another wrinkle is doing the reverse, figuring out which changeset in
1379 Another wrinkle is doing the reverse, figuring out which changeset in
1372 the changegroup a particular filenode or manifestnode belongs to.
1380 the changegroup a particular filenode or manifestnode belongs to.
1373
1381
1374 The caller can specify some nodes that must be included in the
1382 The caller can specify some nodes that must be included in the
1375 changegroup using the extranodes argument. It should be a dict
1383 changegroup using the extranodes argument. It should be a dict
1376 where the keys are the filenames (or 1 for the manifest), and the
1384 where the keys are the filenames (or 1 for the manifest), and the
1377 values are lists of (node, linknode) tuples, where node is a wanted
1385 values are lists of (node, linknode) tuples, where node is a wanted
1378 node and linknode is the changelog node that should be transmitted as
1386 node and linknode is the changelog node that should be transmitted as
1379 the linkrev.
1387 the linkrev.
1380 """
1388 """
1381
1389
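# For illustration only (not part of the original source): an extranodes
# argument could look like
#   {1: [(manifest_node, linknode)],
#    'foo/bar.txt': [(file_node, linknode)]}
# where every node is a 20-byte binary nodeid and linknode is the changelog
# node to advertise as the linkrev source.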
1382 # Set up some initial variables
1390 # Set up some initial variables
1383 # Make it easy to refer to self.changelog
1391 # Make it easy to refer to self.changelog
1384 cl = self.changelog
1392 cl = self.changelog
1385 # Compute the list of changesets in this changegroup.
1393 # Compute the list of changesets in this changegroup.
1386 # Some bases may turn out to be superfluous, and some heads may be
1394 # Some bases may turn out to be superfluous, and some heads may be
1387 # too. nodesbetween will return the minimal set of bases and heads
1395 # too. nodesbetween will return the minimal set of bases and heads
1388 # necessary to re-create the changegroup.
1396 # necessary to re-create the changegroup.
1389 if not bases:
1397 if not bases:
1390 bases = [nullid]
1398 bases = [nullid]
1391 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1399 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1392
1400
1393 if extranodes is None:
1401 if extranodes is None:
1394 # can we go through the fast path?
1402 # can we go through the fast path?
1395 heads.sort()
1403 heads.sort()
1396 allheads = self.heads()
1404 allheads = self.heads()
1397 allheads.sort()
1405 allheads.sort()
1398 if heads == allheads:
1406 if heads == allheads:
1399 return self._changegroup(msng_cl_lst, source)
1407 return self._changegroup(msng_cl_lst, source)
1400
1408
1401 # slow path
1409 # slow path
1402 self.hook('preoutgoing', throw=True, source=source)
1410 self.hook('preoutgoing', throw=True, source=source)
1403
1411
1404 self.changegroupinfo(msng_cl_lst, source)
1412 self.changegroupinfo(msng_cl_lst, source)
1405
1413
1406 # We assume that all ancestors of bases are known
1414 # We assume that all ancestors of bases are known
1407 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1415 commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1408
1416
1409 # Make it easy to refer to self.manifest
1417 # Make it easy to refer to self.manifest
1410 mnfst = self.manifest
1418 mnfst = self.manifest
1411 # We don't know which manifests are missing yet
1419 # We don't know which manifests are missing yet
1412 msng_mnfst_set = {}
1420 msng_mnfst_set = {}
1413 # Nor do we know which filenodes are missing.
1421 # Nor do we know which filenodes are missing.
1414 msng_filenode_set = {}
1422 msng_filenode_set = {}
1415
1423
1416 # A changeset always belongs to itself, so the changenode lookup
1424 # A changeset always belongs to itself, so the changenode lookup
1417 # function for a changenode is identity.
1425 # function for a changenode is identity.
1418 def identity(x):
1426 def identity(x):
1419 return x
1427 return x
1420
1428
1421 # A function generating function that sets up the initial environment
1429 # A function generating function that sets up the initial environment
1422 # for the inner function.
1430 # for the inner function.
1423 def filenode_collector(changedfiles):
1431 def filenode_collector(changedfiles):
1424 # This gathers information from each manifestnode included in the
1432 # This gathers information from each manifestnode included in the
1425 # changegroup about which filenodes the manifest node references
1433 # changegroup about which filenodes the manifest node references
1426 # so we can include those in the changegroup too.
1434 # so we can include those in the changegroup too.
1427 #
1435 #
1428 # It also remembers which changenode each filenode belongs to. It
1436 # It also remembers which changenode each filenode belongs to. It
1429 # does this by assuming a filenode belongs to the changenode that
1437 # does this by assuming a filenode belongs to the changenode that
1430 # the first manifest referencing it belongs to.
1438 # the first manifest referencing it belongs to.
1431 def collect_msng_filenodes(mnfstnode):
1439 def collect_msng_filenodes(mnfstnode):
1432 r = mnfst.rev(mnfstnode)
1440 r = mnfst.rev(mnfstnode)
1433 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1441 if mnfst.deltaparent(r) in mnfst.parentrevs(r):
1434 # If the previous rev is one of the parents,
1442 # If the previous rev is one of the parents,
1435 # we only need to see a diff.
1443 # we only need to see a diff.
1436 deltamf = mnfst.readdelta(mnfstnode)
1444 deltamf = mnfst.readdelta(mnfstnode)
1437 # For each line in the delta
1445 # For each line in the delta
1438 for f, fnode in deltamf.iteritems():
1446 for f, fnode in deltamf.iteritems():
1439 # And if the file is in the list of files we care
1447 # And if the file is in the list of files we care
1440 # about.
1448 # about.
1441 if f in changedfiles:
1449 if f in changedfiles:
1442 # Get the changenode this manifest belongs to
1450 # Get the changenode this manifest belongs to
1443 clnode = msng_mnfst_set[mnfstnode]
1451 clnode = msng_mnfst_set[mnfstnode]
1444 # Create the set of filenodes for the file if
1452 # Create the set of filenodes for the file if
1445 # there isn't one already.
1453 # there isn't one already.
1446 ndset = msng_filenode_set.setdefault(f, {})
1454 ndset = msng_filenode_set.setdefault(f, {})
1447 # And set the filenode's changelog node to the
1455 # And set the filenode's changelog node to the
1448 # manifest's if it hasn't been set already.
1456 # manifest's if it hasn't been set already.
1449 ndset.setdefault(fnode, clnode)
1457 ndset.setdefault(fnode, clnode)
1450 else:
1458 else:
1451 # Otherwise we need a full manifest.
1459 # Otherwise we need a full manifest.
1452 m = mnfst.read(mnfstnode)
1460 m = mnfst.read(mnfstnode)
1453 # For every file we care about.
1461 # For every file we care about.
1454 for f in changedfiles:
1462 for f in changedfiles:
1455 fnode = m.get(f, None)
1463 fnode = m.get(f, None)
1456 # If it's in the manifest
1464 # If it's in the manifest
1457 if fnode is not None:
1465 if fnode is not None:
1458 # See comments above.
1466 # See comments above.
1459 clnode = msng_mnfst_set[mnfstnode]
1467 clnode = msng_mnfst_set[mnfstnode]
1460 ndset = msng_filenode_set.setdefault(f, {})
1468 ndset = msng_filenode_set.setdefault(f, {})
1461 ndset.setdefault(fnode, clnode)
1469 ndset.setdefault(fnode, clnode)
1462 return collect_msng_filenodes
1470 return collect_msng_filenodes
1463
1471
1464 # If we determine that a particular file or manifest node must be a
1472 # If we determine that a particular file or manifest node must be a
1465 # node that the recipient of the changegroup will already have, we can
1473 # node that the recipient of the changegroup will already have, we can
1466 # also assume the recipient will have all the parents. This function
1474 # also assume the recipient will have all the parents. This function
1467 # prunes them from the set of missing nodes.
1475 # prunes them from the set of missing nodes.
1468 def prune(revlog, missingnodes):
1476 def prune(revlog, missingnodes):
1469 hasset = set()
1477 hasset = set()
1470 # If a 'missing' filenode thinks it belongs to a changenode we
1478 # If a 'missing' filenode thinks it belongs to a changenode we
1471 # assume the recipient must have, then the recipient must have
1479 # assume the recipient must have, then the recipient must have
1472 # that filenode.
1480 # that filenode.
1473 for n in missingnodes:
1481 for n in missingnodes:
1474 clrev = revlog.linkrev(revlog.rev(n))
1482 clrev = revlog.linkrev(revlog.rev(n))
1475 if clrev in commonrevs:
1483 if clrev in commonrevs:
1476 hasset.add(n)
1484 hasset.add(n)
1477 for n in hasset:
1485 for n in hasset:
1478 missingnodes.pop(n, None)
1486 missingnodes.pop(n, None)
1479 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1487 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1480 missingnodes.pop(revlog.node(r), None)
1488 missingnodes.pop(revlog.node(r), None)
1481
1489
1482 # Add the nodes that were explicitly requested.
1490 # Add the nodes that were explicitly requested.
1483 def add_extra_nodes(name, nodes):
1491 def add_extra_nodes(name, nodes):
1484 if not extranodes or name not in extranodes:
1492 if not extranodes or name not in extranodes:
1485 return
1493 return
1486
1494
1487 for node, linknode in extranodes[name]:
1495 for node, linknode in extranodes[name]:
1488 if node not in nodes:
1496 if node not in nodes:
1489 nodes[node] = linknode
1497 nodes[node] = linknode
1490
1498
1491 # Now that we have all these utility functions to help out and
1499 # Now that we have all these utility functions to help out and
1492 # logically divide up the task, generate the group.
1500 # logically divide up the task, generate the group.
1493 def gengroup():
1501 def gengroup():
1494 # The set of changed files starts empty.
1502 # The set of changed files starts empty.
1495 changedfiles = set()
1503 changedfiles = set()
1496 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1504 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1497
1505
1498 # Create a changenode group generator that will call our functions
1506 # Create a changenode group generator that will call our functions
1499 # back to lookup the owning changenode and collect information.
1507 # back to lookup the owning changenode and collect information.
1500 group = cl.group(msng_cl_lst, identity, collect)
1508 group = cl.group(msng_cl_lst, identity, collect)
1501 for cnt, chnk in enumerate(group):
1509 for cnt, chnk in enumerate(group):
1502 yield chnk
1510 yield chnk
1503 # revlog.group yields three entries per node, so
1511 # revlog.group yields three entries per node, so
1504 # dividing by 3 gives an approximation of how many
1512 # dividing by 3 gives an approximation of how many
1505 # nodes have been processed.
1513 # nodes have been processed.
1506 self.ui.progress(_('bundling'), cnt / 3,
1514 self.ui.progress(_('bundling'), cnt / 3,
1507 unit=_('changesets'))
1515 unit=_('changesets'))
1508 changecount = cnt / 3
1516 changecount = cnt / 3
1509 self.ui.progress(_('bundling'), None)
1517 self.ui.progress(_('bundling'), None)
1510
1518
1511 prune(mnfst, msng_mnfst_set)
1519 prune(mnfst, msng_mnfst_set)
1512 add_extra_nodes(1, msng_mnfst_set)
1520 add_extra_nodes(1, msng_mnfst_set)
1513 msng_mnfst_lst = msng_mnfst_set.keys()
1521 msng_mnfst_lst = msng_mnfst_set.keys()
1514 # Sort the manifestnodes by revision number.
1522 # Sort the manifestnodes by revision number.
1515 msng_mnfst_lst.sort(key=mnfst.rev)
1523 msng_mnfst_lst.sort(key=mnfst.rev)
1516 # Create a generator for the manifestnodes that calls our lookup
1524 # Create a generator for the manifestnodes that calls our lookup
1517 # and data collection functions back.
1525 # and data collection functions back.
1518 group = mnfst.group(msng_mnfst_lst,
1526 group = mnfst.group(msng_mnfst_lst,
1519 lambda mnode: msng_mnfst_set[mnode],
1527 lambda mnode: msng_mnfst_set[mnode],
1520 filenode_collector(changedfiles))
1528 filenode_collector(changedfiles))
1521 efiles = {}
1529 efiles = {}
1522 for cnt, chnk in enumerate(group):
1530 for cnt, chnk in enumerate(group):
1523 if cnt % 3 == 1:
1531 if cnt % 3 == 1:
1524 mnode = chnk[:20]
1532 mnode = chnk[:20]
1525 efiles.update(mnfst.readdelta(mnode))
1533 efiles.update(mnfst.readdelta(mnode))
1526 yield chnk
1534 yield chnk
1527 # see above comment for why we divide by 3
1535 # see above comment for why we divide by 3
1528 self.ui.progress(_('bundling'), cnt / 3,
1536 self.ui.progress(_('bundling'), cnt / 3,
1529 unit=_('manifests'), total=changecount)
1537 unit=_('manifests'), total=changecount)
1530 self.ui.progress(_('bundling'), None)
1538 self.ui.progress(_('bundling'), None)
1531 efiles = len(efiles)
1539 efiles = len(efiles)
1532
1540
1533 # These are no longer needed, dereference and toss the memory for
1541 # These are no longer needed, dereference and toss the memory for
1534 # them.
1542 # them.
1535 msng_mnfst_lst = None
1543 msng_mnfst_lst = None
1536 msng_mnfst_set.clear()
1544 msng_mnfst_set.clear()
1537
1545
1538 if extranodes:
1546 if extranodes:
1539 for fname in extranodes:
1547 for fname in extranodes:
1540 if isinstance(fname, int):
1548 if isinstance(fname, int):
1541 continue
1549 continue
1542 msng_filenode_set.setdefault(fname, {})
1550 msng_filenode_set.setdefault(fname, {})
1543 changedfiles.add(fname)
1551 changedfiles.add(fname)
1544 # Go through all our files in order sorted by name.
1552 # Go through all our files in order sorted by name.
1545 for idx, fname in enumerate(sorted(changedfiles)):
1553 for idx, fname in enumerate(sorted(changedfiles)):
1546 filerevlog = self.file(fname)
1554 filerevlog = self.file(fname)
1547 if not len(filerevlog):
1555 if not len(filerevlog):
1548 raise util.Abort(_("empty or missing revlog for %s") % fname)
1556 raise util.Abort(_("empty or missing revlog for %s") % fname)
1549 # Toss out the filenodes that the recipient isn't really
1557 # Toss out the filenodes that the recipient isn't really
1550 # missing.
1558 # missing.
1551 missingfnodes = msng_filenode_set.pop(fname, {})
1559 missingfnodes = msng_filenode_set.pop(fname, {})
1552 prune(filerevlog, missingfnodes)
1560 prune(filerevlog, missingfnodes)
1553 add_extra_nodes(fname, missingfnodes)
1561 add_extra_nodes(fname, missingfnodes)
1554 # If any filenodes are left, generate the group for them,
1562 # If any filenodes are left, generate the group for them,
1555 # otherwise don't bother.
1563 # otherwise don't bother.
1556 if missingfnodes:
1564 if missingfnodes:
1557 yield changegroup.chunkheader(len(fname))
1565 yield changegroup.chunkheader(len(fname))
1558 yield fname
1566 yield fname
1559 # Sort the filenodes by their revision # (topological order)
1567 # Sort the filenodes by their revision # (topological order)
1560 nodeiter = list(missingfnodes)
1568 nodeiter = list(missingfnodes)
1561 nodeiter.sort(key=filerevlog.rev)
1569 nodeiter.sort(key=filerevlog.rev)
1562 # Create a group generator and only pass in a changenode
1570 # Create a group generator and only pass in a changenode
1563 # lookup function, as we don't need to collect any information
1571 # lookup function, as we don't need to collect any information
1564 # from filenodes.
1572 # from filenodes.
1565 group = filerevlog.group(nodeiter,
1573 group = filerevlog.group(nodeiter,
1566 lambda fnode: missingfnodes[fnode])
1574 lambda fnode: missingfnodes[fnode])
1567 for chnk in group:
1575 for chnk in group:
1568 # even though we print the same progress on
1576 # even though we print the same progress on
1569 # most loop iterations, put the progress call
1577 # most loop iterations, put the progress call
1570 # here so that time estimates (if any) can be updated
1578 # here so that time estimates (if any) can be updated
1571 self.ui.progress(
1579 self.ui.progress(
1572 _('bundling'), idx, item=fname,
1580 _('bundling'), idx, item=fname,
1573 unit=_('files'), total=efiles)
1581 unit=_('files'), total=efiles)
1574 yield chnk
1582 yield chnk
1575 # Signal that no more groups are left.
1583 # Signal that no more groups are left.
1576 yield changegroup.closechunk()
1584 yield changegroup.closechunk()
1577 self.ui.progress(_('bundling'), None)
1585 self.ui.progress(_('bundling'), None)
1578
1586
1579 if msng_cl_lst:
1587 if msng_cl_lst:
1580 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1588 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1581
1589
1582 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1590 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1583
1591
1584 def changegroup(self, basenodes, source):
1592 def changegroup(self, basenodes, source):
1585 # to avoid a race we use changegroupsubset() (issue1320)
1593 # to avoid a race we use changegroupsubset() (issue1320)
1586 return self.changegroupsubset(basenodes, self.heads(), source)
1594 return self.changegroupsubset(basenodes, self.heads(), source)
1587
1595
1588 def _changegroup(self, nodes, source):
1596 def _changegroup(self, nodes, source):
1589 """Compute the changegroup of all nodes that we have that a recipient
1597 """Compute the changegroup of all nodes that we have that a recipient
1590 doesn't. Return a chunkbuffer object whose read() method will return
1598 doesn't. Return a chunkbuffer object whose read() method will return
1591 successive changegroup chunks.
1599 successive changegroup chunks.
1592
1600
1593 This is much easier than the previous function as we can assume that
1601 This is much easier than the previous function as we can assume that
1594 the recipient has any changenode we aren't sending them.
1602 the recipient has any changenode we aren't sending them.
1595
1603
1596 nodes is the set of nodes to send"""
1604 nodes is the set of nodes to send"""
1597
1605
1598 self.hook('preoutgoing', throw=True, source=source)
1606 self.hook('preoutgoing', throw=True, source=source)
1599
1607
1600 cl = self.changelog
1608 cl = self.changelog
1601 revset = set([cl.rev(n) for n in nodes])
1609 revset = set([cl.rev(n) for n in nodes])
1602 self.changegroupinfo(nodes, source)
1610 self.changegroupinfo(nodes, source)
1603
1611
1604 def identity(x):
1612 def identity(x):
1605 return x
1613 return x
1606
1614
1607 def gennodelst(log):
1615 def gennodelst(log):
1608 for r in log:
1616 for r in log:
1609 if log.linkrev(r) in revset:
1617 if log.linkrev(r) in revset:
1610 yield log.node(r)
1618 yield log.node(r)
1611
1619
1612 def lookuplinkrev_func(revlog):
1620 def lookuplinkrev_func(revlog):
1613 def lookuplinkrev(n):
1621 def lookuplinkrev(n):
1614 return cl.node(revlog.linkrev(revlog.rev(n)))
1622 return cl.node(revlog.linkrev(revlog.rev(n)))
1615 return lookuplinkrev
1623 return lookuplinkrev
1616
1624
1617 def gengroup():
1625 def gengroup():
1618 '''yield a sequence of changegroup chunks (strings)'''
1626 '''yield a sequence of changegroup chunks (strings)'''
1619 # construct a list of all changed files
1627 # construct a list of all changed files
1620 changedfiles = set()
1628 changedfiles = set()
1621 mmfs = {}
1629 mmfs = {}
1622 collect = changegroup.collector(cl, mmfs, changedfiles)
1630 collect = changegroup.collector(cl, mmfs, changedfiles)
1623
1631
1624 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1632 for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
1625 # revlog.group yields three entries per node, so
1633 # revlog.group yields three entries per node, so
1626 # dividing by 3 gives an approximation of how many
1634 # dividing by 3 gives an approximation of how many
1627 # nodes have been processed.
1635 # nodes have been processed.
1628 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1636 self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
1629 yield chnk
1637 yield chnk
1630 changecount = cnt / 3
1638 changecount = cnt / 3
1631 self.ui.progress(_('bundling'), None)
1639 self.ui.progress(_('bundling'), None)
1632
1640
1633 mnfst = self.manifest
1641 mnfst = self.manifest
1634 nodeiter = gennodelst(mnfst)
1642 nodeiter = gennodelst(mnfst)
1635 efiles = {}
1643 efiles = {}
1636 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1644 for cnt, chnk in enumerate(mnfst.group(nodeiter,
1637 lookuplinkrev_func(mnfst))):
1645 lookuplinkrev_func(mnfst))):
1638 if cnt % 3 == 1:
1646 if cnt % 3 == 1:
1639 mnode = chnk[:20]
1647 mnode = chnk[:20]
1640 efiles.update(mnfst.readdelta(mnode))
1648 efiles.update(mnfst.readdelta(mnode))
1641 # see above comment for why we divide by 3
1649 # see above comment for why we divide by 3
1642 self.ui.progress(_('bundling'), cnt / 3,
1650 self.ui.progress(_('bundling'), cnt / 3,
1643 unit=_('manifests'), total=changecount)
1651 unit=_('manifests'), total=changecount)
1644 yield chnk
1652 yield chnk
1645 efiles = len(efiles)
1653 efiles = len(efiles)
1646 self.ui.progress(_('bundling'), None)
1654 self.ui.progress(_('bundling'), None)
1647
1655
1648 for idx, fname in enumerate(sorted(changedfiles)):
1656 for idx, fname in enumerate(sorted(changedfiles)):
1649 filerevlog = self.file(fname)
1657 filerevlog = self.file(fname)
1650 if not len(filerevlog):
1658 if not len(filerevlog):
1651 raise util.Abort(_("empty or missing revlog for %s") % fname)
1659 raise util.Abort(_("empty or missing revlog for %s") % fname)
1652 nodeiter = gennodelst(filerevlog)
1660 nodeiter = gennodelst(filerevlog)
1653 nodeiter = list(nodeiter)
1661 nodeiter = list(nodeiter)
1654 if nodeiter:
1662 if nodeiter:
1655 yield changegroup.chunkheader(len(fname))
1663 yield changegroup.chunkheader(len(fname))
1656 yield fname
1664 yield fname
1657 lookup = lookuplinkrev_func(filerevlog)
1665 lookup = lookuplinkrev_func(filerevlog)
1658 for chnk in filerevlog.group(nodeiter, lookup):
1666 for chnk in filerevlog.group(nodeiter, lookup):
1659 self.ui.progress(
1667 self.ui.progress(
1660 _('bundling'), idx, item=fname,
1668 _('bundling'), idx, item=fname,
1661 total=efiles, unit=_('files'))
1669 total=efiles, unit=_('files'))
1662 yield chnk
1670 yield chnk
1663 self.ui.progress(_('bundling'), None)
1671 self.ui.progress(_('bundling'), None)
1664
1672
1665 yield changegroup.closechunk()
1673 yield changegroup.closechunk()
1666
1674
1667 if nodes:
1675 if nodes:
1668 self.hook('outgoing', node=hex(nodes[0]), source=source)
1676 self.hook('outgoing', node=hex(nodes[0]), source=source)
1669
1677
1670 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1678 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1671
1679
1672 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1680 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1673 """Add the changegroup returned by source.read() to this repo.
1681 """Add the changegroup returned by source.read() to this repo.
1674 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1682 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1675 the URL of the repo where this changegroup is coming from.
1683 the URL of the repo where this changegroup is coming from.
1676 If lock is not None, the function takes ownership of the lock
1684 If lock is not None, the function takes ownership of the lock
1677 and releases it after the changegroup is added.
1685 and releases it after the changegroup is added.
1678
1686
1679 Return an integer summarizing the change to this repo:
1687 Return an integer summarizing the change to this repo:
1680 - nothing changed or no source: 0
1688 - nothing changed or no source: 0
1681 - more heads than before: 1+added heads (2..n)
1689 - more heads than before: 1+added heads (2..n)
1682 - fewer heads than before: -1-removed heads (-2..-n)
1690 - fewer heads than before: -1-removed heads (-2..-n)
1683 - number of heads stays the same: 1
1691 - number of heads stays the same: 1
1684 """
1692 """
1685 def csmap(x):
1693 def csmap(x):
1686 self.ui.debug("add changeset %s\n" % short(x))
1694 self.ui.debug("add changeset %s\n" % short(x))
1687 return len(cl)
1695 return len(cl)
1688
1696
1689 def revmap(x):
1697 def revmap(x):
1690 return cl.rev(x)
1698 return cl.rev(x)
1691
1699
1692 if not source:
1700 if not source:
1693 return 0
1701 return 0
1694
1702
1695 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1703 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1696
1704
1697 changesets = files = revisions = 0
1705 changesets = files = revisions = 0
1698 efiles = set()
1706 efiles = set()
1699
1707
1700 # write changelog data to temp files so concurrent readers will not see
1708 # write changelog data to temp files so concurrent readers will not see
1701 # inconsistent view
1709 # inconsistent view
1702 cl = self.changelog
1710 cl = self.changelog
1703 cl.delayupdate()
1711 cl.delayupdate()
1704 oldheads = len(cl.heads())
1712 oldheads = len(cl.heads())
1705
1713
1706 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1714 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1707 try:
1715 try:
1708 trp = weakref.proxy(tr)
1716 trp = weakref.proxy(tr)
1709 # pull off the changeset group
1717 # pull off the changeset group
1710 self.ui.status(_("adding changesets\n"))
1718 self.ui.status(_("adding changesets\n"))
1711 clstart = len(cl)
1719 clstart = len(cl)
1712 class prog(object):
1720 class prog(object):
1713 step = _('changesets')
1721 step = _('changesets')
1714 count = 1
1722 count = 1
1715 ui = self.ui
1723 ui = self.ui
1716 total = None
1724 total = None
1717 def __call__(self):
1725 def __call__(self):
1718 self.ui.progress(self.step, self.count, unit=_('chunks'),
1726 self.ui.progress(self.step, self.count, unit=_('chunks'),
1719 total=self.total)
1727 total=self.total)
1720 self.count += 1
1728 self.count += 1
1721 pr = prog()
1729 pr = prog()
1722 source.callback = pr
1730 source.callback = pr
1723
1731
1724 if (cl.addgroup(source, csmap, trp) is None
1732 if (cl.addgroup(source, csmap, trp) is None
1725 and not emptyok):
1733 and not emptyok):
1726 raise util.Abort(_("received changelog group is empty"))
1734 raise util.Abort(_("received changelog group is empty"))
1727 clend = len(cl)
1735 clend = len(cl)
1728 changesets = clend - clstart
1736 changesets = clend - clstart
1729 for c in xrange(clstart, clend):
1737 for c in xrange(clstart, clend):
1730 efiles.update(self[c].files())
1738 efiles.update(self[c].files())
1731 efiles = len(efiles)
1739 efiles = len(efiles)
1732 self.ui.progress(_('changesets'), None)
1740 self.ui.progress(_('changesets'), None)
1733
1741
1734 # pull off the manifest group
1742 # pull off the manifest group
1735 self.ui.status(_("adding manifests\n"))
1743 self.ui.status(_("adding manifests\n"))
1736 pr.step = _('manifests')
1744 pr.step = _('manifests')
1737 pr.count = 1
1745 pr.count = 1
1738 pr.total = changesets # manifests <= changesets
1746 pr.total = changesets # manifests <= changesets
1739 # no need to check for empty manifest group here:
1747 # no need to check for empty manifest group here:
1740 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1748 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1741 # no new manifest will be created and the manifest group will
1749 # no new manifest will be created and the manifest group will
1742 # be empty during the pull
1750 # be empty during the pull
1743 self.manifest.addgroup(source, revmap, trp)
1751 self.manifest.addgroup(source, revmap, trp)
1744 self.ui.progress(_('manifests'), None)
1752 self.ui.progress(_('manifests'), None)
1745
1753
1746 needfiles = {}
1754 needfiles = {}
1747 if self.ui.configbool('server', 'validate', default=False):
1755 if self.ui.configbool('server', 'validate', default=False):
1748 # validate incoming csets have their manifests
1756 # validate incoming csets have their manifests
1749 for cset in xrange(clstart, clend):
1757 for cset in xrange(clstart, clend):
1750 mfest = self.changelog.read(self.changelog.node(cset))[0]
1758 mfest = self.changelog.read(self.changelog.node(cset))[0]
1751 mfest = self.manifest.readdelta(mfest)
1759 mfest = self.manifest.readdelta(mfest)
1752 # store file nodes we must see
1760 # store file nodes we must see
1753 for f, n in mfest.iteritems():
1761 for f, n in mfest.iteritems():
1754 needfiles.setdefault(f, set()).add(n)
1762 needfiles.setdefault(f, set()).add(n)
1755
1763
1756 # process the files
1764 # process the files
1757 self.ui.status(_("adding file changes\n"))
1765 self.ui.status(_("adding file changes\n"))
1758 pr.step = 'files'
1766 pr.step = 'files'
1759 pr.count = 1
1767 pr.count = 1
1760 pr.total = efiles
1768 pr.total = efiles
1761 source.callback = None
1769 source.callback = None
1762
1770
1763 while 1:
1771 while 1:
1764 f = source.chunk()
1772 f = source.chunk()
1765 if not f:
1773 if not f:
1766 break
1774 break
1767 self.ui.debug("adding %s revisions\n" % f)
1775 self.ui.debug("adding %s revisions\n" % f)
1768 pr()
1776 pr()
1769 fl = self.file(f)
1777 fl = self.file(f)
1770 o = len(fl)
1778 o = len(fl)
1771 if fl.addgroup(source, revmap, trp) is None:
1779 if fl.addgroup(source, revmap, trp) is None:
1772 raise util.Abort(_("received file revlog group is empty"))
1780 raise util.Abort(_("received file revlog group is empty"))
1773 revisions += len(fl) - o
1781 revisions += len(fl) - o
1774 files += 1
1782 files += 1
1775 if f in needfiles:
1783 if f in needfiles:
1776 needs = needfiles[f]
1784 needs = needfiles[f]
1777 for new in xrange(o, len(fl)):
1785 for new in xrange(o, len(fl)):
1778 n = fl.node(new)
1786 n = fl.node(new)
1779 if n in needs:
1787 if n in needs:
1780 needs.remove(n)
1788 needs.remove(n)
1781 if not needs:
1789 if not needs:
1782 del needfiles[f]
1790 del needfiles[f]
1783 self.ui.progress(_('files'), None)
1791 self.ui.progress(_('files'), None)
1784
1792
1785 for f, needs in needfiles.iteritems():
1793 for f, needs in needfiles.iteritems():
1786 fl = self.file(f)
1794 fl = self.file(f)
1787 for n in needs:
1795 for n in needs:
1788 try:
1796 try:
1789 fl.rev(n)
1797 fl.rev(n)
1790 except error.LookupError:
1798 except error.LookupError:
1791 raise util.Abort(
1799 raise util.Abort(
1792 _('missing file data for %s:%s - run hg verify') %
1800 _('missing file data for %s:%s - run hg verify') %
1793 (f, hex(n)))
1801 (f, hex(n)))
1794
1802
1795 newheads = len(cl.heads())
1803 newheads = len(cl.heads())
1796 heads = ""
1804 heads = ""
1797 if oldheads and newheads != oldheads:
1805 if oldheads and newheads != oldheads:
1798 heads = _(" (%+d heads)") % (newheads - oldheads)
1806 heads = _(" (%+d heads)") % (newheads - oldheads)
1799
1807
1800 self.ui.status(_("added %d changesets"
1808 self.ui.status(_("added %d changesets"
1801 " with %d changes to %d files%s\n")
1809 " with %d changes to %d files%s\n")
1802 % (changesets, revisions, files, heads))
1810 % (changesets, revisions, files, heads))
1803
1811
1804 if changesets > 0:
1812 if changesets > 0:
1805 p = lambda: cl.writepending() and self.root or ""
1813 p = lambda: cl.writepending() and self.root or ""
1806 self.hook('pretxnchangegroup', throw=True,
1814 self.hook('pretxnchangegroup', throw=True,
1807 node=hex(cl.node(clstart)), source=srctype,
1815 node=hex(cl.node(clstart)), source=srctype,
1808 url=url, pending=p)
1816 url=url, pending=p)
1809
1817
1810 # make changelog see real files again
1818 # make changelog see real files again
1811 cl.finalize(trp)
1819 cl.finalize(trp)
1812
1820
1813 tr.close()
1821 tr.close()
1814 finally:
1822 finally:
1815 tr.release()
1823 tr.release()
1816 if lock:
1824 if lock:
1817 lock.release()
1825 lock.release()
1818
1826
1819 if changesets > 0:
1827 if changesets > 0:
1820 # forcefully update the on-disk branch cache
1828 # forcefully update the on-disk branch cache
1821 self.ui.debug("updating the branch cache\n")
1829 self.ui.debug("updating the branch cache\n")
1822 self.updatebranchcache()
1830 self.updatebranchcache()
1823 self.hook("changegroup", node=hex(cl.node(clstart)),
1831 self.hook("changegroup", node=hex(cl.node(clstart)),
1824 source=srctype, url=url)
1832 source=srctype, url=url)
1825
1833
1826 for i in xrange(clstart, clend):
1834 for i in xrange(clstart, clend):
1827 self.hook("incoming", node=hex(cl.node(i)),
1835 self.hook("incoming", node=hex(cl.node(i)),
1828 source=srctype, url=url)
1836 source=srctype, url=url)
1829
1837
1830 # never return 0 here:
1838 # never return 0 here:
1831 if newheads < oldheads:
1839 if newheads < oldheads:
1832 return newheads - oldheads - 1
1840 return newheads - oldheads - 1
1833 else:
1841 else:
1834 return newheads - oldheads + 1
1842 return newheads - oldheads + 1
1835
1843
1836
1844
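A tiny, self-contained helper showing how a caller could decode addchangegroup()'s return value back into a head delta (the encoding is taken from the docstring above):

def headdelta(ret):
    # 0 or 1  -> head count unchanged (or nothing added)
    # 2..n    -> ret - 1 heads added
    # -2..-n  -> -ret - 1 heads removed
    if ret in (0, 1):
        return 0
    if ret > 1:
        return ret - 1
    return ret + 1

assert headdelta(3) == 2    # two heads added
assert headdelta(-2) == -1  # one head removed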
1837 def stream_in(self, remote, requirements):
1845 def stream_in(self, remote, requirements):
1838 fp = remote.stream_out()
1846 fp = remote.stream_out()
1839 l = fp.readline()
1847 l = fp.readline()
1840 try:
1848 try:
1841 resp = int(l)
1849 resp = int(l)
1842 except ValueError:
1850 except ValueError:
1843 raise error.ResponseError(
1851 raise error.ResponseError(
1844 _('Unexpected response from remote server:'), l)
1852 _('Unexpected response from remote server:'), l)
1845 if resp == 1:
1853 if resp == 1:
1846 raise util.Abort(_('operation forbidden by server'))
1854 raise util.Abort(_('operation forbidden by server'))
1847 elif resp == 2:
1855 elif resp == 2:
1848 raise util.Abort(_('locking the remote repository failed'))
1856 raise util.Abort(_('locking the remote repository failed'))
1849 elif resp != 0:
1857 elif resp != 0:
1850 raise util.Abort(_('the server sent an unknown error code'))
1858 raise util.Abort(_('the server sent an unknown error code'))
1851 self.ui.status(_('streaming all changes\n'))
1859 self.ui.status(_('streaming all changes\n'))
1852 l = fp.readline()
1860 l = fp.readline()
1853 try:
1861 try:
1854 total_files, total_bytes = map(int, l.split(' ', 1))
1862 total_files, total_bytes = map(int, l.split(' ', 1))
1855 except (ValueError, TypeError):
1863 except (ValueError, TypeError):
1856 raise error.ResponseError(
1864 raise error.ResponseError(
1857 _('Unexpected response from remote server:'), l)
1865 _('Unexpected response from remote server:'), l)
1858 self.ui.status(_('%d files to transfer, %s of data\n') %
1866 self.ui.status(_('%d files to transfer, %s of data\n') %
1859 (total_files, util.bytecount(total_bytes)))
1867 (total_files, util.bytecount(total_bytes)))
1860 start = time.time()
1868 start = time.time()
1861 for i in xrange(total_files):
1869 for i in xrange(total_files):
1862 # XXX doesn't support '\n' or '\r' in filenames
1870 # XXX doesn't support '\n' or '\r' in filenames
1863 l = fp.readline()
1871 l = fp.readline()
1864 try:
1872 try:
1865 name, size = l.split('\0', 1)
1873 name, size = l.split('\0', 1)
1866 size = int(size)
1874 size = int(size)
1867 except (ValueError, TypeError):
1875 except (ValueError, TypeError):
1868 raise error.ResponseError(
1876 raise error.ResponseError(
1869 _('Unexpected response from remote server:'), l)
1877 _('Unexpected response from remote server:'), l)
1870 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1878 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1871 # for backwards compat, name was partially encoded
1879 # for backwards compat, name was partially encoded
1872 ofp = self.sopener(store.decodedir(name), 'w')
1880 ofp = self.sopener(store.decodedir(name), 'w')
1873 for chunk in util.filechunkiter(fp, limit=size):
1881 for chunk in util.filechunkiter(fp, limit=size):
1874 ofp.write(chunk)
1882 ofp.write(chunk)
1875 ofp.close()
1883 ofp.close()
1876 elapsed = time.time() - start
1884 elapsed = time.time() - start
1877 if elapsed <= 0:
1885 if elapsed <= 0:
1878 elapsed = 0.001
1886 elapsed = 0.001
1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1887 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1880 (util.bytecount(total_bytes), elapsed,
1888 (util.bytecount(total_bytes), elapsed,
1881 util.bytecount(total_bytes / elapsed)))
1889 util.bytecount(total_bytes / elapsed)))
1882
1890
1883 # new requirements = old non-format requirements + new format-related
1891 # new requirements = old non-format requirements + new format-related
1884 # requirements from the streamed-in repository
1892 # requirements from the streamed-in repository
1885 requirements.update(set(self.requirements) - self.supportedformats)
1893 requirements.update(set(self.requirements) - self.supportedformats)
1886 self._applyrequirements(requirements)
1894 self._applyrequirements(requirements)
1887 self._writerequirements()
1895 self._writerequirements()
1888
1896
1889 self.invalidate()
1897 self.invalidate()
1890 return len(self.heads()) + 1
1898 return len(self.heads()) + 1
1891
1899
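stream_in() consumes a simple line-oriented wire format from the remote's stream_out(). A toy encoder for that format, derived from the parsing code above (it ignores the store path encoding a real server applies), might be:

def makestream(files):
    # files is a list of (name, data) pairs of plain strings
    out = ['0\n']                                   # response code 0: OK
    total = sum(len(data) for name, data in files)
    out.append('%d %d\n' % (len(files), total))     # "<file count> <byte count>"
    for name, data in files:
        out.append('%s\0%d\n' % (name, len(data)))  # "<name>\0<size>"
        out.append(data)                            # raw file contents
    return ''.join(out)

print repr(makestream([('data/foo.i', 'abc')]))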
1892 def clone(self, remote, heads=[], stream=False):
1900 def clone(self, remote, heads=[], stream=False):
1893 '''clone remote repository.
1901 '''clone remote repository.
1894
1902
1895 keyword arguments:
1903 keyword arguments:
1896 heads: list of revs to clone (forces use of pull)
1904 heads: list of revs to clone (forces use of pull)
1897 stream: use streaming clone if possible'''
1905 stream: use streaming clone if possible'''
1898
1906
1899 # now, all clients that can request uncompressed clones can
1907 # now, all clients that can request uncompressed clones can
1900 # read repo formats supported by all servers that can serve
1908 # read repo formats supported by all servers that can serve
1901 # them.
1909 # them.
1902
1910
1903 # if revlog format changes, client will have to check version
1911 # if revlog format changes, client will have to check version
1904 # and format flags on "stream" capability, and use
1912 # and format flags on "stream" capability, and use
1905 # uncompressed only if compatible.
1913 # uncompressed only if compatible.
1906
1914
1907 if stream and not heads:
1915 if stream and not heads:
1908 # 'stream' means remote revlog format is revlogv1 only
1916 # 'stream' means remote revlog format is revlogv1 only
1909 if remote.capable('stream'):
1917 if remote.capable('stream'):
1910 return self.stream_in(remote, set(('revlogv1',)))
1918 return self.stream_in(remote, set(('revlogv1',)))
1911 # otherwise, 'streamreqs' contains the remote revlog format
1919 # otherwise, 'streamreqs' contains the remote revlog format
1912 streamreqs = remote.capable('streamreqs')
1920 streamreqs = remote.capable('streamreqs')
1913 if streamreqs:
1921 if streamreqs:
1914 streamreqs = set(streamreqs.split(','))
1922 streamreqs = set(streamreqs.split(','))
1915 # if we support it, stream in and adjust our requirements
1923 # if we support it, stream in and adjust our requirements
1916 if not streamreqs - self.supportedformats:
1924 if not streamreqs - self.supportedformats:
1917 return self.stream_in(remote, streamreqs)
1925 return self.stream_in(remote, streamreqs)
1918 return self.pull(remote, heads)
1926 return self.pull(remote, heads)
1919
1927
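An illustrative usage sketch for clone() (here `repo` is a freshly created local repository, `remote` an already-opened peer, and `somehead` a stand-in for a binary node id):

repo.clone(remote, stream=True)        # streaming clone when the server allows it
repo.clone(remote, heads=[somehead])   # limiting to specific heads forces the pull path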
1920 def pushkey(self, namespace, key, old, new):
1928 def pushkey(self, namespace, key, old, new):
1921 return pushkey.push(self, namespace, key, old, new)
1929 return pushkey.push(self, namespace, key, old, new)
1922
1930
1923 def listkeys(self, namespace):
1931 def listkeys(self, namespace):
1924 return pushkey.list(self, namespace)
1932 return pushkey.list(self, namespace)
1925
1933
1926 # used to avoid circular references so destructors work
1934 # used to avoid circular references so destructors work
1927 def aftertrans(files):
1935 def aftertrans(files):
1928 renamefiles = [tuple(t) for t in files]
1936 renamefiles = [tuple(t) for t in files]
1929 def a():
1937 def a():
1930 for src, dest in renamefiles:
1938 for src, dest in renamefiles:
1931 util.rename(src, dest)
1939 util.rename(src, dest)
1932 return a
1940 return a
1933
1941
1934 def instance(ui, path, create):
1942 def instance(ui, path, create):
1935 return localrepository(ui, util.drop_scheme('file', path), create)
1943 return localrepository(ui, util.drop_scheme('file', path), create)
1936
1944
1937 def islocal(path):
1945 def islocal(path):
1938 return True
1946 return True