##// END OF EJS Templates
make transactions work on non-refcounted python implementations
Ronny Pfannschmidt -
r11230:5116a077 default
parent child Browse files
Show More
@@ -1,2964 +1,2963 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help command` for more details)::
17 Common tasks (use :hg:`help command` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40 '''
40 '''
41
41
42 from mercurial.i18n import _
42 from mercurial.i18n import _
43 from mercurial.node import bin, hex, short, nullid, nullrev
43 from mercurial.node import bin, hex, short, nullid, nullrev
44 from mercurial.lock import release
44 from mercurial.lock import release
45 from mercurial import commands, cmdutil, hg, patch, util
45 from mercurial import commands, cmdutil, hg, patch, util
46 from mercurial import repair, extensions, url, error
46 from mercurial import repair, extensions, url, error
47 import os, sys, re, errno
47 import os, sys, re, errno
48
48
# qclone creates the repository it operates on, so it must be usable
# outside an existing repo.
commands.norepo += " qclone"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
54
54
class statusentry(object):
    """One entry of the status file: the node of an applied changeset
    paired with the name of the patch that produced it."""

    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __str__(self):
        # serialized form used in the status file: "<hex node>:<name>"
        return '%s:%s' % (hex(self.node), self.name)
61
61
class patchheader(object):
    """Parsed header (message, user, date, parent) of a patch file.

    The parser understands both plain mail-style headers
    (Subject:/From:/Date:) and hg export headers ('# HG changeset
    patch' followed by '# User', '# Date', '# Parent').
    """
    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file; plainmode: prefer mail-style
        # headers over '# HG changeset patch' blocks when writing.
        def eatdiff(lines):
            # strip trailing diff-leader lines off the header block
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # strip trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None       # parser state: None/"hgpatch"/"tag"/"tagdone"
        subject = None
        diffstart = 0       # 0: no diff seen, 1: saw '--- ', 2: diff confirmed

        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # a git diff header, or '--- ' followed by '+++ ',
                # marks the start of the patch body: stop parsing
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                # possible start of a unified diff; confirmed only if
                # the next line is '+++ '
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:]
                elif not line.startswith("# ") and line:
                    # first non-'# ' line ends the hg header block
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message    # commit message lines
        self.comments = comments  # full header block, message included
        self.user = user
        self.date = date
        self.parent = parent
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        """Set the user, updating an existing header line when present,
        otherwise inserting one in the appropriate header style."""
        if not self.updateheader(['From: ', '# User '], user):
            try:
                # prefer extending an existing hg header block
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set the date; same insertion strategy as setuser()."""
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        """Set the parent; only recorded when an hg header block exists
        (plain mail headers have no parent field)."""
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        """Replace the commit message with a single-string message."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        # full header text as written at the top of the patch file
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    # drop the subject and the blank line that follow it
                    self.message = self.message[2:]
                    break
        ci = 0
        # delete each remaining message line from comments, scanning
        # forward so duplicated lines are matched in order
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
233
233
234 class queue(object):
234 class queue(object):
235 def __init__(self, ui, path, patchdir=None):
235 def __init__(self, ui, path, patchdir=None):
236 self.basepath = path
236 self.basepath = path
237 try:
237 try:
238 fh = open(os.path.join(path, '.queue'))
238 fh = open(os.path.join(path, '.queue'))
239 curpath = os.path.join(path, fh.read().rstrip())
239 curpath = os.path.join(path, fh.read().rstrip())
240 except IOError:
240 except IOError:
241 curpath = os.path.join(path, 'patches')
241 curpath = os.path.join(path, 'patches')
242 self.path = patchdir or curpath
242 self.path = patchdir or curpath
243 self.opener = util.opener(self.path)
243 self.opener = util.opener(self.path)
244 self.ui = ui
244 self.ui = ui
245 self.applied_dirty = 0
245 self.applied_dirty = 0
246 self.series_dirty = 0
246 self.series_dirty = 0
247 self.series_path = "series"
247 self.series_path = "series"
248 self.status_path = "status"
248 self.status_path = "status"
249 self.guards_path = "guards"
249 self.guards_path = "guards"
250 self.active_guards = None
250 self.active_guards = None
251 self.guards_dirty = False
251 self.guards_dirty = False
252 # Handle mq.git as a bool with extended values
252 # Handle mq.git as a bool with extended values
253 try:
253 try:
254 gitmode = ui.configbool('mq', 'git', None)
254 gitmode = ui.configbool('mq', 'git', None)
255 if gitmode is None:
255 if gitmode is None:
256 raise error.ConfigError()
256 raise error.ConfigError()
257 self.gitmode = gitmode and 'yes' or 'no'
257 self.gitmode = gitmode and 'yes' or 'no'
258 except error.ConfigError:
258 except error.ConfigError:
259 self.gitmode = ui.config('mq', 'git', 'auto').lower()
259 self.gitmode = ui.config('mq', 'git', 'auto').lower()
260 self.plainmode = ui.configbool('mq', 'plain', False)
260 self.plainmode = ui.configbool('mq', 'plain', False)
261
261
262 @util.propertycache
262 @util.propertycache
263 def applied(self):
263 def applied(self):
264 if os.path.exists(self.join(self.status_path)):
264 if os.path.exists(self.join(self.status_path)):
265 def parse(l):
265 def parse(l):
266 n, name = l.split(':', 1)
266 n, name = l.split(':', 1)
267 return statusentry(bin(n), name)
267 return statusentry(bin(n), name)
268 lines = self.opener(self.status_path).read().splitlines()
268 lines = self.opener(self.status_path).read().splitlines()
269 return [parse(l) for l in lines]
269 return [parse(l) for l in lines]
270 return []
270 return []
271
271
272 @util.propertycache
272 @util.propertycache
273 def full_series(self):
273 def full_series(self):
274 if os.path.exists(self.join(self.series_path)):
274 if os.path.exists(self.join(self.series_path)):
275 return self.opener(self.series_path).read().splitlines()
275 return self.opener(self.series_path).read().splitlines()
276 return []
276 return []
277
277
    @util.propertycache
    def series(self):
        # parse_series() stores 'series' into the instance __dict__,
        # which is exactly where propertycache would put this return
        # value, so the freshly parsed list is handed back here.
        self.parse_series()
        return self.series
282
282
    @util.propertycache
    def series_guards(self):
        # parse_series() stores 'series_guards' into the instance
        # __dict__ (the same slot propertycache caches into), so the
        # freshly parsed per-patch guard lists are returned.
        self.parse_series()
        return self.series_guards
287
287
288 def invalidate(self):
288 def invalidate(self):
289 for a in 'applied full_series series series_guards'.split():
289 for a in 'applied full_series series series_guards'.split():
290 if a in self.__dict__:
290 if a in self.__dict__:
291 delattr(self, a)
291 delattr(self, a)
292 self.applied_dirty = 0
292 self.applied_dirty = 0
293 self.series_dirty = 0
293 self.series_dirty = 0
294 self.guards_dirty = False
294 self.guards_dirty = False
295 self.active_guards = None
295 self.active_guards = None
296
296
297 def diffopts(self, opts={}, patchfn=None):
297 def diffopts(self, opts={}, patchfn=None):
298 diffopts = patch.diffopts(self.ui, opts)
298 diffopts = patch.diffopts(self.ui, opts)
299 if self.gitmode == 'auto':
299 if self.gitmode == 'auto':
300 diffopts.upgrade = True
300 diffopts.upgrade = True
301 elif self.gitmode == 'keep':
301 elif self.gitmode == 'keep':
302 pass
302 pass
303 elif self.gitmode in ('yes', 'no'):
303 elif self.gitmode in ('yes', 'no'):
304 diffopts.git = self.gitmode == 'yes'
304 diffopts.git = self.gitmode == 'yes'
305 else:
305 else:
306 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
306 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
307 ' got %s') % self.gitmode)
307 ' got %s') % self.gitmode)
308 if patchfn:
308 if patchfn:
309 diffopts = self.patchopts(diffopts, patchfn)
309 diffopts = self.patchopts(diffopts, patchfn)
310 return diffopts
310 return diffopts
311
311
312 def patchopts(self, diffopts, *patches):
312 def patchopts(self, diffopts, *patches):
313 """Return a copy of input diff options with git set to true if
313 """Return a copy of input diff options with git set to true if
314 referenced patch is a git patch and should be preserved as such.
314 referenced patch is a git patch and should be preserved as such.
315 """
315 """
316 diffopts = diffopts.copy()
316 diffopts = diffopts.copy()
317 if not diffopts.git and self.gitmode == 'keep':
317 if not diffopts.git and self.gitmode == 'keep':
318 for patchfn in patches:
318 for patchfn in patches:
319 patchf = self.opener(patchfn, 'r')
319 patchf = self.opener(patchfn, 'r')
320 # if the patch was a git patch, refresh it as a git patch
320 # if the patch was a git patch, refresh it as a git patch
321 for line in patchf:
321 for line in patchf:
322 if line.startswith('diff --git'):
322 if line.startswith('diff --git'):
323 diffopts.git = True
323 diffopts.git = True
324 break
324 break
325 patchf.close()
325 patchf.close()
326 return diffopts
326 return diffopts
327
327
328 def join(self, *p):
328 def join(self, *p):
329 return os.path.join(self.path, *p)
329 return os.path.join(self.path, *p)
330
330
331 def find_series(self, patch):
331 def find_series(self, patch):
332 def matchpatch(l):
332 def matchpatch(l):
333 l = l.split('#', 1)[0]
333 l = l.split('#', 1)[0]
334 return l.strip() == patch
334 return l.strip() == patch
335 for index, l in enumerate(self.full_series):
335 for index, l in enumerate(self.full_series):
336 if matchpatch(l):
336 if matchpatch(l):
337 return index
337 return index
338 return None
338 return None
339
339
    # Matches one guard annotation in a series line: optional leading
    # whitespace, '#', then '+' or '-' followed by the guard name
    # (which may not contain '#', whitespace, or start with '+'/'-').
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
341
341
342 def parse_series(self):
342 def parse_series(self):
343 self.series = []
343 self.series = []
344 self.series_guards = []
344 self.series_guards = []
345 for l in self.full_series:
345 for l in self.full_series:
346 h = l.find('#')
346 h = l.find('#')
347 if h == -1:
347 if h == -1:
348 patch = l
348 patch = l
349 comment = ''
349 comment = ''
350 elif h == 0:
350 elif h == 0:
351 continue
351 continue
352 else:
352 else:
353 patch = l[:h]
353 patch = l[:h]
354 comment = l[h:]
354 comment = l[h:]
355 patch = patch.strip()
355 patch = patch.strip()
356 if patch:
356 if patch:
357 if patch in self.series:
357 if patch in self.series:
358 raise util.Abort(_('%s appears more than once in %s') %
358 raise util.Abort(_('%s appears more than once in %s') %
359 (patch, self.join(self.series_path)))
359 (patch, self.join(self.series_path)))
360 self.series.append(patch)
360 self.series.append(patch)
361 self.series_guards.append(self.guard_re.findall(comment))
361 self.series_guards.append(self.guard_re.findall(comment))
362
362
363 def check_guard(self, guard):
363 def check_guard(self, guard):
364 if not guard:
364 if not guard:
365 return _('guard cannot be an empty string')
365 return _('guard cannot be an empty string')
366 bad_chars = '# \t\r\n\f'
366 bad_chars = '# \t\r\n\f'
367 first = guard[0]
367 first = guard[0]
368 if first in '-+':
368 if first in '-+':
369 return (_('guard %r starts with invalid character: %r') %
369 return (_('guard %r starts with invalid character: %r') %
370 (guard, first))
370 (guard, first))
371 for c in bad_chars:
371 for c in bad_chars:
372 if c in guard:
372 if c in guard:
373 return _('invalid character in guard %r: %r') % (guard, c)
373 return _('invalid character in guard %r: %r') % (guard, c)
374
374
375 def set_active(self, guards):
375 def set_active(self, guards):
376 for guard in guards:
376 for guard in guards:
377 bad = self.check_guard(guard)
377 bad = self.check_guard(guard)
378 if bad:
378 if bad:
379 raise util.Abort(bad)
379 raise util.Abort(bad)
380 guards = sorted(set(guards))
380 guards = sorted(set(guards))
381 self.ui.debug('active guards: %s\n' % ' '.join(guards))
381 self.ui.debug('active guards: %s\n' % ' '.join(guards))
382 self.active_guards = guards
382 self.active_guards = guards
383 self.guards_dirty = True
383 self.guards_dirty = True
384
384
385 def active(self):
385 def active(self):
386 if self.active_guards is None:
386 if self.active_guards is None:
387 self.active_guards = []
387 self.active_guards = []
388 try:
388 try:
389 guards = self.opener(self.guards_path).read().split()
389 guards = self.opener(self.guards_path).read().split()
390 except IOError, err:
390 except IOError, err:
391 if err.errno != errno.ENOENT:
391 if err.errno != errno.ENOENT:
392 raise
392 raise
393 guards = []
393 guards = []
394 for i, guard in enumerate(guards):
394 for i, guard in enumerate(guards):
395 bad = self.check_guard(guard)
395 bad = self.check_guard(guard)
396 if bad:
396 if bad:
397 self.ui.warn('%s:%d: %s\n' %
397 self.ui.warn('%s:%d: %s\n' %
398 (self.join(self.guards_path), i + 1, bad))
398 (self.join(self.guards_path), i + 1, bad))
399 else:
399 else:
400 self.active_guards.append(guard)
400 self.active_guards.append(guard)
401 return self.active_guards
401 return self.active_guards
402
402
403 def set_guards(self, idx, guards):
403 def set_guards(self, idx, guards):
404 for g in guards:
404 for g in guards:
405 if len(g) < 2:
405 if len(g) < 2:
406 raise util.Abort(_('guard %r too short') % g)
406 raise util.Abort(_('guard %r too short') % g)
407 if g[0] not in '-+':
407 if g[0] not in '-+':
408 raise util.Abort(_('guard %r starts with invalid char') % g)
408 raise util.Abort(_('guard %r starts with invalid char') % g)
409 bad = self.check_guard(g[1:])
409 bad = self.check_guard(g[1:])
410 if bad:
410 if bad:
411 raise util.Abort(bad)
411 raise util.Abort(bad)
412 drop = self.guard_re.sub('', self.full_series[idx])
412 drop = self.guard_re.sub('', self.full_series[idx])
413 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
413 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
414 self.parse_series()
414 self.parse_series()
415 self.series_dirty = True
415 self.series_dirty = True
416
416
417 def pushable(self, idx):
417 def pushable(self, idx):
418 if isinstance(idx, str):
418 if isinstance(idx, str):
419 idx = self.series.index(idx)
419 idx = self.series.index(idx)
420 patchguards = self.series_guards[idx]
420 patchguards = self.series_guards[idx]
421 if not patchguards:
421 if not patchguards:
422 return True, None
422 return True, None
423 guards = self.active()
423 guards = self.active()
424 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
424 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
425 if exactneg:
425 if exactneg:
426 return False, exactneg[0]
426 return False, exactneg[0]
427 pos = [g for g in patchguards if g[0] == '+']
427 pos = [g for g in patchguards if g[0] == '+']
428 exactpos = [g for g in pos if g[1:] in guards]
428 exactpos = [g for g in pos if g[1:] in guards]
429 if pos:
429 if pos:
430 if exactpos:
430 if exactpos:
431 return True, exactpos[0]
431 return True, exactpos[0]
432 return False, pos
432 return False, pos
433 return True, ''
433 return True, ''
434
434
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why the patch at idx is or is not pushable.

        With all_patches, reasons for pushable patches are reported too
        (via ui.write); otherwise only skip reasons are shown, and only
        in verbose mode (via ui.warn).
        """
        # informational listing goes to write, skip warnings to warn
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    # no guards at all on this patch
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        # only negative guards, none of them active
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
459
459
460 def save_dirty(self):
460 def save_dirty(self):
461 def write_list(items, path):
461 def write_list(items, path):
462 fp = self.opener(path, 'w')
462 fp = self.opener(path, 'w')
463 for i in items:
463 for i in items:
464 fp.write("%s\n" % i)
464 fp.write("%s\n" % i)
465 fp.close()
465 fp.close()
466 if self.applied_dirty:
466 if self.applied_dirty:
467 write_list(map(str, self.applied), self.status_path)
467 write_list(map(str, self.applied), self.status_path)
468 if self.series_dirty:
468 if self.series_dirty:
469 write_list(self.full_series, self.series_path)
469 write_list(self.full_series, self.series_path)
470 if self.guards_dirty:
470 if self.guards_dirty:
471 write_list(self.active_guards, self.guards_path)
471 write_list(self.active_guards, self.guards_path)
472
472
473 def removeundo(self, repo):
473 def removeundo(self, repo):
474 undo = repo.sjoin('undo')
474 undo = repo.sjoin('undo')
475 if not os.path.exists(undo):
475 if not os.path.exists(undo):
476 return
476 return
477 try:
477 try:
478 os.unlink(undo)
478 os.unlink(undo)
479 except OSError, inst:
479 except OSError, inst:
480 self.ui.warn(_('error removing undo: %s\n') % str(inst))
480 self.ui.warn(_('error removing undo: %s\n') % str(inst))
481
481
482 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
482 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
483 fp=None, changes=None, opts={}):
483 fp=None, changes=None, opts={}):
484 stat = opts.get('stat')
484 stat = opts.get('stat')
485 m = cmdutil.match(repo, files, opts)
485 m = cmdutil.match(repo, files, opts)
486 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
486 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
487 changes, stat, fp)
487 changes, stat, fp)
488
488
489 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
489 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
490 # first try just applying the patch
490 # first try just applying the patch
491 (err, n) = self.apply(repo, [patch], update_status=False,
491 (err, n) = self.apply(repo, [patch], update_status=False,
492 strict=True, merge=rev)
492 strict=True, merge=rev)
493
493
494 if err == 0:
494 if err == 0:
495 return (err, n)
495 return (err, n)
496
496
497 if n is None:
497 if n is None:
498 raise util.Abort(_("apply failed for patch %s") % patch)
498 raise util.Abort(_("apply failed for patch %s") % patch)
499
499
500 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
500 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
501
501
502 # apply failed, strip away that rev and merge.
502 # apply failed, strip away that rev and merge.
503 hg.clean(repo, head)
503 hg.clean(repo, head)
504 self.strip(repo, n, update=False, backup='strip')
504 self.strip(repo, n, update=False, backup='strip')
505
505
506 ctx = repo[rev]
506 ctx = repo[rev]
507 ret = hg.merge(repo, rev)
507 ret = hg.merge(repo, rev)
508 if ret:
508 if ret:
509 raise util.Abort(_("update returned %d") % ret)
509 raise util.Abort(_("update returned %d") % ret)
510 n = repo.commit(ctx.description(), ctx.user(), force=True)
510 n = repo.commit(ctx.description(), ctx.user(), force=True)
511 if n is None:
511 if n is None:
512 raise util.Abort(_("repo commit failed"))
512 raise util.Abort(_("repo commit failed"))
513 try:
513 try:
514 ph = patchheader(mergeq.join(patch), self.plainmode)
514 ph = patchheader(mergeq.join(patch), self.plainmode)
515 except:
515 except:
516 raise util.Abort(_("unable to read %s") % patch)
516 raise util.Abort(_("unable to read %s") % patch)
517
517
518 diffopts = self.patchopts(diffopts, patch)
518 diffopts = self.patchopts(diffopts, patch)
519 patchf = self.opener(patch, "w")
519 patchf = self.opener(patch, "w")
520 comments = str(ph)
520 comments = str(ph)
521 if comments:
521 if comments:
522 patchf.write(comments)
522 patchf.write(comments)
523 self.printdiff(repo, diffopts, head, n, fp=patchf)
523 self.printdiff(repo, diffopts, head, n, fp=patchf)
524 patchf.close()
524 patchf.close()
525 self.removeundo(repo)
525 self.removeundo(repo)
526 return (0, n)
526 return (0, n)
527
527
528 def qparents(self, repo, rev=None):
528 def qparents(self, repo, rev=None):
529 if rev is None:
529 if rev is None:
530 (p1, p2) = repo.dirstate.parents()
530 (p1, p2) = repo.dirstate.parents()
531 if p2 == nullid:
531 if p2 == nullid:
532 return p1
532 return p1
533 if not self.applied:
533 if not self.applied:
534 return None
534 return None
535 return self.applied[-1].node
535 return self.applied[-1].node
536 p1, p2 = repo.changelog.parents(rev)
536 p1, p2 = repo.changelog.parents(rev)
537 if p2 != nullid and p2 in [x.node for x in self.applied]:
537 if p2 != nullid and p2 in [x.node for x in self.applied]:
538 return p2
538 return p2
539 return p1
539 return p1
540
540
    def mergepatch(self, repo, mergeq, series, diffopts):
        """Apply each patch of *series* by merging the corresponding
        revision from *mergeq* (qpush --merge).

        Returns (err, head) where err is non-zero on failure and head
        is the node of the last successfully merged patch.
        """
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                # guarded patches are skipped, not treated as errors
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                # record the merged patch on the applied stack
                self.applied.append(statusentry(head, patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
579
579
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file

        Returns (success, files, fuzz): success is False when the
        patch did not apply, files maps the touched file names, and
        fuzz reports whether any hunk applied with fuzz.
        '''
        files = {}
        try:
            # note: the global 'patch' module, not this method
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files, eolmode=None)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            # files may still contain the entries collected before failure
            return (False, files, False)

        return (True, files, fuzz)
594
594
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None):
        '''Lock the repository, apply *series* inside a single
        transaction, and persist the queue state.

        Delegates the real work to _apply(); on any failure the
        transaction is aborted and the repo/dirstate caches are
        invalidated before the exception is re-raised.'''
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.save_dirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            # release the transaction explicitly rather than relying on
            # 'del tr' refcounting, so this also works on Python
            # implementations without reference counting (e.g. PyPy)
            release(tr, lock, wlock)
            self.removeundo(repo)
619
618
    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz

        Applies each pushable patch of *series* in order, committing a
        changeset per patch; stops early on the first failure.  Caller
        is expected to hold the locks and transaction (see apply()).'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                # guarded patch: explain and skip, not an error
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                # invert: patcherr becomes truthy on failure
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.exists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            files = patch.updatedir(self.ui, repo, files)
            match = cmdutil.matchfiles(repo, files or [])
            # commit even a failed patch so rejects can be refreshed in
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)
697
696
698 def _cleanup(self, patches, numrevs, keep=False):
697 def _cleanup(self, patches, numrevs, keep=False):
699 if not keep:
698 if not keep:
700 r = self.qrepo()
699 r = self.qrepo()
701 if r:
700 if r:
702 r.remove(patches, True)
701 r.remove(patches, True)
703 else:
702 else:
704 for p in patches:
703 for p in patches:
705 os.unlink(self.join(p))
704 os.unlink(self.join(p))
706
705
707 if numrevs:
706 if numrevs:
708 del self.applied[:numrevs]
707 del self.applied[:numrevs]
709 self.applied_dirty = 1
708 self.applied_dirty = 1
710
709
711 for i in sorted([self.find_series(p) for p in patches], reverse=True):
710 for i in sorted([self.find_series(p) for p in patches], reverse=True):
712 del self.full_series[i]
711 del self.full_series[i]
713 self.parse_series()
712 self.parse_series()
714 self.series_dirty = 1
713 self.series_dirty = 1
715
714
    def _revpatches(self, repo, revs):
        """Map each revision in *revs* to its applied patch name.

        *revs* must be a contiguous range starting at the first
        applied patch; aborts when a revision is below the queue or
        does not line up with the applied stack.  Returns the list of
        patch names.
        """
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                # revs must match the applied stack position-by-position
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    # changeset still carries the placeholder message
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
739
738
740 def finish(self, repo, revs):
739 def finish(self, repo, revs):
741 patches = self._revpatches(repo, sorted(revs))
740 patches = self._revpatches(repo, sorted(revs))
742 self._cleanup(patches, len(patches))
741 self._cleanup(patches, len(patches))
743
742
    def delete(self, repo, patches, opts):
        """Implementation of qdelete: forget patches without applying.

        *patches* are unapplied patch names; opts['rev'] may name
        already-applied revisions to finalize instead.  Aborts when a
        named patch is applied or unknown.
        """
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        # validate every name up front before touching anything
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)

        patches = list(patches)
        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                # normalize to ascending order for _revpatches
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            patches += revpatches
            numrevs = len(revpatches)

        self._cleanup(patches, numrevs, opts.get('keep'))
770
769
771 def check_toppatch(self, repo):
770 def check_toppatch(self, repo):
772 if self.applied:
771 if self.applied:
773 top = self.applied[-1].node
772 top = self.applied[-1].node
774 patch = self.applied[-1].name
773 patch = self.applied[-1].name
775 pp = repo.dirstate.parents()
774 pp = repo.dirstate.parents()
776 if top not in pp:
775 if top not in pp:
777 raise util.Abort(_("working directory revision is not qtip"))
776 raise util.Abort(_("working directory revision is not qtip"))
778 return top, patch
777 return top, patch
779 return None, None
778 return None, None
780
779
781 def check_localchanges(self, repo, force=False, refresh=True):
780 def check_localchanges(self, repo, force=False, refresh=True):
782 m, a, r, d = repo.status()[:4]
781 m, a, r, d = repo.status()[:4]
783 if (m or a or r or d) and not force:
782 if (m or a or r or d) and not force:
784 if refresh:
783 if refresh:
785 raise util.Abort(_("local changes found, refresh first"))
784 raise util.Abort(_("local changes found, refresh first"))
786 else:
785 else:
787 raise util.Abort(_("local changes found"))
786 raise util.Abort(_("local changes found"))
788 return m, a, r, d
787 return m, a, r, d
789
788
790 _reserved = ('series', 'status', 'guards')
789 _reserved = ('series', 'status', 'guards')
791 def check_reserved_name(self, name):
790 def check_reserved_name(self, name):
792 if (name in self._reserved or name.startswith('.hg')
791 if (name in self._reserved or name.startswith('.hg')
793 or name.startswith('.mq') or '#' in name or ':' in name):
792 or name.startswith('.mq') or '#' in name or ':' in name):
794 raise util.Abort(_('"%s" cannot be used as the name of a patch')
793 raise util.Abort(_('"%s" cannot be used as the name of a patch')
795 % name)
794 % name)
796
795
    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string

        Create a new patch *patchfn* from the current working-directory
        changes (qnew): writes the patch file, commits the changes, and
        records the patch in the series and applied stacks.  On any
        failure the commit is rolled back and the patch file unlinked.
        """
        msg = opts.get('msg')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        self.check_reserved_name(patchfn)
        if os.path.exists(self.join(patchfn)):
            raise util.Abort(_('patch "%s" already exists') % patchfn)
        if opts.get('include') or opts.get('exclude') or pats:
            match = cmdutil.match(repo, pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.check_localchanges(repo, force=True)
            match = cmdutil.matchfiles(repo, m + a + r)
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot manage merge changesets'))
        commitfiles = m + a + r
        self.check_toppatch(repo)
        insert = self.full_series_end()
        wlock = repo.wlock()
        try:
            # if patch file write fails, abort early
            p = self.opener(patchfn, "w")
            try:
                # header style depends on plain vs hg-changeset-patch mode
                if self.plainmode:
                    if user:
                        p.write("From: " + user + "\n")
                        if not date:
                            p.write("\n")
                    if date:
                        p.write("Date: %d %d\n\n" % date)
                else:
                    p.write("# HG changeset patch\n")
                    p.write("# Parent "
                            + hex(repo[None].parents()[0].node()) + "\n")
                    if user:
                        p.write("# User " + user + "\n")
                    if date:
                        p.write("# Date %s %s\n\n" % date)
                if hasattr(msg, '__call__'):
                    # lazy message: invoked only once we know the patch
                    # file could be created
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.full_series[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parse_series()
                    self.series_dirty = 1
                    self.applied_dirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        chunks = patch.diff(repo, node1=parent, node2=n,
                                            match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    # drop the wlock before touching the patch repo
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r:
                        r.add([patchfn])
                except:
                    # undo the commit made above before propagating
                    repo.rollback()
                    raise
            except Exception:
                # remove the partially-written patch file
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)
885
884
    def strip(self, repo, rev, update=True, backup="all", force=None):
        """Remove *rev* and its descendants from the repository.

        With *update*, the working directory is first moved to the
        stripped revision's queue parent.  *backup* is forwarded to
        repair.strip; *force* tolerates local changes.
        """
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.check_localchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, rev)
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)
905
904
906 def isapplied(self, patch):
905 def isapplied(self, patch):
907 """returns (index, rev, patch)"""
906 """returns (index, rev, patch)"""
908 for i, a in enumerate(self.applied):
907 for i, a in enumerate(self.applied):
909 if a.name == patch:
908 if a.name == patch:
910 return (i, a.node, a.name)
909 return (i, a.node, a.name)
911 return None
910 return None
912
911
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch reference to a series name.

        Aborts when the reference cannot be resolved; returns None
        only when *patch* itself is None.
        """
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, unique substring, or the qtip/qbase aliases
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            # no file of that name: maybe a numeric series offset
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partial_name(patch)
            if res:
                return res
            # 'name-N': N patches before 'name' in the series
            minus = patch.rfind('-')
            if minus >= 0:
                res = partial_name(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # 'name+N': N patches after 'name' in the series
            plus = patch.rfind('+')
            if plus >= 0:
                res = partial_name(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
983
982
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False, move=False):
        """Apply patches up to *patch* (qpush); with no patch, apply
        the next one; with all, apply the whole series.

        Returns the error code from apply/mergepatch (0 on success).
        On failure the working directory is reverted and files created
        by the failed patches are removed before re-raising.
        """
        diffopts = self.diffopts()
        wlock = repo.wlock()
        try:
            heads = []
            for b, ls in repo.branchmap().iteritems():
                heads += ls
            if not heads:
                heads = [nullid]
            if repo.dirstate.parents()[0] not in heads:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            if move:
                # qpush --move: reorder the patch to the front of the
                # unapplied part of the series
                try:
                    del self.full_series[self.full_series.index(patch, start)]
                except ValueError:
                    raise util.Abort(_("patch '%s' not found") % patch)
                self.full_series.insert(start, patch)
                self.parse_series()
                self.series_dirty = 1

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        try:
                            util.unlink(repo.wjoin(f))
                        except OSError, inst:
                            if inst.errno != errno.ENOENT:
                                raise
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
1094
1093
1095 def pop(self, repo, patch=None, force=False, update=True, all=False):
1094 def pop(self, repo, patch=None, force=False, update=True, all=False):
1096 wlock = repo.wlock()
1095 wlock = repo.wlock()
1097 try:
1096 try:
1098 if patch:
1097 if patch:
1099 # index, rev, patch
1098 # index, rev, patch
1100 info = self.isapplied(patch)
1099 info = self.isapplied(patch)
1101 if not info:
1100 if not info:
1102 patch = self.lookup(patch)
1101 patch = self.lookup(patch)
1103 info = self.isapplied(patch)
1102 info = self.isapplied(patch)
1104 if not info:
1103 if not info:
1105 raise util.Abort(_("patch %s is not applied") % patch)
1104 raise util.Abort(_("patch %s is not applied") % patch)
1106
1105
1107 if not self.applied:
1106 if not self.applied:
1108 # Allow qpop -a to work repeatedly,
1107 # Allow qpop -a to work repeatedly,
1109 # but not qpop without an argument
1108 # but not qpop without an argument
1110 self.ui.warn(_("no patches applied\n"))
1109 self.ui.warn(_("no patches applied\n"))
1111 return not all
1110 return not all
1112
1111
1113 if all:
1112 if all:
1114 start = 0
1113 start = 0
1115 elif patch:
1114 elif patch:
1116 start = info[0] + 1
1115 start = info[0] + 1
1117 else:
1116 else:
1118 start = len(self.applied) - 1
1117 start = len(self.applied) - 1
1119
1118
1120 if start >= len(self.applied):
1119 if start >= len(self.applied):
1121 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1120 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1122 return
1121 return
1123
1122
1124 if not update:
1123 if not update:
1125 parents = repo.dirstate.parents()
1124 parents = repo.dirstate.parents()
1126 rr = [x.node for x in self.applied]
1125 rr = [x.node for x in self.applied]
1127 for p in parents:
1126 for p in parents:
1128 if p in rr:
1127 if p in rr:
1129 self.ui.warn(_("qpop: forcing dirstate update\n"))
1128 self.ui.warn(_("qpop: forcing dirstate update\n"))
1130 update = True
1129 update = True
1131 else:
1130 else:
1132 parents = [p.node() for p in repo[None].parents()]
1131 parents = [p.node() for p in repo[None].parents()]
1133 needupdate = False
1132 needupdate = False
1134 for entry in self.applied[start:]:
1133 for entry in self.applied[start:]:
1135 if entry.node in parents:
1134 if entry.node in parents:
1136 needupdate = True
1135 needupdate = True
1137 break
1136 break
1138 update = needupdate
1137 update = needupdate
1139
1138
1140 if not force and update:
1139 if not force and update:
1141 self.check_localchanges(repo)
1140 self.check_localchanges(repo)
1142
1141
1143 self.applied_dirty = 1
1142 self.applied_dirty = 1
1144 end = len(self.applied)
1143 end = len(self.applied)
1145 rev = self.applied[start].node
1144 rev = self.applied[start].node
1146 if update:
1145 if update:
1147 top = self.check_toppatch(repo)[0]
1146 top = self.check_toppatch(repo)[0]
1148
1147
1149 try:
1148 try:
1150 heads = repo.changelog.heads(rev)
1149 heads = repo.changelog.heads(rev)
1151 except error.LookupError:
1150 except error.LookupError:
1152 node = short(rev)
1151 node = short(rev)
1153 raise util.Abort(_('trying to pop unknown node %s') % node)
1152 raise util.Abort(_('trying to pop unknown node %s') % node)
1154
1153
1155 if heads != [self.applied[-1].node]:
1154 if heads != [self.applied[-1].node]:
1156 raise util.Abort(_("popping would remove a revision not "
1155 raise util.Abort(_("popping would remove a revision not "
1157 "managed by this patch queue"))
1156 "managed by this patch queue"))
1158
1157
1159 # we know there are no local changes, so we can make a simplified
1158 # we know there are no local changes, so we can make a simplified
1160 # form of hg.update.
1159 # form of hg.update.
1161 if update:
1160 if update:
1162 qp = self.qparents(repo, rev)
1161 qp = self.qparents(repo, rev)
1163 ctx = repo[qp]
1162 ctx = repo[qp]
1164 m, a, r, d = repo.status(qp, top)[:4]
1163 m, a, r, d = repo.status(qp, top)[:4]
1165 if d:
1164 if d:
1166 raise util.Abort(_("deletions found between repo revs"))
1165 raise util.Abort(_("deletions found between repo revs"))
1167 for f in a:
1166 for f in a:
1168 try:
1167 try:
1169 util.unlink(repo.wjoin(f))
1168 util.unlink(repo.wjoin(f))
1170 except OSError, e:
1169 except OSError, e:
1171 if e.errno != errno.ENOENT:
1170 if e.errno != errno.ENOENT:
1172 raise
1171 raise
1173 repo.dirstate.forget(f)
1172 repo.dirstate.forget(f)
1174 for f in m + r:
1173 for f in m + r:
1175 fctx = ctx[f]
1174 fctx = ctx[f]
1176 repo.wwrite(f, fctx.data(), fctx.flags())
1175 repo.wwrite(f, fctx.data(), fctx.flags())
1177 repo.dirstate.normal(f)
1176 repo.dirstate.normal(f)
1178 repo.dirstate.setparents(qp, nullid)
1177 repo.dirstate.setparents(qp, nullid)
1179 for patch in reversed(self.applied[start:end]):
1178 for patch in reversed(self.applied[start:end]):
1180 self.ui.status(_("popping %s\n") % patch.name)
1179 self.ui.status(_("popping %s\n") % patch.name)
1181 del self.applied[start:end]
1180 del self.applied[start:end]
1182 self.strip(repo, rev, update=False, backup='strip')
1181 self.strip(repo, rev, update=False, backup='strip')
1183 if self.applied:
1182 if self.applied:
1184 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1183 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1185 else:
1184 else:
1186 self.ui.write(_("patch queue now empty\n"))
1185 self.ui.write(_("patch queue now empty\n"))
1187 finally:
1186 finally:
1188 wlock.release()
1187 wlock.release()
1189
1188
1190 def diff(self, repo, pats, opts):
1189 def diff(self, repo, pats, opts):
1191 top, patch = self.check_toppatch(repo)
1190 top, patch = self.check_toppatch(repo)
1192 if not top:
1191 if not top:
1193 self.ui.write(_("no patches applied\n"))
1192 self.ui.write(_("no patches applied\n"))
1194 return
1193 return
1195 qp = self.qparents(repo, top)
1194 qp = self.qparents(repo, top)
1196 if opts.get('reverse'):
1195 if opts.get('reverse'):
1197 node1, node2 = None, qp
1196 node1, node2 = None, qp
1198 else:
1197 else:
1199 node1, node2 = qp, None
1198 node1, node2 = qp, None
1200 diffopts = self.diffopts(opts, patch)
1199 diffopts = self.diffopts(opts, patch)
1201 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1200 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1202
1201
1203 def refresh(self, repo, pats=None, **opts):
1202 def refresh(self, repo, pats=None, **opts):
1204 if not self.applied:
1203 if not self.applied:
1205 self.ui.write(_("no patches applied\n"))
1204 self.ui.write(_("no patches applied\n"))
1206 return 1
1205 return 1
1207 msg = opts.get('msg', '').rstrip()
1206 msg = opts.get('msg', '').rstrip()
1208 newuser = opts.get('user')
1207 newuser = opts.get('user')
1209 newdate = opts.get('date')
1208 newdate = opts.get('date')
1210 if newdate:
1209 if newdate:
1211 newdate = '%d %d' % util.parsedate(newdate)
1210 newdate = '%d %d' % util.parsedate(newdate)
1212 wlock = repo.wlock()
1211 wlock = repo.wlock()
1213
1212
1214 try:
1213 try:
1215 self.check_toppatch(repo)
1214 self.check_toppatch(repo)
1216 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1215 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1217 if repo.changelog.heads(top) != [top]:
1216 if repo.changelog.heads(top) != [top]:
1218 raise util.Abort(_("cannot refresh a revision with children"))
1217 raise util.Abort(_("cannot refresh a revision with children"))
1219
1218
1220 cparents = repo.changelog.parents(top)
1219 cparents = repo.changelog.parents(top)
1221 patchparent = self.qparents(repo, top)
1220 patchparent = self.qparents(repo, top)
1222 ph = patchheader(self.join(patchfn), self.plainmode)
1221 ph = patchheader(self.join(patchfn), self.plainmode)
1223 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1222 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1224 if msg:
1223 if msg:
1225 ph.setmessage(msg)
1224 ph.setmessage(msg)
1226 if newuser:
1225 if newuser:
1227 ph.setuser(newuser)
1226 ph.setuser(newuser)
1228 if newdate:
1227 if newdate:
1229 ph.setdate(newdate)
1228 ph.setdate(newdate)
1230 ph.setparent(hex(patchparent))
1229 ph.setparent(hex(patchparent))
1231
1230
1232 # only commit new patch when write is complete
1231 # only commit new patch when write is complete
1233 patchf = self.opener(patchfn, 'w', atomictemp=True)
1232 patchf = self.opener(patchfn, 'w', atomictemp=True)
1234
1233
1235 comments = str(ph)
1234 comments = str(ph)
1236 if comments:
1235 if comments:
1237 patchf.write(comments)
1236 patchf.write(comments)
1238
1237
1239 # update the dirstate in place, strip off the qtip commit
1238 # update the dirstate in place, strip off the qtip commit
1240 # and then commit.
1239 # and then commit.
1241 #
1240 #
1242 # this should really read:
1241 # this should really read:
1243 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1242 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1244 # but we do it backwards to take advantage of manifest/chlog
1243 # but we do it backwards to take advantage of manifest/chlog
1245 # caching against the next repo.status call
1244 # caching against the next repo.status call
1246 mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
1245 mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
1247 changes = repo.changelog.read(top)
1246 changes = repo.changelog.read(top)
1248 man = repo.manifest.read(changes[0])
1247 man = repo.manifest.read(changes[0])
1249 aaa = aa[:]
1248 aaa = aa[:]
1250 matchfn = cmdutil.match(repo, pats, opts)
1249 matchfn = cmdutil.match(repo, pats, opts)
1251 # in short mode, we only diff the files included in the
1250 # in short mode, we only diff the files included in the
1252 # patch already plus specified files
1251 # patch already plus specified files
1253 if opts.get('short'):
1252 if opts.get('short'):
1254 # if amending a patch, we start with existing
1253 # if amending a patch, we start with existing
1255 # files plus specified files - unfiltered
1254 # files plus specified files - unfiltered
1256 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1255 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1257 # filter with inc/exl options
1256 # filter with inc/exl options
1258 matchfn = cmdutil.match(repo, opts=opts)
1257 matchfn = cmdutil.match(repo, opts=opts)
1259 else:
1258 else:
1260 match = cmdutil.matchall(repo)
1259 match = cmdutil.matchall(repo)
1261 m, a, r, d = repo.status(match=match)[:4]
1260 m, a, r, d = repo.status(match=match)[:4]
1262
1261
1263 # we might end up with files that were added between
1262 # we might end up with files that were added between
1264 # qtip and the dirstate parent, but then changed in the
1263 # qtip and the dirstate parent, but then changed in the
1265 # local dirstate. in this case, we want them to only
1264 # local dirstate. in this case, we want them to only
1266 # show up in the added section
1265 # show up in the added section
1267 for x in m:
1266 for x in m:
1268 if x not in aa:
1267 if x not in aa:
1269 mm.append(x)
1268 mm.append(x)
1270 # we might end up with files added by the local dirstate that
1269 # we might end up with files added by the local dirstate that
1271 # were deleted by the patch. In this case, they should only
1270 # were deleted by the patch. In this case, they should only
1272 # show up in the changed section.
1271 # show up in the changed section.
1273 for x in a:
1272 for x in a:
1274 if x in dd:
1273 if x in dd:
1275 del dd[dd.index(x)]
1274 del dd[dd.index(x)]
1276 mm.append(x)
1275 mm.append(x)
1277 else:
1276 else:
1278 aa.append(x)
1277 aa.append(x)
1279 # make sure any files deleted in the local dirstate
1278 # make sure any files deleted in the local dirstate
1280 # are not in the add or change column of the patch
1279 # are not in the add or change column of the patch
1281 forget = []
1280 forget = []
1282 for x in d + r:
1281 for x in d + r:
1283 if x in aa:
1282 if x in aa:
1284 del aa[aa.index(x)]
1283 del aa[aa.index(x)]
1285 forget.append(x)
1284 forget.append(x)
1286 continue
1285 continue
1287 elif x in mm:
1286 elif x in mm:
1288 del mm[mm.index(x)]
1287 del mm[mm.index(x)]
1289 dd.append(x)
1288 dd.append(x)
1290
1289
1291 m = list(set(mm))
1290 m = list(set(mm))
1292 r = list(set(dd))
1291 r = list(set(dd))
1293 a = list(set(aa))
1292 a = list(set(aa))
1294 c = [filter(matchfn, l) for l in (m, a, r)]
1293 c = [filter(matchfn, l) for l in (m, a, r)]
1295 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1294 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1296 chunks = patch.diff(repo, patchparent, match=match,
1295 chunks = patch.diff(repo, patchparent, match=match,
1297 changes=c, opts=diffopts)
1296 changes=c, opts=diffopts)
1298 for chunk in chunks:
1297 for chunk in chunks:
1299 patchf.write(chunk)
1298 patchf.write(chunk)
1300
1299
1301 try:
1300 try:
1302 if diffopts.git or diffopts.upgrade:
1301 if diffopts.git or diffopts.upgrade:
1303 copies = {}
1302 copies = {}
1304 for dst in a:
1303 for dst in a:
1305 src = repo.dirstate.copied(dst)
1304 src = repo.dirstate.copied(dst)
1306 # during qfold, the source file for copies may
1305 # during qfold, the source file for copies may
1307 # be removed. Treat this as a simple add.
1306 # be removed. Treat this as a simple add.
1308 if src is not None and src in repo.dirstate:
1307 if src is not None and src in repo.dirstate:
1309 copies.setdefault(src, []).append(dst)
1308 copies.setdefault(src, []).append(dst)
1310 repo.dirstate.add(dst)
1309 repo.dirstate.add(dst)
1311 # remember the copies between patchparent and qtip
1310 # remember the copies between patchparent and qtip
1312 for dst in aaa:
1311 for dst in aaa:
1313 f = repo.file(dst)
1312 f = repo.file(dst)
1314 src = f.renamed(man[dst])
1313 src = f.renamed(man[dst])
1315 if src:
1314 if src:
1316 copies.setdefault(src[0], []).extend(
1315 copies.setdefault(src[0], []).extend(
1317 copies.get(dst, []))
1316 copies.get(dst, []))
1318 if dst in a:
1317 if dst in a:
1319 copies[src[0]].append(dst)
1318 copies[src[0]].append(dst)
1320 # we can't copy a file created by the patch itself
1319 # we can't copy a file created by the patch itself
1321 if dst in copies:
1320 if dst in copies:
1322 del copies[dst]
1321 del copies[dst]
1323 for src, dsts in copies.iteritems():
1322 for src, dsts in copies.iteritems():
1324 for dst in dsts:
1323 for dst in dsts:
1325 repo.dirstate.copy(src, dst)
1324 repo.dirstate.copy(src, dst)
1326 else:
1325 else:
1327 for dst in a:
1326 for dst in a:
1328 repo.dirstate.add(dst)
1327 repo.dirstate.add(dst)
1329 # Drop useless copy information
1328 # Drop useless copy information
1330 for f in list(repo.dirstate.copies()):
1329 for f in list(repo.dirstate.copies()):
1331 repo.dirstate.copy(None, f)
1330 repo.dirstate.copy(None, f)
1332 for f in r:
1331 for f in r:
1333 repo.dirstate.remove(f)
1332 repo.dirstate.remove(f)
1334 # if the patch excludes a modified file, mark that
1333 # if the patch excludes a modified file, mark that
1335 # file with mtime=0 so status can see it.
1334 # file with mtime=0 so status can see it.
1336 mm = []
1335 mm = []
1337 for i in xrange(len(m)-1, -1, -1):
1336 for i in xrange(len(m)-1, -1, -1):
1338 if not matchfn(m[i]):
1337 if not matchfn(m[i]):
1339 mm.append(m[i])
1338 mm.append(m[i])
1340 del m[i]
1339 del m[i]
1341 for f in m:
1340 for f in m:
1342 repo.dirstate.normal(f)
1341 repo.dirstate.normal(f)
1343 for f in mm:
1342 for f in mm:
1344 repo.dirstate.normallookup(f)
1343 repo.dirstate.normallookup(f)
1345 for f in forget:
1344 for f in forget:
1346 repo.dirstate.forget(f)
1345 repo.dirstate.forget(f)
1347
1346
1348 if not msg:
1347 if not msg:
1349 if not ph.message:
1348 if not ph.message:
1350 message = "[mq]: %s\n" % patchfn
1349 message = "[mq]: %s\n" % patchfn
1351 else:
1350 else:
1352 message = "\n".join(ph.message)
1351 message = "\n".join(ph.message)
1353 else:
1352 else:
1354 message = msg
1353 message = msg
1355
1354
1356 user = ph.user or changes[1]
1355 user = ph.user or changes[1]
1357
1356
1358 # assumes strip can roll itself back if interrupted
1357 # assumes strip can roll itself back if interrupted
1359 repo.dirstate.setparents(*cparents)
1358 repo.dirstate.setparents(*cparents)
1360 self.applied.pop()
1359 self.applied.pop()
1361 self.applied_dirty = 1
1360 self.applied_dirty = 1
1362 self.strip(repo, top, update=False,
1361 self.strip(repo, top, update=False,
1363 backup='strip')
1362 backup='strip')
1364 except:
1363 except:
1365 repo.dirstate.invalidate()
1364 repo.dirstate.invalidate()
1366 raise
1365 raise
1367
1366
1368 try:
1367 try:
1369 # might be nice to attempt to roll back strip after this
1368 # might be nice to attempt to roll back strip after this
1370 patchf.rename()
1369 patchf.rename()
1371 n = repo.commit(message, user, ph.date, match=match,
1370 n = repo.commit(message, user, ph.date, match=match,
1372 force=True)
1371 force=True)
1373 self.applied.append(statusentry(n, patchfn))
1372 self.applied.append(statusentry(n, patchfn))
1374 except:
1373 except:
1375 ctx = repo[cparents[0]]
1374 ctx = repo[cparents[0]]
1376 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1375 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1377 self.save_dirty()
1376 self.save_dirty()
1378 self.ui.warn(_('refresh interrupted while patch was popped! '
1377 self.ui.warn(_('refresh interrupted while patch was popped! '
1379 '(revert --all, qpush to recover)\n'))
1378 '(revert --all, qpush to recover)\n'))
1380 raise
1379 raise
1381 finally:
1380 finally:
1382 wlock.release()
1381 wlock.release()
1383 self.removeundo(repo)
1382 self.removeundo(repo)
1384
1383
1385 def init(self, repo, create=False):
1384 def init(self, repo, create=False):
1386 if not create and os.path.isdir(self.path):
1385 if not create and os.path.isdir(self.path):
1387 raise util.Abort(_("patch queue directory already exists"))
1386 raise util.Abort(_("patch queue directory already exists"))
1388 try:
1387 try:
1389 os.mkdir(self.path)
1388 os.mkdir(self.path)
1390 except OSError, inst:
1389 except OSError, inst:
1391 if inst.errno != errno.EEXIST or not create:
1390 if inst.errno != errno.EEXIST or not create:
1392 raise
1391 raise
1393 if create:
1392 if create:
1394 return self.qrepo(create=True)
1393 return self.qrepo(create=True)
1395
1394
1396 def unapplied(self, repo, patch=None):
1395 def unapplied(self, repo, patch=None):
1397 if patch and patch not in self.series:
1396 if patch and patch not in self.series:
1398 raise util.Abort(_("patch %s is not in series file") % patch)
1397 raise util.Abort(_("patch %s is not in series file") % patch)
1399 if not patch:
1398 if not patch:
1400 start = self.series_end()
1399 start = self.series_end()
1401 else:
1400 else:
1402 start = self.series.index(patch) + 1
1401 start = self.series.index(patch) + 1
1403 unapplied = []
1402 unapplied = []
1404 for i in xrange(start, len(self.series)):
1403 for i in xrange(start, len(self.series)):
1405 pushable, reason = self.pushable(i)
1404 pushable, reason = self.pushable(i)
1406 if pushable:
1405 if pushable:
1407 unapplied.append((i, self.series[i]))
1406 unapplied.append((i, self.series[i]))
1408 self.explain_pushable(i)
1407 self.explain_pushable(i)
1409 return unapplied
1408 return unapplied
1410
1409
1411 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1410 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1412 summary=False):
1411 summary=False):
1413 def displayname(pfx, patchname, state):
1412 def displayname(pfx, patchname, state):
1414 if pfx:
1413 if pfx:
1415 self.ui.write(pfx)
1414 self.ui.write(pfx)
1416 if summary:
1415 if summary:
1417 ph = patchheader(self.join(patchname), self.plainmode)
1416 ph = patchheader(self.join(patchname), self.plainmode)
1418 msg = ph.message and ph.message[0] or ''
1417 msg = ph.message and ph.message[0] or ''
1419 if not self.ui.plain():
1418 if not self.ui.plain():
1420 width = util.termwidth() - len(pfx) - len(patchname) - 2
1419 width = util.termwidth() - len(pfx) - len(patchname) - 2
1421 if width > 0:
1420 if width > 0:
1422 msg = util.ellipsis(msg, width)
1421 msg = util.ellipsis(msg, width)
1423 else:
1422 else:
1424 msg = ''
1423 msg = ''
1425 self.ui.write(patchname, label='qseries.' + state)
1424 self.ui.write(patchname, label='qseries.' + state)
1426 self.ui.write(': ')
1425 self.ui.write(': ')
1427 self.ui.write(msg, label='qseries.message.' + state)
1426 self.ui.write(msg, label='qseries.message.' + state)
1428 else:
1427 else:
1429 self.ui.write(patchname, label='qseries.' + state)
1428 self.ui.write(patchname, label='qseries.' + state)
1430 self.ui.write('\n')
1429 self.ui.write('\n')
1431
1430
1432 applied = set([p.name for p in self.applied])
1431 applied = set([p.name for p in self.applied])
1433 if length is None:
1432 if length is None:
1434 length = len(self.series) - start
1433 length = len(self.series) - start
1435 if not missing:
1434 if not missing:
1436 if self.ui.verbose:
1435 if self.ui.verbose:
1437 idxwidth = len(str(start + length - 1))
1436 idxwidth = len(str(start + length - 1))
1438 for i in xrange(start, start + length):
1437 for i in xrange(start, start + length):
1439 patch = self.series[i]
1438 patch = self.series[i]
1440 if patch in applied:
1439 if patch in applied:
1441 char, state = 'A', 'applied'
1440 char, state = 'A', 'applied'
1442 elif self.pushable(i)[0]:
1441 elif self.pushable(i)[0]:
1443 char, state = 'U', 'unapplied'
1442 char, state = 'U', 'unapplied'
1444 else:
1443 else:
1445 char, state = 'G', 'guarded'
1444 char, state = 'G', 'guarded'
1446 pfx = ''
1445 pfx = ''
1447 if self.ui.verbose:
1446 if self.ui.verbose:
1448 pfx = '%*d %s ' % (idxwidth, i, char)
1447 pfx = '%*d %s ' % (idxwidth, i, char)
1449 elif status and status != char:
1448 elif status and status != char:
1450 continue
1449 continue
1451 displayname(pfx, patch, state)
1450 displayname(pfx, patch, state)
1452 else:
1451 else:
1453 msng_list = []
1452 msng_list = []
1454 for root, dirs, files in os.walk(self.path):
1453 for root, dirs, files in os.walk(self.path):
1455 d = root[len(self.path) + 1:]
1454 d = root[len(self.path) + 1:]
1456 for f in files:
1455 for f in files:
1457 fl = os.path.join(d, f)
1456 fl = os.path.join(d, f)
1458 if (fl not in self.series and
1457 if (fl not in self.series and
1459 fl not in (self.status_path, self.series_path,
1458 fl not in (self.status_path, self.series_path,
1460 self.guards_path)
1459 self.guards_path)
1461 and not fl.startswith('.')):
1460 and not fl.startswith('.')):
1462 msng_list.append(fl)
1461 msng_list.append(fl)
1463 for x in sorted(msng_list):
1462 for x in sorted(msng_list):
1464 pfx = self.ui.verbose and ('D ') or ''
1463 pfx = self.ui.verbose and ('D ') or ''
1465 displayname(pfx, x, 'missing')
1464 displayname(pfx, x, 'missing')
1466
1465
1467 def issaveline(self, l):
1466 def issaveline(self, l):
1468 if l.name == '.hg.patches.save.line':
1467 if l.name == '.hg.patches.save.line':
1469 return True
1468 return True
1470
1469
1471 def qrepo(self, create=False):
1470 def qrepo(self, create=False):
1472 if create or os.path.isdir(self.join(".hg")):
1471 if create or os.path.isdir(self.join(".hg")):
1473 return hg.repository(self.ui, path=self.path, create=create)
1472 return hg.repository(self.ui, path=self.path, create=create)
1474
1473
1475 def restore(self, repo, rev, delete=None, qupdate=None):
1474 def restore(self, repo, rev, delete=None, qupdate=None):
1476 desc = repo[rev].description().strip()
1475 desc = repo[rev].description().strip()
1477 lines = desc.splitlines()
1476 lines = desc.splitlines()
1478 i = 0
1477 i = 0
1479 datastart = None
1478 datastart = None
1480 series = []
1479 series = []
1481 applied = []
1480 applied = []
1482 qpp = None
1481 qpp = None
1483 for i, line in enumerate(lines):
1482 for i, line in enumerate(lines):
1484 if line == 'Patch Data:':
1483 if line == 'Patch Data:':
1485 datastart = i + 1
1484 datastart = i + 1
1486 elif line.startswith('Dirstate:'):
1485 elif line.startswith('Dirstate:'):
1487 l = line.rstrip()
1486 l = line.rstrip()
1488 l = l[10:].split(' ')
1487 l = l[10:].split(' ')
1489 qpp = [bin(x) for x in l]
1488 qpp = [bin(x) for x in l]
1490 elif datastart != None:
1489 elif datastart != None:
1491 l = line.rstrip()
1490 l = line.rstrip()
1492 n, name = l.split(':', 1)
1491 n, name = l.split(':', 1)
1493 if n:
1492 if n:
1494 applied.append(statusentry(bin(n), name))
1493 applied.append(statusentry(bin(n), name))
1495 else:
1494 else:
1496 series.append(l)
1495 series.append(l)
1497 if datastart is None:
1496 if datastart is None:
1498 self.ui.warn(_("No saved patch data found\n"))
1497 self.ui.warn(_("No saved patch data found\n"))
1499 return 1
1498 return 1
1500 self.ui.warn(_("restoring status: %s\n") % lines[0])
1499 self.ui.warn(_("restoring status: %s\n") % lines[0])
1501 self.full_series = series
1500 self.full_series = series
1502 self.applied = applied
1501 self.applied = applied
1503 self.parse_series()
1502 self.parse_series()
1504 self.series_dirty = 1
1503 self.series_dirty = 1
1505 self.applied_dirty = 1
1504 self.applied_dirty = 1
1506 heads = repo.changelog.heads()
1505 heads = repo.changelog.heads()
1507 if delete:
1506 if delete:
1508 if rev not in heads:
1507 if rev not in heads:
1509 self.ui.warn(_("save entry has children, leaving it alone\n"))
1508 self.ui.warn(_("save entry has children, leaving it alone\n"))
1510 else:
1509 else:
1511 self.ui.warn(_("removing save entry %s\n") % short(rev))
1510 self.ui.warn(_("removing save entry %s\n") % short(rev))
1512 pp = repo.dirstate.parents()
1511 pp = repo.dirstate.parents()
1513 if rev in pp:
1512 if rev in pp:
1514 update = True
1513 update = True
1515 else:
1514 else:
1516 update = False
1515 update = False
1517 self.strip(repo, rev, update=update, backup='strip')
1516 self.strip(repo, rev, update=update, backup='strip')
1518 if qpp:
1517 if qpp:
1519 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1518 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1520 (short(qpp[0]), short(qpp[1])))
1519 (short(qpp[0]), short(qpp[1])))
1521 if qupdate:
1520 if qupdate:
1522 self.ui.status(_("queue directory updating\n"))
1521 self.ui.status(_("queue directory updating\n"))
1523 r = self.qrepo()
1522 r = self.qrepo()
1524 if not r:
1523 if not r:
1525 self.ui.warn(_("Unable to load queue repository\n"))
1524 self.ui.warn(_("Unable to load queue repository\n"))
1526 return 1
1525 return 1
1527 hg.clean(r, qpp[0])
1526 hg.clean(r, qpp[0])
1528
1527
1529 def save(self, repo, msg=None):
1528 def save(self, repo, msg=None):
1530 if not self.applied:
1529 if not self.applied:
1531 self.ui.warn(_("save: no patches applied, exiting\n"))
1530 self.ui.warn(_("save: no patches applied, exiting\n"))
1532 return 1
1531 return 1
1533 if self.issaveline(self.applied[-1]):
1532 if self.issaveline(self.applied[-1]):
1534 self.ui.warn(_("status is already saved\n"))
1533 self.ui.warn(_("status is already saved\n"))
1535 return 1
1534 return 1
1536
1535
1537 if not msg:
1536 if not msg:
1538 msg = _("hg patches saved state")
1537 msg = _("hg patches saved state")
1539 else:
1538 else:
1540 msg = "hg patches: " + msg.rstrip('\r\n')
1539 msg = "hg patches: " + msg.rstrip('\r\n')
1541 r = self.qrepo()
1540 r = self.qrepo()
1542 if r:
1541 if r:
1543 pp = r.dirstate.parents()
1542 pp = r.dirstate.parents()
1544 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1543 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1545 msg += "\n\nPatch Data:\n"
1544 msg += "\n\nPatch Data:\n"
1546 msg += ''.join('%s\n' % x for x in self.applied)
1545 msg += ''.join('%s\n' % x for x in self.applied)
1547 msg += ''.join(':%s\n' % x for x in self.full_series)
1546 msg += ''.join(':%s\n' % x for x in self.full_series)
1548 n = repo.commit(msg, force=True)
1547 n = repo.commit(msg, force=True)
1549 if not n:
1548 if not n:
1550 self.ui.warn(_("repo commit failed\n"))
1549 self.ui.warn(_("repo commit failed\n"))
1551 return 1
1550 return 1
1552 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1551 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1553 self.applied_dirty = 1
1552 self.applied_dirty = 1
1554 self.removeundo(repo)
1553 self.removeundo(repo)
1555
1554
1556 def full_series_end(self):
1555 def full_series_end(self):
1557 if self.applied:
1556 if self.applied:
1558 p = self.applied[-1].name
1557 p = self.applied[-1].name
1559 end = self.find_series(p)
1558 end = self.find_series(p)
1560 if end is None:
1559 if end is None:
1561 return len(self.full_series)
1560 return len(self.full_series)
1562 return end + 1
1561 return end + 1
1563 return 0
1562 return 0
1564
1563
1565 def series_end(self, all_patches=False):
1564 def series_end(self, all_patches=False):
1566 """If all_patches is False, return the index of the next pushable patch
1565 """If all_patches is False, return the index of the next pushable patch
1567 in the series, or the series length. If all_patches is True, return the
1566 in the series, or the series length. If all_patches is True, return the
1568 index of the first patch past the last applied one.
1567 index of the first patch past the last applied one.
1569 """
1568 """
1570 end = 0
1569 end = 0
1571 def next(start):
1570 def next(start):
1572 if all_patches or start >= len(self.series):
1571 if all_patches or start >= len(self.series):
1573 return start
1572 return start
1574 for i in xrange(start, len(self.series)):
1573 for i in xrange(start, len(self.series)):
1575 p, reason = self.pushable(i)
1574 p, reason = self.pushable(i)
1576 if p:
1575 if p:
1577 break
1576 break
1578 self.explain_pushable(i)
1577 self.explain_pushable(i)
1579 return i
1578 return i
1580 if self.applied:
1579 if self.applied:
1581 p = self.applied[-1].name
1580 p = self.applied[-1].name
1582 try:
1581 try:
1583 end = self.series.index(p)
1582 end = self.series.index(p)
1584 except ValueError:
1583 except ValueError:
1585 return 0
1584 return 0
1586 return next(end + 1)
1585 return next(end + 1)
1587 return next(end)
1586 return next(end)
1588
1587
1589 def appliedname(self, index):
1588 def appliedname(self, index):
1590 pname = self.applied[index].name
1589 pname = self.applied[index].name
1591 if not self.ui.verbose:
1590 if not self.ui.verbose:
1592 p = pname
1591 p = pname
1593 else:
1592 else:
1594 p = str(self.series.index(pname)) + " " + pname
1593 p = str(self.series.index(pname)) + " " + pname
1595 return p
1594 return p
1596
1595
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    # Import patches into the queue, either from patch files (*files*)
    # or from existing changesets (*rev*).  Marks the series/status
    # state dirty; the caller is expected to save_dirty() afterwards.
    def checkseries(patchname):
        # Refuse a name that is already listed in the series file.
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        # Refuse to clobber an existing patch file unless --force.
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # Process newest-first so patches can be prepended one by one.
        rev.sort(reverse=True)
    if (len(files) > 1 or len(rev) > 1) and patchname:
        # A single -n name cannot apply to several patches.
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = repo.changelog.node(rev[0])
            if base in [n.node for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [self.applied[-1].node]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(self.applied[0].node)
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        diffopts = self.diffopts({'git': git})
        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                # Merge changesets cannot be represented as a single patch.
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                # Enforce the linearity requirement described above.
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                # Default name derived from the revision number.
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # Prepend: imported revisions become the bottom of the stack.
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
            patchf.close()

            # Record the revision as an applied patch.
            se = statusentry(n, patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for i, filename in enumerate(files):
        if existing:
            # -e/--existing: register a file already in the patch dir.
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            # Read patch text from stdin ('-') or from a file/URL, then
            # copy it into the patch directory.
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(
                            _('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = url.open(self.ui, filename).read()
            except (OSError, IOError):
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # Insert after the last applied patch, offset by how many
            # files we have imported so far in this call.
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn(_("adding %s to series file\n") % patchname)
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    # If the patch directory is itself a repository, track the new files.
    qrepo = self.qrepo()
    if qrepo:
        qrepo.add(added)
1711
1710
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    # Persist series/status changes immediately.
    mq.save_dirty()
    return 0
1724
1723
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    # Fix: the original computed `l = len(q.applied)` and never used it;
    # the dead local has been removed.  The elif chain for --last is
    # flattened into a single guarded branch for readability.
    q = repo.mq

    # `end` is the index just past the last patch to print: just past
    # `patch` when one is named, otherwise just past the topmost
    # applied patch.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)

    if opts.get('last'):
        # --last: show only the patch directly below the top one.
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = end - 2
        end = 1
    else:
        start = 0

    return q.qseries(repo, length=end, start=start, status='A',
                     summary=opts.get('summary'))
1752
1751
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    # First unapplied index: right after the named patch, or right
    # after the topmost applied patch.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    if opts.get('first'):
        if start == len(q.series):
            ui.write(_("all patches applied\n"))
            return 1
        length = 1
    else:
        length = None
    return q.qseries(repo, start=start, length=length, status='U',
                     summary=opts.get('summary'))
1771
1770
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()

    # --push only makes sense for file imports; --rev patches are
    # already applied.
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1808
1807
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state)."""
    mq = repo.mq
    qrepo = mq.init(repo, create)
    mq.save_dirty()
    if not qrepo:
        # Unversioned patch directory: nothing more to set up.
        return 0
    # Seed a .hgignore that hides mq's own transient bookkeeping files.
    if not os.path.exists(qrepo.wjoin('.hgignore')):
        fp = qrepo.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(qrepo.wjoin('series')):
        qrepo.wopener('series', 'w').close()
    qrepo.add(['.hgignore', 'series'])
    commands.add(ui, qrepo)
    return 0
1832
1831
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    create = opts['create_repo']
    return qinit(ui, repo, create=create)
1845
1844
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.
    '''
    def patchdir(repo):
        # Default location of the nested patch repository.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # Verify the patch directory really is a repository before cloning.
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # Remember the first applied changeset so it can be
            # stripped from the destination afterwards.
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                # Remote destination cannot be stripped, so clone only
                # up to (but excluding) the mq-managed changesets.
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # Remote source: best-effort lookup of the qbase bookmark-like tag.
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Remove applied patches so the destination starts clean.
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1909
1908
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    # Commit inside the nested patch repository, not the main one.
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
1919
1918
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1924
1923
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index just past the topmost applied patch; 0 when none applied.
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1935
1934
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # When every series entry is applied, there is no "next" patch.
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1944
1943
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    # Need at least two applied patches for a "previous" one to exist.
    if not count:
        ui.write(_("no patches applied\n"))
        return 1
    if count == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1957
1956
def setupheaderopts(ui, opts):
    # Resolve -U/--currentuser and -D/--currentdate into concrete
    # 'user'/'date' opts; explicit -u/-d values take precedence.
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
1963
1962
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        # Deferred so the editor is only launched if/when queue.new
        # actually needs the commit message.
        return ui.edit(msg, ui.username())
    q = repo.mq
    # Fix: the original assigned opts['msg'] = msg unconditionally and
    # then immediately reassigned it in an if/else; the redundant
    # pre-assignment has been dropped.  With --edit a callable is
    # passed, otherwise the literal message.
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
2000
1999
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        # Interactively edit the topmost patch's message.
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        toppatch = q.applied[-1].name
        header = patchheader(q.join(toppatch), q.plainmode)
        message = ui.edit('\n'.join(header.message),
                          header.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
2031
2030
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.
    """
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
2047
2046
2048 def fold(ui, repo, *files, **opts):
2047 def fold(ui, repo, *files, **opts):
2049 """fold the named patches into the current patch
2048 """fold the named patches into the current patch
2050
2049
2051 Patches must not yet be applied. Each patch will be successively
2050 Patches must not yet be applied. Each patch will be successively
2052 applied to the current patch in the order given. If all the
2051 applied to the current patch in the order given. If all the
2053 patches apply successfully, the current patch will be refreshed
2052 patches apply successfully, the current patch will be refreshed
2054 with the new cumulative patch, and the folded patches will be
2053 with the new cumulative patch, and the folded patches will be
2055 deleted. With -k/--keep, the folded patch files will not be
2054 deleted. With -k/--keep, the folded patch files will not be
2056 removed afterwards.
2055 removed afterwards.
2057
2056
2058 The header for each folded patch will be concatenated with the
2057 The header for each folded patch will be concatenated with the
2059 current patch header, separated by a line of '* * *'."""
2058 current patch header, separated by a line of '* * *'."""
2060
2059
2061 q = repo.mq
2060 q = repo.mq
2062
2061
2063 if not files:
2062 if not files:
2064 raise util.Abort(_('qfold requires at least one patch name'))
2063 raise util.Abort(_('qfold requires at least one patch name'))
2065 if not q.check_toppatch(repo)[0]:
2064 if not q.check_toppatch(repo)[0]:
2066 raise util.Abort(_('No patches applied'))
2065 raise util.Abort(_('No patches applied'))
2067 q.check_localchanges(repo)
2066 q.check_localchanges(repo)
2068
2067
2069 message = cmdutil.logmessage(opts)
2068 message = cmdutil.logmessage(opts)
2070 if opts['edit']:
2069 if opts['edit']:
2071 if message:
2070 if message:
2072 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2071 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2073
2072
2074 parent = q.lookup('qtip')
2073 parent = q.lookup('qtip')
2075 patches = []
2074 patches = []
2076 messages = []
2075 messages = []
2077 for f in files:
2076 for f in files:
2078 p = q.lookup(f)
2077 p = q.lookup(f)
2079 if p in patches or p == parent:
2078 if p in patches or p == parent:
2080 ui.warn(_('Skipping already folded patch %s') % p)
2079 ui.warn(_('Skipping already folded patch %s') % p)
2081 if q.isapplied(p):
2080 if q.isapplied(p):
2082 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2081 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2083 patches.append(p)
2082 patches.append(p)
2084
2083
2085 for p in patches:
2084 for p in patches:
2086 if not message:
2085 if not message:
2087 ph = patchheader(q.join(p), q.plainmode)
2086 ph = patchheader(q.join(p), q.plainmode)
2088 if ph.message:
2087 if ph.message:
2089 messages.append(ph.message)
2088 messages.append(ph.message)
2090 pf = q.join(p)
2089 pf = q.join(p)
2091 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2090 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2092 if not patchsuccess:
2091 if not patchsuccess:
2093 raise util.Abort(_('Error folding patch %s') % p)
2092 raise util.Abort(_('Error folding patch %s') % p)
2094 patch.updatedir(ui, repo, files)
2093 patch.updatedir(ui, repo, files)
2095
2094
2096 if not message:
2095 if not message:
2097 ph = patchheader(q.join(parent), q.plainmode)
2096 ph = patchheader(q.join(parent), q.plainmode)
2098 message, user = ph.message, ph.user
2097 message, user = ph.message, ph.user
2099 for msg in messages:
2098 for msg in messages:
2100 message.append('* * *')
2099 message.append('* * *')
2101 message.extend(msg)
2100 message.extend(msg)
2102 message = '\n'.join(message)
2101 message = '\n'.join(message)
2103
2102
2104 if opts['edit']:
2103 if opts['edit']:
2105 message = ui.edit(message, user or ui.username())
2104 message = ui.edit(message, user or ui.username())
2106
2105
2107 diffopts = q.patchopts(q.diffopts(), *patches)
2106 diffopts = q.patchopts(q.diffopts(), *patches)
2108 q.refresh(repo, msg=message, git=diffopts.git)
2107 q.refresh(repo, msg=message, git=diffopts.git)
2109 q.delete(repo, patches, opts)
2108 q.delete(repo, patches, opts)
2110 q.save_dirty()
2109 q.save_dirty()
2111
2110
2112 def goto(ui, repo, patch, **opts):
2111 def goto(ui, repo, patch, **opts):
2113 '''push or pop patches until named patch is at top of stack'''
2112 '''push or pop patches until named patch is at top of stack'''
2114 q = repo.mq
2113 q = repo.mq
2115 patch = q.lookup(patch)
2114 patch = q.lookup(patch)
2116 if q.isapplied(patch):
2115 if q.isapplied(patch):
2117 ret = q.pop(repo, patch, force=opts['force'])
2116 ret = q.pop(repo, patch, force=opts['force'])
2118 else:
2117 else:
2119 ret = q.push(repo, patch, force=opts['force'])
2118 ret = q.push(repo, patch, force=opts['force'])
2120 q.save_dirty()
2119 q.save_dirty()
2121 return ret
2120 return ret
2122
2121
2123 def guard(ui, repo, *args, **opts):
2122 def guard(ui, repo, *args, **opts):
2124 '''set or print guards for a patch
2123 '''set or print guards for a patch
2125
2124
2126 Guards control whether a patch can be pushed. A patch with no
2125 Guards control whether a patch can be pushed. A patch with no
2127 guards is always pushed. A patch with a positive guard ("+foo") is
2126 guards is always pushed. A patch with a positive guard ("+foo") is
2128 pushed only if the qselect command has activated it. A patch with
2127 pushed only if the qselect command has activated it. A patch with
2129 a negative guard ("-foo") is never pushed if the qselect command
2128 a negative guard ("-foo") is never pushed if the qselect command
2130 has activated it.
2129 has activated it.
2131
2130
2132 With no arguments, print the currently active guards.
2131 With no arguments, print the currently active guards.
2133 With arguments, set guards for the named patch.
2132 With arguments, set guards for the named patch.
2134 NOTE: Specifying negative guards now requires '--'.
2133 NOTE: Specifying negative guards now requires '--'.
2135
2134
2136 To set guards on another patch::
2135 To set guards on another patch::
2137
2136
2138 hg qguard other.patch -- +2.6.17 -stable
2137 hg qguard other.patch -- +2.6.17 -stable
2139 '''
2138 '''
2140 def status(idx):
2139 def status(idx):
2141 guards = q.series_guards[idx] or ['unguarded']
2140 guards = q.series_guards[idx] or ['unguarded']
2142 ui.write('%s: ' % ui.label(q.series[idx], 'qguard.patch'))
2141 ui.write('%s: ' % ui.label(q.series[idx], 'qguard.patch'))
2143 for i, guard in enumerate(guards):
2142 for i, guard in enumerate(guards):
2144 if guard.startswith('+'):
2143 if guard.startswith('+'):
2145 ui.write(guard, label='qguard.positive')
2144 ui.write(guard, label='qguard.positive')
2146 elif guard.startswith('-'):
2145 elif guard.startswith('-'):
2147 ui.write(guard, label='qguard.negative')
2146 ui.write(guard, label='qguard.negative')
2148 else:
2147 else:
2149 ui.write(guard, label='qguard.unguarded')
2148 ui.write(guard, label='qguard.unguarded')
2150 if i != len(guards) - 1:
2149 if i != len(guards) - 1:
2151 ui.write(' ')
2150 ui.write(' ')
2152 ui.write('\n')
2151 ui.write('\n')
2153 q = repo.mq
2152 q = repo.mq
2154 patch = None
2153 patch = None
2155 args = list(args)
2154 args = list(args)
2156 if opts['list']:
2155 if opts['list']:
2157 if args or opts['none']:
2156 if args or opts['none']:
2158 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2157 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2159 for i in xrange(len(q.series)):
2158 for i in xrange(len(q.series)):
2160 status(i)
2159 status(i)
2161 return
2160 return
2162 if not args or args[0][0:1] in '-+':
2161 if not args or args[0][0:1] in '-+':
2163 if not q.applied:
2162 if not q.applied:
2164 raise util.Abort(_('no patches applied'))
2163 raise util.Abort(_('no patches applied'))
2165 patch = q.applied[-1].name
2164 patch = q.applied[-1].name
2166 if patch is None and args[0][0:1] not in '-+':
2165 if patch is None and args[0][0:1] not in '-+':
2167 patch = args.pop(0)
2166 patch = args.pop(0)
2168 if patch is None:
2167 if patch is None:
2169 raise util.Abort(_('no patch to work with'))
2168 raise util.Abort(_('no patch to work with'))
2170 if args or opts['none']:
2169 if args or opts['none']:
2171 idx = q.find_series(patch)
2170 idx = q.find_series(patch)
2172 if idx is None:
2171 if idx is None:
2173 raise util.Abort(_('no patch named %s') % patch)
2172 raise util.Abort(_('no patch named %s') % patch)
2174 q.set_guards(idx, args)
2173 q.set_guards(idx, args)
2175 q.save_dirty()
2174 q.save_dirty()
2176 else:
2175 else:
2177 status(q.series.index(q.lookup(patch)))
2176 status(q.series.index(q.lookup(patch)))
2178
2177
2179 def header(ui, repo, patch=None):
2178 def header(ui, repo, patch=None):
2180 """print the header of the topmost or specified patch"""
2179 """print the header of the topmost or specified patch"""
2181 q = repo.mq
2180 q = repo.mq
2182
2181
2183 if patch:
2182 if patch:
2184 patch = q.lookup(patch)
2183 patch = q.lookup(patch)
2185 else:
2184 else:
2186 if not q.applied:
2185 if not q.applied:
2187 ui.write(_('no patches applied\n'))
2186 ui.write(_('no patches applied\n'))
2188 return 1
2187 return 1
2189 patch = q.lookup('qtip')
2188 patch = q.lookup('qtip')
2190 ph = patchheader(q.join(patch), q.plainmode)
2189 ph = patchheader(q.join(patch), q.plainmode)
2191
2190
2192 ui.write('\n'.join(ph.message) + '\n')
2191 ui.write('\n'.join(ph.message) + '\n')
2193
2192
2194 def lastsavename(path):
2193 def lastsavename(path):
2195 (directory, base) = os.path.split(path)
2194 (directory, base) = os.path.split(path)
2196 names = os.listdir(directory)
2195 names = os.listdir(directory)
2197 namere = re.compile("%s.([0-9]+)" % base)
2196 namere = re.compile("%s.([0-9]+)" % base)
2198 maxindex = None
2197 maxindex = None
2199 maxname = None
2198 maxname = None
2200 for f in names:
2199 for f in names:
2201 m = namere.match(f)
2200 m = namere.match(f)
2202 if m:
2201 if m:
2203 index = int(m.group(1))
2202 index = int(m.group(1))
2204 if maxindex is None or index > maxindex:
2203 if maxindex is None or index > maxindex:
2205 maxindex = index
2204 maxindex = index
2206 maxname = f
2205 maxname = f
2207 if maxname:
2206 if maxname:
2208 return (os.path.join(directory, maxname), maxindex)
2207 return (os.path.join(directory, maxname), maxindex)
2209 return (None, None)
2208 return (None, None)
2210
2209
2211 def savename(path):
2210 def savename(path):
2212 (last, index) = lastsavename(path)
2211 (last, index) = lastsavename(path)
2213 if last is None:
2212 if last is None:
2214 index = 0
2213 index = 0
2215 newpath = path + ".%d" % (index + 1)
2214 newpath = path + ".%d" % (index + 1)
2216 return newpath
2215 return newpath
2217
2216
2218 def push(ui, repo, patch=None, **opts):
2217 def push(ui, repo, patch=None, **opts):
2219 """push the next patch onto the stack
2218 """push the next patch onto the stack
2220
2219
2221 When -f/--force is applied, all local changes in patched files
2220 When -f/--force is applied, all local changes in patched files
2222 will be lost.
2221 will be lost.
2223 """
2222 """
2224 q = repo.mq
2223 q = repo.mq
2225 mergeq = None
2224 mergeq = None
2226
2225
2227 if opts['merge']:
2226 if opts['merge']:
2228 if opts['name']:
2227 if opts['name']:
2229 newpath = repo.join(opts['name'])
2228 newpath = repo.join(opts['name'])
2230 else:
2229 else:
2231 newpath, i = lastsavename(q.path)
2230 newpath, i = lastsavename(q.path)
2232 if not newpath:
2231 if not newpath:
2233 ui.warn(_("no saved queues found, please use -n\n"))
2232 ui.warn(_("no saved queues found, please use -n\n"))
2234 return 1
2233 return 1
2235 mergeq = queue(ui, repo.join(""), newpath)
2234 mergeq = queue(ui, repo.join(""), newpath)
2236 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2235 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2237 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2236 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2238 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'))
2237 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'))
2239 return ret
2238 return ret
2240
2239
2241 def pop(ui, repo, patch=None, **opts):
2240 def pop(ui, repo, patch=None, **opts):
2242 """pop the current patch off the stack
2241 """pop the current patch off the stack
2243
2242
2244 By default, pops off the top of the patch stack. If given a patch
2243 By default, pops off the top of the patch stack. If given a patch
2245 name, keeps popping off patches until the named patch is at the
2244 name, keeps popping off patches until the named patch is at the
2246 top of the stack.
2245 top of the stack.
2247 """
2246 """
2248 localupdate = True
2247 localupdate = True
2249 if opts['name']:
2248 if opts['name']:
2250 q = queue(ui, repo.join(""), repo.join(opts['name']))
2249 q = queue(ui, repo.join(""), repo.join(opts['name']))
2251 ui.warn(_('using patch queue: %s\n') % q.path)
2250 ui.warn(_('using patch queue: %s\n') % q.path)
2252 localupdate = False
2251 localupdate = False
2253 else:
2252 else:
2254 q = repo.mq
2253 q = repo.mq
2255 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2254 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2256 all=opts['all'])
2255 all=opts['all'])
2257 q.save_dirty()
2256 q.save_dirty()
2258 return ret
2257 return ret
2259
2258
2260 def rename(ui, repo, patch, name=None, **opts):
2259 def rename(ui, repo, patch, name=None, **opts):
2261 """rename a patch
2260 """rename a patch
2262
2261
2263 With one argument, renames the current patch to PATCH1.
2262 With one argument, renames the current patch to PATCH1.
2264 With two arguments, renames PATCH1 to PATCH2."""
2263 With two arguments, renames PATCH1 to PATCH2."""
2265
2264
2266 q = repo.mq
2265 q = repo.mq
2267
2266
2268 if not name:
2267 if not name:
2269 name = patch
2268 name = patch
2270 patch = None
2269 patch = None
2271
2270
2272 if patch:
2271 if patch:
2273 patch = q.lookup(patch)
2272 patch = q.lookup(patch)
2274 else:
2273 else:
2275 if not q.applied:
2274 if not q.applied:
2276 ui.write(_('no patches applied\n'))
2275 ui.write(_('no patches applied\n'))
2277 return
2276 return
2278 patch = q.lookup('qtip')
2277 patch = q.lookup('qtip')
2279 absdest = q.join(name)
2278 absdest = q.join(name)
2280 if os.path.isdir(absdest):
2279 if os.path.isdir(absdest):
2281 name = normname(os.path.join(name, os.path.basename(patch)))
2280 name = normname(os.path.join(name, os.path.basename(patch)))
2282 absdest = q.join(name)
2281 absdest = q.join(name)
2283 if os.path.exists(absdest):
2282 if os.path.exists(absdest):
2284 raise util.Abort(_('%s already exists') % absdest)
2283 raise util.Abort(_('%s already exists') % absdest)
2285
2284
2286 if name in q.series:
2285 if name in q.series:
2287 raise util.Abort(
2286 raise util.Abort(
2288 _('A patch named %s already exists in the series file') % name)
2287 _('A patch named %s already exists in the series file') % name)
2289
2288
2290 ui.note(_('renaming %s to %s\n') % (patch, name))
2289 ui.note(_('renaming %s to %s\n') % (patch, name))
2291 i = q.find_series(patch)
2290 i = q.find_series(patch)
2292 guards = q.guard_re.findall(q.full_series[i])
2291 guards = q.guard_re.findall(q.full_series[i])
2293 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2292 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2294 q.parse_series()
2293 q.parse_series()
2295 q.series_dirty = 1
2294 q.series_dirty = 1
2296
2295
2297 info = q.isapplied(patch)
2296 info = q.isapplied(patch)
2298 if info:
2297 if info:
2299 q.applied[info[0]] = statusentry(info[1], name)
2298 q.applied[info[0]] = statusentry(info[1], name)
2300 q.applied_dirty = 1
2299 q.applied_dirty = 1
2301
2300
2302 util.rename(q.join(patch), absdest)
2301 util.rename(q.join(patch), absdest)
2303 r = q.qrepo()
2302 r = q.qrepo()
2304 if r:
2303 if r:
2305 wlock = r.wlock()
2304 wlock = r.wlock()
2306 try:
2305 try:
2307 if r.dirstate[patch] == 'a':
2306 if r.dirstate[patch] == 'a':
2308 r.dirstate.forget(patch)
2307 r.dirstate.forget(patch)
2309 r.dirstate.add(name)
2308 r.dirstate.add(name)
2310 else:
2309 else:
2311 if r.dirstate[name] == 'r':
2310 if r.dirstate[name] == 'r':
2312 r.undelete([name])
2311 r.undelete([name])
2313 r.copy(patch, name)
2312 r.copy(patch, name)
2314 r.remove([patch], False)
2313 r.remove([patch], False)
2315 finally:
2314 finally:
2316 wlock.release()
2315 wlock.release()
2317
2316
2318 q.save_dirty()
2317 q.save_dirty()
2319
2318
2320 def restore(ui, repo, rev, **opts):
2319 def restore(ui, repo, rev, **opts):
2321 """restore the queue state saved by a revision (DEPRECATED)
2320 """restore the queue state saved by a revision (DEPRECATED)
2322
2321
2323 This command is deprecated, use rebase --mq instead."""
2322 This command is deprecated, use rebase --mq instead."""
2324 rev = repo.lookup(rev)
2323 rev = repo.lookup(rev)
2325 q = repo.mq
2324 q = repo.mq
2326 q.restore(repo, rev, delete=opts['delete'],
2325 q.restore(repo, rev, delete=opts['delete'],
2327 qupdate=opts['update'])
2326 qupdate=opts['update'])
2328 q.save_dirty()
2327 q.save_dirty()
2329 return 0
2328 return 0
2330
2329
2331 def save(ui, repo, **opts):
2330 def save(ui, repo, **opts):
2332 """save current queue state (DEPRECATED)
2331 """save current queue state (DEPRECATED)
2333
2332
2334 This command is deprecated, use rebase --mq instead."""
2333 This command is deprecated, use rebase --mq instead."""
2335 q = repo.mq
2334 q = repo.mq
2336 message = cmdutil.logmessage(opts)
2335 message = cmdutil.logmessage(opts)
2337 ret = q.save(repo, msg=message)
2336 ret = q.save(repo, msg=message)
2338 if ret:
2337 if ret:
2339 return ret
2338 return ret
2340 q.save_dirty()
2339 q.save_dirty()
2341 if opts['copy']:
2340 if opts['copy']:
2342 path = q.path
2341 path = q.path
2343 if opts['name']:
2342 if opts['name']:
2344 newpath = os.path.join(q.basepath, opts['name'])
2343 newpath = os.path.join(q.basepath, opts['name'])
2345 if os.path.exists(newpath):
2344 if os.path.exists(newpath):
2346 if not os.path.isdir(newpath):
2345 if not os.path.isdir(newpath):
2347 raise util.Abort(_('destination %s exists and is not '
2346 raise util.Abort(_('destination %s exists and is not '
2348 'a directory') % newpath)
2347 'a directory') % newpath)
2349 if not opts['force']:
2348 if not opts['force']:
2350 raise util.Abort(_('destination %s exists, '
2349 raise util.Abort(_('destination %s exists, '
2351 'use -f to force') % newpath)
2350 'use -f to force') % newpath)
2352 else:
2351 else:
2353 newpath = savename(path)
2352 newpath = savename(path)
2354 ui.warn(_("copy %s to %s\n") % (path, newpath))
2353 ui.warn(_("copy %s to %s\n") % (path, newpath))
2355 util.copyfiles(path, newpath)
2354 util.copyfiles(path, newpath)
2356 if opts['empty']:
2355 if opts['empty']:
2357 try:
2356 try:
2358 os.unlink(q.join(q.status_path))
2357 os.unlink(q.join(q.status_path))
2359 except:
2358 except:
2360 pass
2359 pass
2361 return 0
2360 return 0
2362
2361
2363 def strip(ui, repo, rev, **opts):
2362 def strip(ui, repo, rev, **opts):
2364 """strip a changeset and all its descendants from the repository
2363 """strip a changeset and all its descendants from the repository
2365
2364
2366 The strip command removes all changesets whose local revision
2365 The strip command removes all changesets whose local revision
2367 number is greater than or equal to REV, and then restores any
2366 number is greater than or equal to REV, and then restores any
2368 changesets that are not descendants of REV. If the working
2367 changesets that are not descendants of REV. If the working
2369 directory has uncommitted changes, the operation is aborted unless
2368 directory has uncommitted changes, the operation is aborted unless
2370 the --force flag is supplied.
2369 the --force flag is supplied.
2371
2370
2372 If a parent of the working directory is stripped, then the working
2371 If a parent of the working directory is stripped, then the working
2373 directory will automatically be updated to the most recent
2372 directory will automatically be updated to the most recent
2374 available ancestor of the stripped parent after the operation
2373 available ancestor of the stripped parent after the operation
2375 completes.
2374 completes.
2376
2375
2377 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2376 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2378 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2377 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2379 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2378 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2380 where BUNDLE is the bundle file created by the strip. Note that
2379 where BUNDLE is the bundle file created by the strip. Note that
2381 the local revision numbers will in general be different after the
2380 the local revision numbers will in general be different after the
2382 restore.
2381 restore.
2383
2382
2384 Use the --nobackup option to discard the backup bundle once the
2383 Use the --nobackup option to discard the backup bundle once the
2385 operation completes.
2384 operation completes.
2386 """
2385 """
2387 backup = 'all'
2386 backup = 'all'
2388 if opts['backup']:
2387 if opts['backup']:
2389 backup = 'strip'
2388 backup = 'strip'
2390 elif opts['nobackup']:
2389 elif opts['nobackup']:
2391 backup = 'none'
2390 backup = 'none'
2392
2391
2393 rev = repo.lookup(rev)
2392 rev = repo.lookup(rev)
2394 p = repo.dirstate.parents()
2393 p = repo.dirstate.parents()
2395 cl = repo.changelog
2394 cl = repo.changelog
2396 update = True
2395 update = True
2397 if p[0] == nullid:
2396 if p[0] == nullid:
2398 update = False
2397 update = False
2399 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2398 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2400 update = False
2399 update = False
2401 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2400 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2402 update = False
2401 update = False
2403
2402
2404 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2403 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2405 return 0
2404 return 0
2406
2405
2407 def select(ui, repo, *args, **opts):
2406 def select(ui, repo, *args, **opts):
2408 '''set or print guarded patches to push
2407 '''set or print guarded patches to push
2409
2408
2410 Use the qguard command to set or print guards on patch, then use
2409 Use the qguard command to set or print guards on patch, then use
2411 qselect to tell mq which guards to use. A patch will be pushed if
2410 qselect to tell mq which guards to use. A patch will be pushed if
2412 it has no guards or any positive guards match the currently
2411 it has no guards or any positive guards match the currently
2413 selected guard, but will not be pushed if any negative guards
2412 selected guard, but will not be pushed if any negative guards
2414 match the current guard. For example::
2413 match the current guard. For example::
2415
2414
2416 qguard foo.patch -stable (negative guard)
2415 qguard foo.patch -stable (negative guard)
2417 qguard bar.patch +stable (positive guard)
2416 qguard bar.patch +stable (positive guard)
2418 qselect stable
2417 qselect stable
2419
2418
2420 This activates the "stable" guard. mq will skip foo.patch (because
2419 This activates the "stable" guard. mq will skip foo.patch (because
2421 it has a negative match) but push bar.patch (because it has a
2420 it has a negative match) but push bar.patch (because it has a
2422 positive match).
2421 positive match).
2423
2422
2424 With no arguments, prints the currently active guards.
2423 With no arguments, prints the currently active guards.
2425 With one argument, sets the active guard.
2424 With one argument, sets the active guard.
2426
2425
2427 Use -n/--none to deactivate guards (no other arguments needed).
2426 Use -n/--none to deactivate guards (no other arguments needed).
2428 When no guards are active, patches with positive guards are
2427 When no guards are active, patches with positive guards are
2429 skipped and patches with negative guards are pushed.
2428 skipped and patches with negative guards are pushed.
2430
2429
2431 qselect can change the guards on applied patches. It does not pop
2430 qselect can change the guards on applied patches. It does not pop
2432 guarded patches by default. Use --pop to pop back to the last
2431 guarded patches by default. Use --pop to pop back to the last
2433 applied patch that is not guarded. Use --reapply (which implies
2432 applied patch that is not guarded. Use --reapply (which implies
2434 --pop) to push back to the current patch afterwards, but skip
2433 --pop) to push back to the current patch afterwards, but skip
2435 guarded patches.
2434 guarded patches.
2436
2435
2437 Use -s/--series to print a list of all guards in the series file
2436 Use -s/--series to print a list of all guards in the series file
2438 (no other arguments needed). Use -v for more information.'''
2437 (no other arguments needed). Use -v for more information.'''
2439
2438
2440 q = repo.mq
2439 q = repo.mq
2441 guards = q.active()
2440 guards = q.active()
2442 if args or opts['none']:
2441 if args or opts['none']:
2443 old_unapplied = q.unapplied(repo)
2442 old_unapplied = q.unapplied(repo)
2444 old_guarded = [i for i in xrange(len(q.applied)) if
2443 old_guarded = [i for i in xrange(len(q.applied)) if
2445 not q.pushable(i)[0]]
2444 not q.pushable(i)[0]]
2446 q.set_active(args)
2445 q.set_active(args)
2447 q.save_dirty()
2446 q.save_dirty()
2448 if not args:
2447 if not args:
2449 ui.status(_('guards deactivated\n'))
2448 ui.status(_('guards deactivated\n'))
2450 if not opts['pop'] and not opts['reapply']:
2449 if not opts['pop'] and not opts['reapply']:
2451 unapplied = q.unapplied(repo)
2450 unapplied = q.unapplied(repo)
2452 guarded = [i for i in xrange(len(q.applied))
2451 guarded = [i for i in xrange(len(q.applied))
2453 if not q.pushable(i)[0]]
2452 if not q.pushable(i)[0]]
2454 if len(unapplied) != len(old_unapplied):
2453 if len(unapplied) != len(old_unapplied):
2455 ui.status(_('number of unguarded, unapplied patches has '
2454 ui.status(_('number of unguarded, unapplied patches has '
2456 'changed from %d to %d\n') %
2455 'changed from %d to %d\n') %
2457 (len(old_unapplied), len(unapplied)))
2456 (len(old_unapplied), len(unapplied)))
2458 if len(guarded) != len(old_guarded):
2457 if len(guarded) != len(old_guarded):
2459 ui.status(_('number of guarded, applied patches has changed '
2458 ui.status(_('number of guarded, applied patches has changed '
2460 'from %d to %d\n') %
2459 'from %d to %d\n') %
2461 (len(old_guarded), len(guarded)))
2460 (len(old_guarded), len(guarded)))
2462 elif opts['series']:
2461 elif opts['series']:
2463 guards = {}
2462 guards = {}
2464 noguards = 0
2463 noguards = 0
2465 for gs in q.series_guards:
2464 for gs in q.series_guards:
2466 if not gs:
2465 if not gs:
2467 noguards += 1
2466 noguards += 1
2468 for g in gs:
2467 for g in gs:
2469 guards.setdefault(g, 0)
2468 guards.setdefault(g, 0)
2470 guards[g] += 1
2469 guards[g] += 1
2471 if ui.verbose:
2470 if ui.verbose:
2472 guards['NONE'] = noguards
2471 guards['NONE'] = noguards
2473 guards = guards.items()
2472 guards = guards.items()
2474 guards.sort(key=lambda x: x[0][1:])
2473 guards.sort(key=lambda x: x[0][1:])
2475 if guards:
2474 if guards:
2476 ui.note(_('guards in series file:\n'))
2475 ui.note(_('guards in series file:\n'))
2477 for guard, count in guards:
2476 for guard, count in guards:
2478 ui.note('%2d ' % count)
2477 ui.note('%2d ' % count)
2479 ui.write(guard, '\n')
2478 ui.write(guard, '\n')
2480 else:
2479 else:
2481 ui.note(_('no guards in series file\n'))
2480 ui.note(_('no guards in series file\n'))
2482 else:
2481 else:
2483 if guards:
2482 if guards:
2484 ui.note(_('active guards:\n'))
2483 ui.note(_('active guards:\n'))
2485 for g in guards:
2484 for g in guards:
2486 ui.write(g, '\n')
2485 ui.write(g, '\n')
2487 else:
2486 else:
2488 ui.write(_('no active guards\n'))
2487 ui.write(_('no active guards\n'))
2489 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2488 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2490 popped = False
2489 popped = False
2491 if opts['pop'] or opts['reapply']:
2490 if opts['pop'] or opts['reapply']:
2492 for i in xrange(len(q.applied)):
2491 for i in xrange(len(q.applied)):
2493 pushable, reason = q.pushable(i)
2492 pushable, reason = q.pushable(i)
2494 if not pushable:
2493 if not pushable:
2495 ui.status(_('popping guarded patches\n'))
2494 ui.status(_('popping guarded patches\n'))
2496 popped = True
2495 popped = True
2497 if i == 0:
2496 if i == 0:
2498 q.pop(repo, all=True)
2497 q.pop(repo, all=True)
2499 else:
2498 else:
2500 q.pop(repo, i - 1)
2499 q.pop(repo, i - 1)
2501 break
2500 break
2502 if popped:
2501 if popped:
2503 try:
2502 try:
2504 if reapply:
2503 if reapply:
2505 ui.status(_('reapplying unguarded patches\n'))
2504 ui.status(_('reapplying unguarded patches\n'))
2506 q.push(repo, reapply)
2505 q.push(repo, reapply)
2507 finally:
2506 finally:
2508 q.save_dirty()
2507 q.save_dirty()
2509
2508
2510 def finish(ui, repo, *revrange, **opts):
2509 def finish(ui, repo, *revrange, **opts):
2511 """move applied patches into repository history
2510 """move applied patches into repository history
2512
2511
2513 Finishes the specified revisions (corresponding to applied
2512 Finishes the specified revisions (corresponding to applied
2514 patches) by moving them out of mq control into regular repository
2513 patches) by moving them out of mq control into regular repository
2515 history.
2514 history.
2516
2515
2517 Accepts a revision range or the -a/--applied option. If --applied
2516 Accepts a revision range or the -a/--applied option. If --applied
2518 is specified, all applied mq revisions are removed from mq
2517 is specified, all applied mq revisions are removed from mq
2519 control. Otherwise, the given revisions must be at the base of the
2518 control. Otherwise, the given revisions must be at the base of the
2520 stack of applied patches.
2519 stack of applied patches.
2521
2520
2522 This can be especially useful if your changes have been applied to
2521 This can be especially useful if your changes have been applied to
2523 an upstream repository, or if you are about to push your changes
2522 an upstream repository, or if you are about to push your changes
2524 to upstream.
2523 to upstream.
2525 """
2524 """
2526 if not opts['applied'] and not revrange:
2525 if not opts['applied'] and not revrange:
2527 raise util.Abort(_('no revisions specified'))
2526 raise util.Abort(_('no revisions specified'))
2528 elif opts['applied']:
2527 elif opts['applied']:
2529 revrange = ('qbase:qtip',) + revrange
2528 revrange = ('qbase:qtip',) + revrange
2530
2529
2531 q = repo.mq
2530 q = repo.mq
2532 if not q.applied:
2531 if not q.applied:
2533 ui.status(_('no patches applied\n'))
2532 ui.status(_('no patches applied\n'))
2534 return 0
2533 return 0
2535
2534
2536 revs = cmdutil.revrange(repo, revrange)
2535 revs = cmdutil.revrange(repo, revrange)
2537 q.finish(repo, revs)
2536 q.finish(repo, revs)
2538 q.save_dirty()
2537 q.save_dirty()
2539 return 0
2538 return 0
2540
2539
2541 def qqueue(ui, repo, name=None, **opts):
2540 def qqueue(ui, repo, name=None, **opts):
2542 '''manage multiple patch queues
2541 '''manage multiple patch queues
2543
2542
2544 Supports switching between different patch queues, as well as creating
2543 Supports switching between different patch queues, as well as creating
2545 new patch queues and deleting existing ones.
2544 new patch queues and deleting existing ones.
2546
2545
2547 Omitting a queue name or specifying -l/--list will show you the registered
2546 Omitting a queue name or specifying -l/--list will show you the registered
2548 queues - by default the "normal" patches queue is registered. The currently
2547 queues - by default the "normal" patches queue is registered. The currently
2549 active queue will be marked with "(active)".
2548 active queue will be marked with "(active)".
2550
2549
2551 To create a new queue, use -c/--create. The queue is automatically made
2550 To create a new queue, use -c/--create. The queue is automatically made
2552 active, except in the case where there are applied patches from the
2551 active, except in the case where there are applied patches from the
2553 currently active queue in the repository. Then the queue will only be
2552 currently active queue in the repository. Then the queue will only be
2554 created and switching will fail.
2553 created and switching will fail.
2555
2554
2556 To delete an existing queue, use --delete. You cannot delete the currently
2555 To delete an existing queue, use --delete. You cannot delete the currently
2557 active queue.
2556 active queue.
2558 '''
2557 '''
2559
2558
2560 q = repo.mq
2559 q = repo.mq
2561
2560
2562 _defaultqueue = 'patches'
2561 _defaultqueue = 'patches'
2563 _allqueues = '.queues'
2562 _allqueues = '.queues'
2564 _activequeue = '.queue'
2563 _activequeue = '.queue'
2565
2564
2566 def _getcurrent():
2565 def _getcurrent():
2567 return os.path.basename(q.path)
2566 return os.path.basename(q.path)
2568
2567
2569 def _noqueues():
2568 def _noqueues():
2570 try:
2569 try:
2571 fh = repo.opener(_allqueues, 'r')
2570 fh = repo.opener(_allqueues, 'r')
2572 fh.close()
2571 fh.close()
2573 except IOError:
2572 except IOError:
2574 return True
2573 return True
2575
2574
2576 return False
2575 return False
2577
2576
2578 def _getqueues():
2577 def _getqueues():
2579 current = _getcurrent()
2578 current = _getcurrent()
2580
2579
2581 try:
2580 try:
2582 fh = repo.opener(_allqueues, 'r')
2581 fh = repo.opener(_allqueues, 'r')
2583 queues = [queue.strip() for queue in fh if queue.strip()]
2582 queues = [queue.strip() for queue in fh if queue.strip()]
2584 if current not in queues:
2583 if current not in queues:
2585 queues.append(current)
2584 queues.append(current)
2586 except IOError:
2585 except IOError:
2587 queues = [_defaultqueue]
2586 queues = [_defaultqueue]
2588
2587
2589 return sorted(queues)
2588 return sorted(queues)
2590
2589
2591 def _setactive(name):
2590 def _setactive(name):
2592 if q.applied:
2591 if q.applied:
2593 raise util.Abort(_('patches applied - cannot set new queue active'))
2592 raise util.Abort(_('patches applied - cannot set new queue active'))
2594
2593
2595 fh = repo.opener(_activequeue, 'w')
2594 fh = repo.opener(_activequeue, 'w')
2596 fh.write(name)
2595 fh.write(name)
2597 fh.close()
2596 fh.close()
2598
2597
2599 def _addqueue(name):
2598 def _addqueue(name):
2600 fh = repo.opener(_allqueues, 'a')
2599 fh = repo.opener(_allqueues, 'a')
2601 fh.write('%s\n' % (name,))
2600 fh.write('%s\n' % (name,))
2602 fh.close()
2601 fh.close()
2603
2602
2604 if not name or opts.get('list'):
2603 if not name or opts.get('list'):
2605 current = _getcurrent()
2604 current = _getcurrent()
2606 for queue in _getqueues():
2605 for queue in _getqueues():
2607 ui.write('%s' % (queue,))
2606 ui.write('%s' % (queue,))
2608 if queue == current:
2607 if queue == current:
2609 ui.write(_(' (active)\n'))
2608 ui.write(_(' (active)\n'))
2610 else:
2609 else:
2611 ui.write('\n')
2610 ui.write('\n')
2612 return
2611 return
2613
2612
2614 existing = _getqueues()
2613 existing = _getqueues()
2615
2614
2616 if name not in existing and opts.get('delete'):
2615 if name not in existing and opts.get('delete'):
2617 raise util.Abort(_('cannot delete queue that does not exist'))
2616 raise util.Abort(_('cannot delete queue that does not exist'))
2618 elif name not in existing and not opts.get('create'):
2617 elif name not in existing and not opts.get('create'):
2619 raise util.Abort(_('use --create to create a new queue'))
2618 raise util.Abort(_('use --create to create a new queue'))
2620
2619
2621 if opts.get('create'):
2620 if opts.get('create'):
2622 if _noqueues():
2621 if _noqueues():
2623 _addqueue(_defaultqueue)
2622 _addqueue(_defaultqueue)
2624 _addqueue(name)
2623 _addqueue(name)
2625 _setactive(name)
2624 _setactive(name)
2626 elif opts.get('delete'):
2625 elif opts.get('delete'):
2627 current = _getcurrent()
2626 current = _getcurrent()
2628
2627
2629 if name == current:
2628 if name == current:
2630 raise util.Abort(_('cannot delete currently active queue'))
2629 raise util.Abort(_('cannot delete currently active queue'))
2631
2630
2632 fh = repo.opener('.queues.new', 'w')
2631 fh = repo.opener('.queues.new', 'w')
2633 for queue in existing:
2632 for queue in existing:
2634 if queue == name:
2633 if queue == name:
2635 continue
2634 continue
2636 fh.write('%s\n' % (queue,))
2635 fh.write('%s\n' % (queue,))
2637 fh.close()
2636 fh.close()
2638 util.rename(repo.join('.queues.new'), repo.join(_allqueues))
2637 util.rename(repo.join('.queues.new'), repo.join(_allqueues))
2639 else:
2638 else:
2640 _setactive(name)
2639 _setactive(name)
2641
2640
2642 def reposetup(ui, repo):
2641 def reposetup(ui, repo):
2643 class mqrepo(repo.__class__):
2642 class mqrepo(repo.__class__):
2644 @util.propertycache
2643 @util.propertycache
2645 def mq(self):
2644 def mq(self):
2646 return queue(self.ui, self.join(""))
2645 return queue(self.ui, self.join(""))
2647
2646
2648 def abort_if_wdir_patched(self, errmsg, force=False):
2647 def abort_if_wdir_patched(self, errmsg, force=False):
2649 if self.mq.applied and not force:
2648 if self.mq.applied and not force:
2650 parent = self.dirstate.parents()[0]
2649 parent = self.dirstate.parents()[0]
2651 if parent in [s.node for s in self.mq.applied]:
2650 if parent in [s.node for s in self.mq.applied]:
2652 raise util.Abort(errmsg)
2651 raise util.Abort(errmsg)
2653
2652
2654 def commit(self, text="", user=None, date=None, match=None,
2653 def commit(self, text="", user=None, date=None, match=None,
2655 force=False, editor=False, extra={}):
2654 force=False, editor=False, extra={}):
2656 self.abort_if_wdir_patched(
2655 self.abort_if_wdir_patched(
2657 _('cannot commit over an applied mq patch'),
2656 _('cannot commit over an applied mq patch'),
2658 force)
2657 force)
2659
2658
2660 return super(mqrepo, self).commit(text, user, date, match, force,
2659 return super(mqrepo, self).commit(text, user, date, match, force,
2661 editor, extra)
2660 editor, extra)
2662
2661
2663 def push(self, remote, force=False, revs=None, newbranch=False):
2662 def push(self, remote, force=False, revs=None, newbranch=False):
2664 if self.mq.applied and not force and not revs:
2663 if self.mq.applied and not force and not revs:
2665 raise util.Abort(_('source has mq patches applied'))
2664 raise util.Abort(_('source has mq patches applied'))
2666 return super(mqrepo, self).push(remote, force, revs, newbranch)
2665 return super(mqrepo, self).push(remote, force, revs, newbranch)
2667
2666
2668 def _findtags(self):
2667 def _findtags(self):
2669 '''augment tags from base class with patch tags'''
2668 '''augment tags from base class with patch tags'''
2670 result = super(mqrepo, self)._findtags()
2669 result = super(mqrepo, self)._findtags()
2671
2670
2672 q = self.mq
2671 q = self.mq
2673 if not q.applied:
2672 if not q.applied:
2674 return result
2673 return result
2675
2674
2676 mqtags = [(patch.node, patch.name) for patch in q.applied]
2675 mqtags = [(patch.node, patch.name) for patch in q.applied]
2677
2676
2678 if mqtags[-1][0] not in self.changelog.nodemap:
2677 if mqtags[-1][0] not in self.changelog.nodemap:
2679 self.ui.warn(_('mq status file refers to unknown node %s\n')
2678 self.ui.warn(_('mq status file refers to unknown node %s\n')
2680 % short(mqtags[-1][0]))
2679 % short(mqtags[-1][0]))
2681 return result
2680 return result
2682
2681
2683 mqtags.append((mqtags[-1][0], 'qtip'))
2682 mqtags.append((mqtags[-1][0], 'qtip'))
2684 mqtags.append((mqtags[0][0], 'qbase'))
2683 mqtags.append((mqtags[0][0], 'qbase'))
2685 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2684 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2686 tags = result[0]
2685 tags = result[0]
2687 for patch in mqtags:
2686 for patch in mqtags:
2688 if patch[1] in tags:
2687 if patch[1] in tags:
2689 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2688 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2690 % patch[1])
2689 % patch[1])
2691 else:
2690 else:
2692 tags[patch[1]] = patch[0]
2691 tags[patch[1]] = patch[0]
2693
2692
2694 return result
2693 return result
2695
2694
2696 def _branchtags(self, partial, lrev):
2695 def _branchtags(self, partial, lrev):
2697 q = self.mq
2696 q = self.mq
2698 if not q.applied:
2697 if not q.applied:
2699 return super(mqrepo, self)._branchtags(partial, lrev)
2698 return super(mqrepo, self)._branchtags(partial, lrev)
2700
2699
2701 cl = self.changelog
2700 cl = self.changelog
2702 qbasenode = q.applied[0].node
2701 qbasenode = q.applied[0].node
2703 if qbasenode not in cl.nodemap:
2702 if qbasenode not in cl.nodemap:
2704 self.ui.warn(_('mq status file refers to unknown node %s\n')
2703 self.ui.warn(_('mq status file refers to unknown node %s\n')
2705 % short(qbasenode))
2704 % short(qbasenode))
2706 return super(mqrepo, self)._branchtags(partial, lrev)
2705 return super(mqrepo, self)._branchtags(partial, lrev)
2707
2706
2708 qbase = cl.rev(qbasenode)
2707 qbase = cl.rev(qbasenode)
2709 start = lrev + 1
2708 start = lrev + 1
2710 if start < qbase:
2709 if start < qbase:
2711 # update the cache (excluding the patches) and save it
2710 # update the cache (excluding the patches) and save it
2712 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2711 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2713 self._updatebranchcache(partial, ctxgen)
2712 self._updatebranchcache(partial, ctxgen)
2714 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2713 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2715 start = qbase
2714 start = qbase
2716 # if start = qbase, the cache is as updated as it should be.
2715 # if start = qbase, the cache is as updated as it should be.
2717 # if start > qbase, the cache includes (part of) the patches.
2716 # if start > qbase, the cache includes (part of) the patches.
2718 # we might as well use it, but we won't save it.
2717 # we might as well use it, but we won't save it.
2719
2718
2720 # update the cache up to the tip
2719 # update the cache up to the tip
2721 ctxgen = (self[r] for r in xrange(start, len(cl)))
2720 ctxgen = (self[r] for r in xrange(start, len(cl)))
2722 self._updatebranchcache(partial, ctxgen)
2721 self._updatebranchcache(partial, ctxgen)
2723
2722
2724 return partial
2723 return partial
2725
2724
2726 if repo.local():
2725 if repo.local():
2727 repo.__class__ = mqrepo
2726 repo.__class__ = mqrepo
2728
2727
2729 def mqimport(orig, ui, repo, *args, **kwargs):
2728 def mqimport(orig, ui, repo, *args, **kwargs):
2730 if (hasattr(repo, 'abort_if_wdir_patched')
2729 if (hasattr(repo, 'abort_if_wdir_patched')
2731 and not kwargs.get('no_commit', False)):
2730 and not kwargs.get('no_commit', False)):
2732 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2731 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2733 kwargs.get('force'))
2732 kwargs.get('force'))
2734 return orig(ui, repo, *args, **kwargs)
2733 return orig(ui, repo, *args, **kwargs)
2735
2734
2736 def mqinit(orig, ui, *args, **kwargs):
2735 def mqinit(orig, ui, *args, **kwargs):
2737 mq = kwargs.pop('mq', None)
2736 mq = kwargs.pop('mq', None)
2738
2737
2739 if not mq:
2738 if not mq:
2740 return orig(ui, *args, **kwargs)
2739 return orig(ui, *args, **kwargs)
2741
2740
2742 if args:
2741 if args:
2743 repopath = args[0]
2742 repopath = args[0]
2744 if not hg.islocal(repopath):
2743 if not hg.islocal(repopath):
2745 raise util.Abort(_('only a local queue repository '
2744 raise util.Abort(_('only a local queue repository '
2746 'may be initialized'))
2745 'may be initialized'))
2747 else:
2746 else:
2748 repopath = cmdutil.findrepo(os.getcwd())
2747 repopath = cmdutil.findrepo(os.getcwd())
2749 if not repopath:
2748 if not repopath:
2750 raise util.Abort(_('There is no Mercurial repository here '
2749 raise util.Abort(_('There is no Mercurial repository here '
2751 '(.hg not found)'))
2750 '(.hg not found)'))
2752 repo = hg.repository(ui, repopath)
2751 repo = hg.repository(ui, repopath)
2753 return qinit(ui, repo, True)
2752 return qinit(ui, repo, True)
2754
2753
2755 def mqcommand(orig, ui, repo, *args, **kwargs):
2754 def mqcommand(orig, ui, repo, *args, **kwargs):
2756 """Add --mq option to operate on patch repository instead of main"""
2755 """Add --mq option to operate on patch repository instead of main"""
2757
2756
2758 # some commands do not like getting unknown options
2757 # some commands do not like getting unknown options
2759 mq = kwargs.pop('mq', None)
2758 mq = kwargs.pop('mq', None)
2760
2759
2761 if not mq:
2760 if not mq:
2762 return orig(ui, repo, *args, **kwargs)
2761 return orig(ui, repo, *args, **kwargs)
2763
2762
2764 q = repo.mq
2763 q = repo.mq
2765 r = q.qrepo()
2764 r = q.qrepo()
2766 if not r:
2765 if not r:
2767 raise util.Abort(_('no queue repository'))
2766 raise util.Abort(_('no queue repository'))
2768 return orig(r.ui, r, *args, **kwargs)
2767 return orig(r.ui, r, *args, **kwargs)
2769
2768
2770 def summary(orig, ui, repo, *args, **kwargs):
2769 def summary(orig, ui, repo, *args, **kwargs):
2771 r = orig(ui, repo, *args, **kwargs)
2770 r = orig(ui, repo, *args, **kwargs)
2772 q = repo.mq
2771 q = repo.mq
2773 m = []
2772 m = []
2774 a, u = len(q.applied), len(q.unapplied(repo))
2773 a, u = len(q.applied), len(q.unapplied(repo))
2775 if a:
2774 if a:
2776 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
2775 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
2777 if u:
2776 if u:
2778 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
2777 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
2779 if m:
2778 if m:
2780 ui.write("mq: %s\n" % ', '.join(m))
2779 ui.write("mq: %s\n" % ', '.join(m))
2781 else:
2780 else:
2782 ui.note(_("mq: (empty queue)\n"))
2781 ui.note(_("mq: (empty queue)\n"))
2783 return r
2782 return r
2784
2783
2785 def uisetup(ui):
2784 def uisetup(ui):
2786 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2785 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2787
2786
2788 extensions.wrapcommand(commands.table, 'import', mqimport)
2787 extensions.wrapcommand(commands.table, 'import', mqimport)
2789 extensions.wrapcommand(commands.table, 'summary', summary)
2788 extensions.wrapcommand(commands.table, 'summary', summary)
2790
2789
2791 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2790 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2792 entry[1].extend(mqopt)
2791 entry[1].extend(mqopt)
2793
2792
2794 norepo = commands.norepo.split(" ")
2793 norepo = commands.norepo.split(" ")
2795 for cmd in commands.table.keys():
2794 for cmd in commands.table.keys():
2796 cmd = cmdutil.parsealiases(cmd)[0]
2795 cmd = cmdutil.parsealiases(cmd)[0]
2797 if cmd in norepo:
2796 if cmd in norepo:
2798 continue
2797 continue
2799 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2798 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2800 entry[1].extend(mqopt)
2799 entry[1].extend(mqopt)
2801
2800
2802 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2801 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2803
2802
2804 cmdtable = {
2803 cmdtable = {
2805 "qapplied":
2804 "qapplied":
2806 (applied,
2805 (applied,
2807 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2806 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2808 _('hg qapplied [-1] [-s] [PATCH]')),
2807 _('hg qapplied [-1] [-s] [PATCH]')),
2809 "qclone":
2808 "qclone":
2810 (clone,
2809 (clone,
2811 [('', 'pull', None, _('use pull protocol to copy metadata')),
2810 [('', 'pull', None, _('use pull protocol to copy metadata')),
2812 ('U', 'noupdate', None, _('do not update the new working directories')),
2811 ('U', 'noupdate', None, _('do not update the new working directories')),
2813 ('', 'uncompressed', None,
2812 ('', 'uncompressed', None,
2814 _('use uncompressed transfer (fast over LAN)')),
2813 _('use uncompressed transfer (fast over LAN)')),
2815 ('p', 'patches', '', _('location of source patch repository')),
2814 ('p', 'patches', '', _('location of source patch repository')),
2816 ] + commands.remoteopts,
2815 ] + commands.remoteopts,
2817 _('hg qclone [OPTION]... SOURCE [DEST]')),
2816 _('hg qclone [OPTION]... SOURCE [DEST]')),
2818 "qcommit|qci":
2817 "qcommit|qci":
2819 (commit,
2818 (commit,
2820 commands.table["^commit|ci"][1],
2819 commands.table["^commit|ci"][1],
2821 _('hg qcommit [OPTION]... [FILE]...')),
2820 _('hg qcommit [OPTION]... [FILE]...')),
2822 "^qdiff":
2821 "^qdiff":
2823 (diff,
2822 (diff,
2824 commands.diffopts + commands.diffopts2 + commands.walkopts,
2823 commands.diffopts + commands.diffopts2 + commands.walkopts,
2825 _('hg qdiff [OPTION]... [FILE]...')),
2824 _('hg qdiff [OPTION]... [FILE]...')),
2826 "qdelete|qremove|qrm":
2825 "qdelete|qremove|qrm":
2827 (delete,
2826 (delete,
2828 [('k', 'keep', None, _('keep patch file')),
2827 [('k', 'keep', None, _('keep patch file')),
2829 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2828 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2830 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2829 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2831 'qfold':
2830 'qfold':
2832 (fold,
2831 (fold,
2833 [('e', 'edit', None, _('edit patch header')),
2832 [('e', 'edit', None, _('edit patch header')),
2834 ('k', 'keep', None, _('keep folded patch files')),
2833 ('k', 'keep', None, _('keep folded patch files')),
2835 ] + commands.commitopts,
2834 ] + commands.commitopts,
2836 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2835 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2837 'qgoto':
2836 'qgoto':
2838 (goto,
2837 (goto,
2839 [('f', 'force', None, _('overwrite any local changes'))],
2838 [('f', 'force', None, _('overwrite any local changes'))],
2840 _('hg qgoto [OPTION]... PATCH')),
2839 _('hg qgoto [OPTION]... PATCH')),
2841 'qguard':
2840 'qguard':
2842 (guard,
2841 (guard,
2843 [('l', 'list', None, _('list all patches and guards')),
2842 [('l', 'list', None, _('list all patches and guards')),
2844 ('n', 'none', None, _('drop all guards'))],
2843 ('n', 'none', None, _('drop all guards'))],
2845 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2844 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2846 'qheader': (header, [], _('hg qheader [PATCH]')),
2845 'qheader': (header, [], _('hg qheader [PATCH]')),
2847 "qimport":
2846 "qimport":
2848 (qimport,
2847 (qimport,
2849 [('e', 'existing', None, _('import file in patch directory')),
2848 [('e', 'existing', None, _('import file in patch directory')),
2850 ('n', 'name', '', _('name of patch file')),
2849 ('n', 'name', '', _('name of patch file')),
2851 ('f', 'force', None, _('overwrite existing files')),
2850 ('f', 'force', None, _('overwrite existing files')),
2852 ('r', 'rev', [], _('place existing revisions under mq control')),
2851 ('r', 'rev', [], _('place existing revisions under mq control')),
2853 ('g', 'git', None, _('use git extended diff format')),
2852 ('g', 'git', None, _('use git extended diff format')),
2854 ('P', 'push', None, _('qpush after importing'))],
2853 ('P', 'push', None, _('qpush after importing'))],
2855 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2854 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2856 "^qinit":
2855 "^qinit":
2857 (init,
2856 (init,
2858 [('c', 'create-repo', None, _('create queue repository'))],
2857 [('c', 'create-repo', None, _('create queue repository'))],
2859 _('hg qinit [-c]')),
2858 _('hg qinit [-c]')),
2860 "^qnew":
2859 "^qnew":
2861 (new,
2860 (new,
2862 [('e', 'edit', None, _('edit commit message')),
2861 [('e', 'edit', None, _('edit commit message')),
2863 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2862 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2864 ('g', 'git', None, _('use git extended diff format')),
2863 ('g', 'git', None, _('use git extended diff format')),
2865 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2864 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2866 ('u', 'user', '', _('add "From: <given user>" to patch')),
2865 ('u', 'user', '', _('add "From: <given user>" to patch')),
2867 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2866 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2868 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2867 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2869 ] + commands.walkopts + commands.commitopts,
2868 ] + commands.walkopts + commands.commitopts,
2870 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
2869 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...')),
2871 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2870 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2872 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2871 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2873 "^qpop":
2872 "^qpop":
2874 (pop,
2873 (pop,
2875 [('a', 'all', None, _('pop all patches')),
2874 [('a', 'all', None, _('pop all patches')),
2876 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2875 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2877 ('f', 'force', None, _('forget any local changes to patched files'))],
2876 ('f', 'force', None, _('forget any local changes to patched files'))],
2878 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2877 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2879 "^qpush":
2878 "^qpush":
2880 (push,
2879 (push,
2881 [('f', 'force', None, _('apply if the patch has rejects')),
2880 [('f', 'force', None, _('apply if the patch has rejects')),
2882 ('l', 'list', None, _('list patch name in commit text')),
2881 ('l', 'list', None, _('list patch name in commit text')),
2883 ('a', 'all', None, _('apply all patches')),
2882 ('a', 'all', None, _('apply all patches')),
2884 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2883 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2885 ('n', 'name', '', _('merge queue name (DEPRECATED)')),
2884 ('n', 'name', '', _('merge queue name (DEPRECATED)')),
2886 ('', 'move', None, _('reorder patch series and apply only the patch'))],
2885 ('', 'move', None, _('reorder patch series and apply only the patch'))],
2887 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [--move] [PATCH | INDEX]')),
2886 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [--move] [PATCH | INDEX]')),
2888 "^qrefresh":
2887 "^qrefresh":
2889 (refresh,
2888 (refresh,
2890 [('e', 'edit', None, _('edit commit message')),
2889 [('e', 'edit', None, _('edit commit message')),
2891 ('g', 'git', None, _('use git extended diff format')),
2890 ('g', 'git', None, _('use git extended diff format')),
2892 ('s', 'short', None,
2891 ('s', 'short', None,
2893 _('refresh only files already in the patch and specified files')),
2892 _('refresh only files already in the patch and specified files')),
2894 ('U', 'currentuser', None,
2893 ('U', 'currentuser', None,
2895 _('add/update author field in patch with current user')),
2894 _('add/update author field in patch with current user')),
2896 ('u', 'user', '',
2895 ('u', 'user', '',
2897 _('add/update author field in patch with given user')),
2896 _('add/update author field in patch with given user')),
2898 ('D', 'currentdate', None,
2897 ('D', 'currentdate', None,
2899 _('add/update date field in patch with current date')),
2898 _('add/update date field in patch with current date')),
2900 ('d', 'date', '',
2899 ('d', 'date', '',
2901 _('add/update date field in patch with given date'))
2900 _('add/update date field in patch with given date'))
2902 ] + commands.walkopts + commands.commitopts,
2901 ] + commands.walkopts + commands.commitopts,
2903 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2902 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2904 'qrename|qmv':
2903 'qrename|qmv':
2905 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2904 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2906 "qrestore":
2905 "qrestore":
2907 (restore,
2906 (restore,
2908 [('d', 'delete', None, _('delete save entry')),
2907 [('d', 'delete', None, _('delete save entry')),
2909 ('u', 'update', None, _('update queue working directory'))],
2908 ('u', 'update', None, _('update queue working directory'))],
2910 _('hg qrestore [-d] [-u] REV')),
2909 _('hg qrestore [-d] [-u] REV')),
2911 "qsave":
2910 "qsave":
2912 (save,
2911 (save,
2913 [('c', 'copy', None, _('copy patch directory')),
2912 [('c', 'copy', None, _('copy patch directory')),
2914 ('n', 'name', '', _('copy directory name')),
2913 ('n', 'name', '', _('copy directory name')),
2915 ('e', 'empty', None, _('clear queue status file')),
2914 ('e', 'empty', None, _('clear queue status file')),
2916 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2915 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2917 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2916 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2918 "qselect":
2917 "qselect":
2919 (select,
2918 (select,
2920 [('n', 'none', None, _('disable all guards')),
2919 [('n', 'none', None, _('disable all guards')),
2921 ('s', 'series', None, _('list all guards in series file')),
2920 ('s', 'series', None, _('list all guards in series file')),
2922 ('', 'pop', None, _('pop to before first guarded applied patch')),
2921 ('', 'pop', None, _('pop to before first guarded applied patch')),
2923 ('', 'reapply', None, _('pop, then reapply patches'))],
2922 ('', 'reapply', None, _('pop, then reapply patches'))],
2924 _('hg qselect [OPTION]... [GUARD]...')),
2923 _('hg qselect [OPTION]... [GUARD]...')),
2925 "qseries":
2924 "qseries":
2926 (series,
2925 (series,
2927 [('m', 'missing', None, _('print patches not in series')),
2926 [('m', 'missing', None, _('print patches not in series')),
2928 ] + seriesopts,
2927 ] + seriesopts,
2929 _('hg qseries [-ms]')),
2928 _('hg qseries [-ms]')),
2930 "strip":
2929 "strip":
2931 (strip,
2930 (strip,
2932 [('f', 'force', None, _('force removal of changesets even if the '
2931 [('f', 'force', None, _('force removal of changesets even if the '
2933 'working directory has uncommitted changes')),
2932 'working directory has uncommitted changes')),
2934 ('b', 'backup', None, _('bundle only changesets with local revision'
2933 ('b', 'backup', None, _('bundle only changesets with local revision'
2935 ' number greater than REV which are not'
2934 ' number greater than REV which are not'
2936 ' descendants of REV (DEPRECATED)')),
2935 ' descendants of REV (DEPRECATED)')),
2937 ('n', 'nobackup', None, _('no backups'))],
2936 ('n', 'nobackup', None, _('no backups'))],
2938 _('hg strip [-f] [-n] REV')),
2937 _('hg strip [-f] [-n] REV')),
2939 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2938 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2940 "qunapplied":
2939 "qunapplied":
2941 (unapplied,
2940 (unapplied,
2942 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2941 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2943 _('hg qunapplied [-1] [-s] [PATCH]')),
2942 _('hg qunapplied [-1] [-s] [PATCH]')),
2944 "qfinish":
2943 "qfinish":
2945 (finish,
2944 (finish,
2946 [('a', 'applied', None, _('finish all applied changesets'))],
2945 [('a', 'applied', None, _('finish all applied changesets'))],
2947 _('hg qfinish [-a] [REV]...')),
2946 _('hg qfinish [-a] [REV]...')),
2948 'qqueue':
2947 'qqueue':
2949 (qqueue,
2948 (qqueue,
2950 [
2949 [
2951 ('l', 'list', False, _('list all available queues')),
2950 ('l', 'list', False, _('list all available queues')),
2952 ('c', 'create', False, _('create new queue')),
2951 ('c', 'create', False, _('create new queue')),
2953 ('', 'delete', False, _('delete reference to queue')),
2952 ('', 'delete', False, _('delete reference to queue')),
2954 ],
2953 ],
2955 _('[OPTION] [QUEUE]')),
2954 _('[OPTION] [QUEUE]')),
2956 }
2955 }
2957
2956
2958 colortable = {'qguard.negative': 'red',
2957 colortable = {'qguard.negative': 'red',
2959 'qguard.positive': 'yellow',
2958 'qguard.positive': 'yellow',
2960 'qguard.unguarded': 'green',
2959 'qguard.unguarded': 'green',
2961 'qseries.applied': 'blue bold underline',
2960 'qseries.applied': 'blue bold underline',
2962 'qseries.guarded': 'black bold',
2961 'qseries.guarded': 'black bold',
2963 'qseries.missing': 'red bold',
2962 'qseries.missing': 'red bold',
2964 'qseries.unapplied': 'black bold'}
2963 'qseries.unapplied': 'black bold'}
@@ -1,2296 +1,2297 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, stat, errno, os, time, inspect
19 import weakref, stat, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
24 supported = set('revlogv1 store fncache shared'.split())
24 supported = set('revlogv1 store fncache shared'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.baseui = baseui
33 self.baseui = baseui
34 self.ui = baseui.copy()
34 self.ui = baseui.copy()
35
35
36 try:
36 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
38 extensions.loadall(self.ui)
39 except IOError:
39 except IOError:
40 pass
40 pass
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 os.mkdir(path)
45 os.mkdir(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 requirements = ["revlogv1"]
47 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
48 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
49 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
50 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
51 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
52 requirements.append("fncache")
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 reqfile = self.opener("requires", "w")
58 reqfile = self.opener("requires", "w")
59 for r in requirements:
59 for r in requirements:
60 reqfile.write("%s\n" % r)
60 reqfile.write("%s\n" % r)
61 reqfile.close()
61 reqfile.close()
62 else:
62 else:
63 raise error.RepoError(_("repository %s not found") % path)
63 raise error.RepoError(_("repository %s not found") % path)
64 elif create:
64 elif create:
65 raise error.RepoError(_("repository %s already exists") % path)
65 raise error.RepoError(_("repository %s already exists") % path)
66 else:
66 else:
67 # find requirements
67 # find requirements
68 requirements = set()
68 requirements = set()
69 try:
69 try:
70 requirements = set(self.opener("requires").read().splitlines())
70 requirements = set(self.opener("requires").read().splitlines())
71 except IOError, inst:
71 except IOError, inst:
72 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
73 raise
73 raise
74 for r in requirements - self.supported:
74 for r in requirements - self.supported:
75 raise error.RepoError(_("requirement '%s' not supported") % r)
75 raise error.RepoError(_("requirement '%s' not supported") % r)
76
76
77 self.sharedpath = self.path
77 self.sharedpath = self.path
78 try:
78 try:
79 s = os.path.realpath(self.opener("sharedpath").read())
79 s = os.path.realpath(self.opener("sharedpath").read())
80 if not os.path.exists(s):
80 if not os.path.exists(s):
81 raise error.RepoError(
81 raise error.RepoError(
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 _('.hg/sharedpath points to nonexistent directory %s') % s)
83 self.sharedpath = s
83 self.sharedpath = s
84 except IOError, inst:
84 except IOError, inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87
87
88 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.store = store.store(requirements, self.sharedpath, util.opener)
89 self.spath = self.store.path
89 self.spath = self.store.path
90 self.sopener = self.store.opener
90 self.sopener = self.store.opener
91 self.sjoin = self.store.join
91 self.sjoin = self.store.join
92 self.opener.createmode = self.store.createmode
92 self.opener.createmode = self.store.createmode
93 self.sopener.options = {}
93 self.sopener.options = {}
94
94
95 # These two define the set of tags for this repository. _tags
95 # These two define the set of tags for this repository. _tags
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # maps tag name to node; _tagtypes maps tag name to 'global' or
97 # 'local'. (Global tags are defined by .hgtags across all
97 # 'local'. (Global tags are defined by .hgtags across all
98 # heads, and local tags are defined in .hg/localtags.) They
98 # heads, and local tags are defined in .hg/localtags.) They
99 # constitute the in-memory cache of tags.
99 # constitute the in-memory cache of tags.
100 self._tags = None
100 self._tags = None
101 self._tagtypes = None
101 self._tagtypes = None
102
102
103 self._branchcache = None # in UTF-8
103 self._branchcache = None # in UTF-8
104 self._branchcachetip = None
104 self._branchcachetip = None
105 self.nodetagscache = None
105 self.nodetagscache = None
106 self.filterpats = {}
106 self.filterpats = {}
107 self._datafilters = {}
107 self._datafilters = {}
108 self._transref = self._lockref = self._wlockref = None
108 self._transref = self._lockref = self._wlockref = None
109
109
110 @propertycache
110 @propertycache
111 def changelog(self):
111 def changelog(self):
112 c = changelog.changelog(self.sopener)
112 c = changelog.changelog(self.sopener)
113 if 'HG_PENDING' in os.environ:
113 if 'HG_PENDING' in os.environ:
114 p = os.environ['HG_PENDING']
114 p = os.environ['HG_PENDING']
115 if p.startswith(self.root):
115 if p.startswith(self.root):
116 c.readpending('00changelog.i.a')
116 c.readpending('00changelog.i.a')
117 self.sopener.options['defversion'] = c.version
117 self.sopener.options['defversion'] = c.version
118 return c
118 return c
119
119
120 @propertycache
120 @propertycache
121 def manifest(self):
121 def manifest(self):
122 return manifest.manifest(self.sopener)
122 return manifest.manifest(self.sopener)
123
123
124 @propertycache
124 @propertycache
125 def dirstate(self):
125 def dirstate(self):
126 return dirstate.dirstate(self.opener, self.ui, self.root)
126 return dirstate.dirstate(self.opener, self.ui, self.root)
127
127
128 def __getitem__(self, changeid):
128 def __getitem__(self, changeid):
129 if changeid is None:
129 if changeid is None:
130 return context.workingctx(self)
130 return context.workingctx(self)
131 return context.changectx(self, changeid)
131 return context.changectx(self, changeid)
132
132
133 def __contains__(self, changeid):
133 def __contains__(self, changeid):
134 try:
134 try:
135 return bool(self.lookup(changeid))
135 return bool(self.lookup(changeid))
136 except error.RepoLookupError:
136 except error.RepoLookupError:
137 return False
137 return False
138
138
139 def __nonzero__(self):
139 def __nonzero__(self):
140 return True
140 return True
141
141
142 def __len__(self):
142 def __len__(self):
143 return len(self.changelog)
143 return len(self.changelog)
144
144
145 def __iter__(self):
145 def __iter__(self):
146 for i in xrange(len(self)):
146 for i in xrange(len(self)):
147 yield i
147 yield i
148
148
149 def url(self):
149 def url(self):
150 return 'file:' + self.root
150 return 'file:' + self.root
151
151
152 def hook(self, name, throw=False, **args):
152 def hook(self, name, throw=False, **args):
153 return hook.hook(self.ui, self, name, throw, **args)
153 return hook.hook(self.ui, self, name, throw, **args)
154
154
155 tag_disallowed = ':\r\n'
155 tag_disallowed = ':\r\n'
156
156
157 def _tag(self, names, node, message, local, user, date, extra={}):
157 def _tag(self, names, node, message, local, user, date, extra={}):
158 if isinstance(names, str):
158 if isinstance(names, str):
159 allchars = names
159 allchars = names
160 names = (names,)
160 names = (names,)
161 else:
161 else:
162 allchars = ''.join(names)
162 allchars = ''.join(names)
163 for c in self.tag_disallowed:
163 for c in self.tag_disallowed:
164 if c in allchars:
164 if c in allchars:
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
165 raise util.Abort(_('%r cannot be used in a tag name') % c)
166
166
167 branches = self.branchmap()
167 branches = self.branchmap()
168 for name in names:
168 for name in names:
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
169 self.hook('pretag', throw=True, node=hex(node), tag=name,
170 local=local)
170 local=local)
171 if name in branches:
171 if name in branches:
172 self.ui.warn(_("warning: tag %s conflicts with existing"
172 self.ui.warn(_("warning: tag %s conflicts with existing"
173 " branch name\n") % name)
173 " branch name\n") % name)
174
174
175 def writetags(fp, names, munge, prevtags):
175 def writetags(fp, names, munge, prevtags):
176 fp.seek(0, 2)
176 fp.seek(0, 2)
177 if prevtags and prevtags[-1] != '\n':
177 if prevtags and prevtags[-1] != '\n':
178 fp.write('\n')
178 fp.write('\n')
179 for name in names:
179 for name in names:
180 m = munge and munge(name) or name
180 m = munge and munge(name) or name
181 if self._tagtypes and name in self._tagtypes:
181 if self._tagtypes and name in self._tagtypes:
182 old = self._tags.get(name, nullid)
182 old = self._tags.get(name, nullid)
183 fp.write('%s %s\n' % (hex(old), m))
183 fp.write('%s %s\n' % (hex(old), m))
184 fp.write('%s %s\n' % (hex(node), m))
184 fp.write('%s %s\n' % (hex(node), m))
185 fp.close()
185 fp.close()
186
186
187 prevtags = ''
187 prevtags = ''
188 if local:
188 if local:
189 try:
189 try:
190 fp = self.opener('localtags', 'r+')
190 fp = self.opener('localtags', 'r+')
191 except IOError:
191 except IOError:
192 fp = self.opener('localtags', 'a')
192 fp = self.opener('localtags', 'a')
193 else:
193 else:
194 prevtags = fp.read()
194 prevtags = fp.read()
195
195
196 # local tags are stored in the current charset
196 # local tags are stored in the current charset
197 writetags(fp, names, None, prevtags)
197 writetags(fp, names, None, prevtags)
198 for name in names:
198 for name in names:
199 self.hook('tag', node=hex(node), tag=name, local=local)
199 self.hook('tag', node=hex(node), tag=name, local=local)
200 return
200 return
201
201
202 try:
202 try:
203 fp = self.wfile('.hgtags', 'rb+')
203 fp = self.wfile('.hgtags', 'rb+')
204 except IOError:
204 except IOError:
205 fp = self.wfile('.hgtags', 'ab')
205 fp = self.wfile('.hgtags', 'ab')
206 else:
206 else:
207 prevtags = fp.read()
207 prevtags = fp.read()
208
208
209 # committed tags are stored in UTF-8
209 # committed tags are stored in UTF-8
210 writetags(fp, names, encoding.fromlocal, prevtags)
210 writetags(fp, names, encoding.fromlocal, prevtags)
211
211
212 if '.hgtags' not in self.dirstate:
212 if '.hgtags' not in self.dirstate:
213 self.add(['.hgtags'])
213 self.add(['.hgtags'])
214
214
215 m = matchmod.exact(self.root, '', ['.hgtags'])
215 m = matchmod.exact(self.root, '', ['.hgtags'])
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
216 tagnode = self.commit(message, user, date, extra=extra, match=m)
217
217
218 for name in names:
218 for name in names:
219 self.hook('tag', node=hex(node), tag=name, local=local)
219 self.hook('tag', node=hex(node), tag=name, local=local)
220
220
221 return tagnode
221 return tagnode
222
222
223 def tag(self, names, node, message, local, user, date):
223 def tag(self, names, node, message, local, user, date):
224 '''tag a revision with one or more symbolic names.
224 '''tag a revision with one or more symbolic names.
225
225
226 names is a list of strings or, when adding a single tag, names may be a
226 names is a list of strings or, when adding a single tag, names may be a
227 string.
227 string.
228
228
229 if local is True, the tags are stored in a per-repository file.
229 if local is True, the tags are stored in a per-repository file.
230 otherwise, they are stored in the .hgtags file, and a new
230 otherwise, they are stored in the .hgtags file, and a new
231 changeset is committed with the change.
231 changeset is committed with the change.
232
232
233 keyword arguments:
233 keyword arguments:
234
234
235 local: whether to store tags in non-version-controlled file
235 local: whether to store tags in non-version-controlled file
236 (default False)
236 (default False)
237
237
238 message: commit message to use if committing
238 message: commit message to use if committing
239
239
240 user: name of user to use if committing
240 user: name of user to use if committing
241
241
242 date: date tuple to use if committing'''
242 date: date tuple to use if committing'''
243
243
244 for x in self.status()[:5]:
244 for x in self.status()[:5]:
245 if '.hgtags' in x:
245 if '.hgtags' in x:
246 raise util.Abort(_('working copy of .hgtags is changed '
246 raise util.Abort(_('working copy of .hgtags is changed '
247 '(please commit .hgtags manually)'))
247 '(please commit .hgtags manually)'))
248
248
249 self.tags() # instantiate the cache
249 self.tags() # instantiate the cache
250 self._tag(names, node, message, local, user, date)
250 self._tag(names, node, message, local, user, date)
251
251
252 def tags(self):
252 def tags(self):
253 '''return a mapping of tag to node'''
253 '''return a mapping of tag to node'''
254 if self._tags is None:
254 if self._tags is None:
255 (self._tags, self._tagtypes) = self._findtags()
255 (self._tags, self._tagtypes) = self._findtags()
256
256
257 return self._tags
257 return self._tags
258
258
259 def _findtags(self):
259 def _findtags(self):
260 '''Do the hard work of finding tags. Return a pair of dicts
260 '''Do the hard work of finding tags. Return a pair of dicts
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
261 (tags, tagtypes) where tags maps tag name to node, and tagtypes
262 maps tag name to a string like \'global\' or \'local\'.
262 maps tag name to a string like \'global\' or \'local\'.
263 Subclasses or extensions are free to add their own tags, but
263 Subclasses or extensions are free to add their own tags, but
264 should be aware that the returned dicts will be retained for the
264 should be aware that the returned dicts will be retained for the
265 duration of the localrepo object.'''
265 duration of the localrepo object.'''
266
266
267 # XXX what tagtype should subclasses/extensions use? Currently
267 # XXX what tagtype should subclasses/extensions use? Currently
268 # mq and bookmarks add tags, but do not set the tagtype at all.
268 # mq and bookmarks add tags, but do not set the tagtype at all.
269 # Should each extension invent its own tag type? Should there
269 # Should each extension invent its own tag type? Should there
270 # be one tagtype for all such "virtual" tags? Or is the status
270 # be one tagtype for all such "virtual" tags? Or is the status
271 # quo fine?
271 # quo fine?
272
272
273 alltags = {} # map tag name to (node, hist)
273 alltags = {} # map tag name to (node, hist)
274 tagtypes = {}
274 tagtypes = {}
275
275
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
276 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
277 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
278
278
279 # Build the return dicts. Have to re-encode tag names because
279 # Build the return dicts. Have to re-encode tag names because
280 # the tags module always uses UTF-8 (in order not to lose info
280 # the tags module always uses UTF-8 (in order not to lose info
281 # writing to the cache), but the rest of Mercurial wants them in
281 # writing to the cache), but the rest of Mercurial wants them in
282 # local encoding.
282 # local encoding.
283 tags = {}
283 tags = {}
284 for (name, (node, hist)) in alltags.iteritems():
284 for (name, (node, hist)) in alltags.iteritems():
285 if node != nullid:
285 if node != nullid:
286 tags[encoding.tolocal(name)] = node
286 tags[encoding.tolocal(name)] = node
287 tags['tip'] = self.changelog.tip()
287 tags['tip'] = self.changelog.tip()
288 tagtypes = dict([(encoding.tolocal(name), value)
288 tagtypes = dict([(encoding.tolocal(name), value)
289 for (name, value) in tagtypes.iteritems()])
289 for (name, value) in tagtypes.iteritems()])
290 return (tags, tagtypes)
290 return (tags, tagtypes)
291
291
292 def tagtype(self, tagname):
292 def tagtype(self, tagname):
293 '''
293 '''
294 return the type of the given tag. result can be:
294 return the type of the given tag. result can be:
295
295
296 'local' : a local tag
296 'local' : a local tag
297 'global' : a global tag
297 'global' : a global tag
298 None : tag does not exist
298 None : tag does not exist
299 '''
299 '''
300
300
301 self.tags()
301 self.tags()
302
302
303 return self._tagtypes.get(tagname)
303 return self._tagtypes.get(tagname)
304
304
305 def tagslist(self):
305 def tagslist(self):
306 '''return a list of tags ordered by revision'''
306 '''return a list of tags ordered by revision'''
307 l = []
307 l = []
308 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
309 try:
309 try:
310 r = self.changelog.rev(n)
310 r = self.changelog.rev(n)
311 except:
311 except:
312 r = -2 # sort to the beginning of the list if unknown
312 r = -2 # sort to the beginning of the list if unknown
313 l.append((r, t, n))
313 l.append((r, t, n))
314 return [(t, n) for r, t, n in sorted(l)]
314 return [(t, n) for r, t, n in sorted(l)]
315
315
316 def nodetags(self, node):
316 def nodetags(self, node):
317 '''return the tags associated with a node'''
317 '''return the tags associated with a node'''
318 if not self.nodetagscache:
318 if not self.nodetagscache:
319 self.nodetagscache = {}
319 self.nodetagscache = {}
320 for t, n in self.tags().iteritems():
320 for t, n in self.tags().iteritems():
321 self.nodetagscache.setdefault(n, []).append(t)
321 self.nodetagscache.setdefault(n, []).append(t)
322 for tags in self.nodetagscache.itervalues():
322 for tags in self.nodetagscache.itervalues():
323 tags.sort()
323 tags.sort()
324 return self.nodetagscache.get(node, [])
324 return self.nodetagscache.get(node, [])
325
325
326 def _branchtags(self, partial, lrev):
326 def _branchtags(self, partial, lrev):
327 # TODO: rename this function?
327 # TODO: rename this function?
328 tiprev = len(self) - 1
328 tiprev = len(self) - 1
329 if lrev != tiprev:
329 if lrev != tiprev:
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
330 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
331 self._updatebranchcache(partial, ctxgen)
331 self._updatebranchcache(partial, ctxgen)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
332 self._writebranchcache(partial, self.changelog.tip(), tiprev)
333
333
334 return partial
334 return partial
335
335
336 def branchmap(self):
336 def branchmap(self):
337 '''returns a dictionary {branch: [branchheads]}'''
337 '''returns a dictionary {branch: [branchheads]}'''
338 tip = self.changelog.tip()
338 tip = self.changelog.tip()
339 if self._branchcache is not None and self._branchcachetip == tip:
339 if self._branchcache is not None and self._branchcachetip == tip:
340 return self._branchcache
340 return self._branchcache
341
341
342 oldtip = self._branchcachetip
342 oldtip = self._branchcachetip
343 self._branchcachetip = tip
343 self._branchcachetip = tip
344 if oldtip is None or oldtip not in self.changelog.nodemap:
344 if oldtip is None or oldtip not in self.changelog.nodemap:
345 partial, last, lrev = self._readbranchcache()
345 partial, last, lrev = self._readbranchcache()
346 else:
346 else:
347 lrev = self.changelog.rev(oldtip)
347 lrev = self.changelog.rev(oldtip)
348 partial = self._branchcache
348 partial = self._branchcache
349
349
350 self._branchtags(partial, lrev)
350 self._branchtags(partial, lrev)
351 # this private cache holds all heads (not just tips)
351 # this private cache holds all heads (not just tips)
352 self._branchcache = partial
352 self._branchcache = partial
353
353
354 return self._branchcache
354 return self._branchcache
355
355
356 def branchtags(self):
356 def branchtags(self):
357 '''return a dict where branch names map to the tipmost head of
357 '''return a dict where branch names map to the tipmost head of
358 the branch, open heads come before closed'''
358 the branch, open heads come before closed'''
359 bt = {}
359 bt = {}
360 for bn, heads in self.branchmap().iteritems():
360 for bn, heads in self.branchmap().iteritems():
361 tip = heads[-1]
361 tip = heads[-1]
362 for h in reversed(heads):
362 for h in reversed(heads):
363 if 'close' not in self.changelog.read(h)[5]:
363 if 'close' not in self.changelog.read(h)[5]:
364 tip = h
364 tip = h
365 break
365 break
366 bt[bn] = tip
366 bt[bn] = tip
367 return bt
367 return bt
368
368
369
369
370 def _readbranchcache(self):
370 def _readbranchcache(self):
371 partial = {}
371 partial = {}
372 try:
372 try:
373 f = self.opener("branchheads.cache")
373 f = self.opener("branchheads.cache")
374 lines = f.read().split('\n')
374 lines = f.read().split('\n')
375 f.close()
375 f.close()
376 except (IOError, OSError):
376 except (IOError, OSError):
377 return {}, nullid, nullrev
377 return {}, nullid, nullrev
378
378
379 try:
379 try:
380 last, lrev = lines.pop(0).split(" ", 1)
380 last, lrev = lines.pop(0).split(" ", 1)
381 last, lrev = bin(last), int(lrev)
381 last, lrev = bin(last), int(lrev)
382 if lrev >= len(self) or self[lrev].node() != last:
382 if lrev >= len(self) or self[lrev].node() != last:
383 # invalidate the cache
383 # invalidate the cache
384 raise ValueError('invalidating branch cache (tip differs)')
384 raise ValueError('invalidating branch cache (tip differs)')
385 for l in lines:
385 for l in lines:
386 if not l:
386 if not l:
387 continue
387 continue
388 node, label = l.split(" ", 1)
388 node, label = l.split(" ", 1)
389 partial.setdefault(label.strip(), []).append(bin(node))
389 partial.setdefault(label.strip(), []).append(bin(node))
390 except KeyboardInterrupt:
390 except KeyboardInterrupt:
391 raise
391 raise
392 except Exception, inst:
392 except Exception, inst:
393 if self.ui.debugflag:
393 if self.ui.debugflag:
394 self.ui.warn(str(inst), '\n')
394 self.ui.warn(str(inst), '\n')
395 partial, last, lrev = {}, nullid, nullrev
395 partial, last, lrev = {}, nullid, nullrev
396 return partial, last, lrev
396 return partial, last, lrev
397
397
398 def _writebranchcache(self, branches, tip, tiprev):
398 def _writebranchcache(self, branches, tip, tiprev):
399 try:
399 try:
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
400 f = self.opener("branchheads.cache", "w", atomictemp=True)
401 f.write("%s %s\n" % (hex(tip), tiprev))
401 f.write("%s %s\n" % (hex(tip), tiprev))
402 for label, nodes in branches.iteritems():
402 for label, nodes in branches.iteritems():
403 for node in nodes:
403 for node in nodes:
404 f.write("%s %s\n" % (hex(node), label))
404 f.write("%s %s\n" % (hex(node), label))
405 f.rename()
405 f.rename()
406 except (IOError, OSError):
406 except (IOError, OSError):
407 pass
407 pass
408
408
409 def _updatebranchcache(self, partial, ctxgen):
409 def _updatebranchcache(self, partial, ctxgen):
410 # collect new branch entries
410 # collect new branch entries
411 newbranches = {}
411 newbranches = {}
412 for c in ctxgen:
412 for c in ctxgen:
413 newbranches.setdefault(c.branch(), []).append(c.node())
413 newbranches.setdefault(c.branch(), []).append(c.node())
414 # if older branchheads are reachable from new ones, they aren't
414 # if older branchheads are reachable from new ones, they aren't
415 # really branchheads. Note checking parents is insufficient:
415 # really branchheads. Note checking parents is insufficient:
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
416 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
417 for branch, newnodes in newbranches.iteritems():
417 for branch, newnodes in newbranches.iteritems():
418 bheads = partial.setdefault(branch, [])
418 bheads = partial.setdefault(branch, [])
419 bheads.extend(newnodes)
419 bheads.extend(newnodes)
420 if len(bheads) <= 1:
420 if len(bheads) <= 1:
421 continue
421 continue
422 # starting from tip means fewer passes over reachable
422 # starting from tip means fewer passes over reachable
423 while newnodes:
423 while newnodes:
424 latest = newnodes.pop()
424 latest = newnodes.pop()
425 if latest not in bheads:
425 if latest not in bheads:
426 continue
426 continue
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
427 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
428 reachable = self.changelog.reachable(latest, minbhrev)
428 reachable = self.changelog.reachable(latest, minbhrev)
429 reachable.remove(latest)
429 reachable.remove(latest)
430 bheads = [b for b in bheads if b not in reachable]
430 bheads = [b for b in bheads if b not in reachable]
431 partial[branch] = bheads
431 partial[branch] = bheads
432
432
433 def lookup(self, key):
433 def lookup(self, key):
434 if isinstance(key, int):
434 if isinstance(key, int):
435 return self.changelog.node(key)
435 return self.changelog.node(key)
436 elif key == '.':
436 elif key == '.':
437 return self.dirstate.parents()[0]
437 return self.dirstate.parents()[0]
438 elif key == 'null':
438 elif key == 'null':
439 return nullid
439 return nullid
440 elif key == 'tip':
440 elif key == 'tip':
441 return self.changelog.tip()
441 return self.changelog.tip()
442 n = self.changelog._match(key)
442 n = self.changelog._match(key)
443 if n:
443 if n:
444 return n
444 return n
445 if key in self.tags():
445 if key in self.tags():
446 return self.tags()[key]
446 return self.tags()[key]
447 if key in self.branchtags():
447 if key in self.branchtags():
448 return self.branchtags()[key]
448 return self.branchtags()[key]
449 n = self.changelog._partialmatch(key)
449 n = self.changelog._partialmatch(key)
450 if n:
450 if n:
451 return n
451 return n
452
452
453 # can't find key, check if it might have come from damaged dirstate
453 # can't find key, check if it might have come from damaged dirstate
454 if key in self.dirstate.parents():
454 if key in self.dirstate.parents():
455 raise error.Abort(_("working directory has unknown parent '%s'!")
455 raise error.Abort(_("working directory has unknown parent '%s'!")
456 % short(key))
456 % short(key))
457 try:
457 try:
458 if len(key) == 20:
458 if len(key) == 20:
459 key = hex(key)
459 key = hex(key)
460 except:
460 except:
461 pass
461 pass
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
462 raise error.RepoLookupError(_("unknown revision '%s'") % key)
463
463
464 def lookupbranch(self, key, remote=None):
464 def lookupbranch(self, key, remote=None):
465 repo = remote or self
465 repo = remote or self
466 if key in repo.branchmap():
466 if key in repo.branchmap():
467 return key
467 return key
468
468
469 repo = (remote and remote.local()) and remote or self
469 repo = (remote and remote.local()) and remote or self
470 return repo[key].branch()
470 return repo[key].branch()
471
471
472 def local(self):
472 def local(self):
473 return True
473 return True
474
474
475 def join(self, f):
475 def join(self, f):
476 return os.path.join(self.path, f)
476 return os.path.join(self.path, f)
477
477
478 def wjoin(self, f):
478 def wjoin(self, f):
479 return os.path.join(self.root, f)
479 return os.path.join(self.root, f)
480
480
481 def rjoin(self, f):
481 def rjoin(self, f):
482 return os.path.join(self.root, util.pconvert(f))
482 return os.path.join(self.root, util.pconvert(f))
483
483
484 def file(self, f):
484 def file(self, f):
485 if f[0] == '/':
485 if f[0] == '/':
486 f = f[1:]
486 f = f[1:]
487 return filelog.filelog(self.sopener, f)
487 return filelog.filelog(self.sopener, f)
488
488
489 def changectx(self, changeid):
489 def changectx(self, changeid):
490 return self[changeid]
490 return self[changeid]
491
491
492 def parents(self, changeid=None):
492 def parents(self, changeid=None):
493 '''get list of changectxs for parents of changeid'''
493 '''get list of changectxs for parents of changeid'''
494 return self[changeid].parents()
494 return self[changeid].parents()
495
495
496 def filectx(self, path, changeid=None, fileid=None):
496 def filectx(self, path, changeid=None, fileid=None):
497 """changeid can be a changeset revision, node, or tag.
497 """changeid can be a changeset revision, node, or tag.
498 fileid can be a file revision or node."""
498 fileid can be a file revision or node."""
499 return context.filectx(self, path, changeid, fileid)
499 return context.filectx(self, path, changeid, fileid)
500
500
501 def getcwd(self):
501 def getcwd(self):
502 return self.dirstate.getcwd()
502 return self.dirstate.getcwd()
503
503
504 def pathto(self, f, cwd=None):
504 def pathto(self, f, cwd=None):
505 return self.dirstate.pathto(f, cwd)
505 return self.dirstate.pathto(f, cwd)
506
506
507 def wfile(self, f, mode='r'):
507 def wfile(self, f, mode='r'):
508 return self.wopener(f, mode)
508 return self.wopener(f, mode)
509
509
510 def _link(self, f):
510 def _link(self, f):
511 return os.path.islink(self.wjoin(f))
511 return os.path.islink(self.wjoin(f))
512
512
513 def _filter(self, filter, filename, data):
513 def _filter(self, filter, filename, data):
514 if filter not in self.filterpats:
514 if filter not in self.filterpats:
515 l = []
515 l = []
516 for pat, cmd in self.ui.configitems(filter):
516 for pat, cmd in self.ui.configitems(filter):
517 if cmd == '!':
517 if cmd == '!':
518 continue
518 continue
519 mf = matchmod.match(self.root, '', [pat])
519 mf = matchmod.match(self.root, '', [pat])
520 fn = None
520 fn = None
521 params = cmd
521 params = cmd
522 for name, filterfn in self._datafilters.iteritems():
522 for name, filterfn in self._datafilters.iteritems():
523 if cmd.startswith(name):
523 if cmd.startswith(name):
524 fn = filterfn
524 fn = filterfn
525 params = cmd[len(name):].lstrip()
525 params = cmd[len(name):].lstrip()
526 break
526 break
527 if not fn:
527 if not fn:
528 fn = lambda s, c, **kwargs: util.filter(s, c)
528 fn = lambda s, c, **kwargs: util.filter(s, c)
529 # Wrap old filters not supporting keyword arguments
529 # Wrap old filters not supporting keyword arguments
530 if not inspect.getargspec(fn)[2]:
530 if not inspect.getargspec(fn)[2]:
531 oldfn = fn
531 oldfn = fn
532 fn = lambda s, c, **kwargs: oldfn(s, c)
532 fn = lambda s, c, **kwargs: oldfn(s, c)
533 l.append((mf, fn, params))
533 l.append((mf, fn, params))
534 self.filterpats[filter] = l
534 self.filterpats[filter] = l
535
535
536 for mf, fn, cmd in self.filterpats[filter]:
536 for mf, fn, cmd in self.filterpats[filter]:
537 if mf(filename):
537 if mf(filename):
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
538 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
539 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
540 break
540 break
541
541
542 return data
542 return data
543
543
544 def adddatafilter(self, name, filter):
544 def adddatafilter(self, name, filter):
545 self._datafilters[name] = filter
545 self._datafilters[name] = filter
546
546
547 def wread(self, filename):
547 def wread(self, filename):
548 if self._link(filename):
548 if self._link(filename):
549 data = os.readlink(self.wjoin(filename))
549 data = os.readlink(self.wjoin(filename))
550 else:
550 else:
551 data = self.wopener(filename, 'r').read()
551 data = self.wopener(filename, 'r').read()
552 return self._filter("encode", filename, data)
552 return self._filter("encode", filename, data)
553
553
554 def wwrite(self, filename, data, flags):
554 def wwrite(self, filename, data, flags):
555 data = self._filter("decode", filename, data)
555 data = self._filter("decode", filename, data)
556 try:
556 try:
557 os.unlink(self.wjoin(filename))
557 os.unlink(self.wjoin(filename))
558 except OSError:
558 except OSError:
559 pass
559 pass
560 if 'l' in flags:
560 if 'l' in flags:
561 self.wopener.symlink(data, filename)
561 self.wopener.symlink(data, filename)
562 else:
562 else:
563 self.wopener(filename, 'w').write(data)
563 self.wopener(filename, 'w').write(data)
564 if 'x' in flags:
564 if 'x' in flags:
565 util.set_flags(self.wjoin(filename), False, True)
565 util.set_flags(self.wjoin(filename), False, True)
566
566
567 def wwritedata(self, filename, data):
567 def wwritedata(self, filename, data):
568 return self._filter("decode", filename, data)
568 return self._filter("decode", filename, data)
569
569
570 def transaction(self, desc):
570 def transaction(self, desc):
571 tr = self._transref and self._transref() or None
571 tr = self._transref and self._transref() or None
572 if tr and tr.running():
572 if tr and tr.running():
573 return tr.nest()
573 return tr.nest()
574
574
575 # abort here if the journal already exists
575 # abort here if the journal already exists
576 if os.path.exists(self.sjoin("journal")):
576 if os.path.exists(self.sjoin("journal")):
577 raise error.RepoError(
577 raise error.RepoError(
578 _("abandoned transaction found - run hg recover"))
578 _("abandoned transaction found - run hg recover"))
579
579
580 # save dirstate for rollback
580 # save dirstate for rollback
581 try:
581 try:
582 ds = self.opener("dirstate").read()
582 ds = self.opener("dirstate").read()
583 except IOError:
583 except IOError:
584 ds = ""
584 ds = ""
585 self.opener("journal.dirstate", "w").write(ds)
585 self.opener("journal.dirstate", "w").write(ds)
586 self.opener("journal.branch", "w").write(self.dirstate.branch())
586 self.opener("journal.branch", "w").write(self.dirstate.branch())
587 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
587 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
588
588
589 renames = [(self.sjoin("journal"), self.sjoin("undo")),
589 renames = [(self.sjoin("journal"), self.sjoin("undo")),
590 (self.join("journal.dirstate"), self.join("undo.dirstate")),
590 (self.join("journal.dirstate"), self.join("undo.dirstate")),
591 (self.join("journal.branch"), self.join("undo.branch")),
591 (self.join("journal.branch"), self.join("undo.branch")),
592 (self.join("journal.desc"), self.join("undo.desc"))]
592 (self.join("journal.desc"), self.join("undo.desc"))]
593 tr = transaction.transaction(self.ui.warn, self.sopener,
593 tr = transaction.transaction(self.ui.warn, self.sopener,
594 self.sjoin("journal"),
594 self.sjoin("journal"),
595 aftertrans(renames),
595 aftertrans(renames),
596 self.store.createmode)
596 self.store.createmode)
597 self._transref = weakref.ref(tr)
597 self._transref = weakref.ref(tr)
598 return tr
598 return tr
599
599
600 def recover(self):
600 def recover(self):
601 lock = self.lock()
601 lock = self.lock()
602 try:
602 try:
603 if os.path.exists(self.sjoin("journal")):
603 if os.path.exists(self.sjoin("journal")):
604 self.ui.status(_("rolling back interrupted transaction\n"))
604 self.ui.status(_("rolling back interrupted transaction\n"))
605 transaction.rollback(self.sopener, self.sjoin("journal"),
605 transaction.rollback(self.sopener, self.sjoin("journal"),
606 self.ui.warn)
606 self.ui.warn)
607 self.invalidate()
607 self.invalidate()
608 return True
608 return True
609 else:
609 else:
610 self.ui.warn(_("no interrupted transaction available\n"))
610 self.ui.warn(_("no interrupted transaction available\n"))
611 return False
611 return False
612 finally:
612 finally:
613 lock.release()
613 lock.release()
614
614
615 def rollback(self, dryrun=False):
615 def rollback(self, dryrun=False):
616 wlock = lock = None
616 wlock = lock = None
617 try:
617 try:
618 wlock = self.wlock()
618 wlock = self.wlock()
619 lock = self.lock()
619 lock = self.lock()
620 if os.path.exists(self.sjoin("undo")):
620 if os.path.exists(self.sjoin("undo")):
621 try:
621 try:
622 args = self.opener("undo.desc", "r").read().splitlines()
622 args = self.opener("undo.desc", "r").read().splitlines()
623 if len(args) >= 3 and self.ui.verbose:
623 if len(args) >= 3 and self.ui.verbose:
624 desc = _("rolling back to revision %s"
624 desc = _("rolling back to revision %s"
625 " (undo %s: %s)\n") % (
625 " (undo %s: %s)\n") % (
626 int(args[0]) - 1, args[1], args[2])
626 int(args[0]) - 1, args[1], args[2])
627 elif len(args) >= 2:
627 elif len(args) >= 2:
628 desc = _("rolling back to revision %s (undo %s)\n") % (
628 desc = _("rolling back to revision %s (undo %s)\n") % (
629 int(args[0]) - 1, args[1])
629 int(args[0]) - 1, args[1])
630 except IOError:
630 except IOError:
631 desc = _("rolling back unknown transaction\n")
631 desc = _("rolling back unknown transaction\n")
632 self.ui.status(desc)
632 self.ui.status(desc)
633 if dryrun:
633 if dryrun:
634 return
634 return
635 transaction.rollback(self.sopener, self.sjoin("undo"),
635 transaction.rollback(self.sopener, self.sjoin("undo"),
636 self.ui.warn)
636 self.ui.warn)
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 try:
638 try:
639 branch = self.opener("undo.branch").read()
639 branch = self.opener("undo.branch").read()
640 self.dirstate.setbranch(branch)
640 self.dirstate.setbranch(branch)
641 except IOError:
641 except IOError:
642 self.ui.warn(_("Named branch could not be reset, "
642 self.ui.warn(_("Named branch could not be reset, "
643 "current branch still is: %s\n")
643 "current branch still is: %s\n")
644 % encoding.tolocal(self.dirstate.branch()))
644 % encoding.tolocal(self.dirstate.branch()))
645 self.invalidate()
645 self.invalidate()
646 self.dirstate.invalidate()
646 self.dirstate.invalidate()
647 self.destroyed()
647 self.destroyed()
648 else:
648 else:
649 self.ui.warn(_("no rollback information available\n"))
649 self.ui.warn(_("no rollback information available\n"))
650 return 1
650 return 1
651 finally:
651 finally:
652 release(lock, wlock)
652 release(lock, wlock)
653
653
654 def invalidatecaches(self):
654 def invalidatecaches(self):
655 self._tags = None
655 self._tags = None
656 self._tagtypes = None
656 self._tagtypes = None
657 self.nodetagscache = None
657 self.nodetagscache = None
658 self._branchcache = None # in UTF-8
658 self._branchcache = None # in UTF-8
659 self._branchcachetip = None
659 self._branchcachetip = None
660
660
661 def invalidate(self):
661 def invalidate(self):
662 for a in "changelog manifest".split():
662 for a in "changelog manifest".split():
663 if a in self.__dict__:
663 if a in self.__dict__:
664 delattr(self, a)
664 delattr(self, a)
665 self.invalidatecaches()
665 self.invalidatecaches()
666
666
667 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
667 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
668 try:
668 try:
669 l = lock.lock(lockname, 0, releasefn, desc=desc)
669 l = lock.lock(lockname, 0, releasefn, desc=desc)
670 except error.LockHeld, inst:
670 except error.LockHeld, inst:
671 if not wait:
671 if not wait:
672 raise
672 raise
673 self.ui.warn(_("waiting for lock on %s held by %r\n") %
673 self.ui.warn(_("waiting for lock on %s held by %r\n") %
674 (desc, inst.locker))
674 (desc, inst.locker))
675 # default to 600 seconds timeout
675 # default to 600 seconds timeout
676 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
676 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
677 releasefn, desc=desc)
677 releasefn, desc=desc)
678 if acquirefn:
678 if acquirefn:
679 acquirefn()
679 acquirefn()
680 return l
680 return l
681
681
682 def lock(self, wait=True):
682 def lock(self, wait=True):
683 '''Lock the repository store (.hg/store) and return a weak reference
683 '''Lock the repository store (.hg/store) and return a weak reference
684 to the lock. Use this before modifying the store (e.g. committing or
684 to the lock. Use this before modifying the store (e.g. committing or
685 stripping). If you are opening a transaction, get a lock as well.)'''
685 stripping). If you are opening a transaction, get a lock as well.)'''
686 l = self._lockref and self._lockref()
686 l = self._lockref and self._lockref()
687 if l is not None and l.held:
687 if l is not None and l.held:
688 l.lock()
688 l.lock()
689 return l
689 return l
690
690
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
691 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
692 _('repository %s') % self.origroot)
692 _('repository %s') % self.origroot)
693 self._lockref = weakref.ref(l)
693 self._lockref = weakref.ref(l)
694 return l
694 return l
695
695
696 def wlock(self, wait=True):
696 def wlock(self, wait=True):
697 '''Lock the non-store parts of the repository (everything under
697 '''Lock the non-store parts of the repository (everything under
698 .hg except .hg/store) and return a weak reference to the lock.
698 .hg except .hg/store) and return a weak reference to the lock.
699 Use this before modifying files in .hg.'''
699 Use this before modifying files in .hg.'''
700 l = self._wlockref and self._wlockref()
700 l = self._wlockref and self._wlockref()
701 if l is not None and l.held:
701 if l is not None and l.held:
702 l.lock()
702 l.lock()
703 return l
703 return l
704
704
705 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
705 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
706 self.dirstate.invalidate, _('working directory of %s') %
706 self.dirstate.invalidate, _('working directory of %s') %
707 self.origroot)
707 self.origroot)
708 self._wlockref = weakref.ref(l)
708 self._wlockref = weakref.ref(l)
709 return l
709 return l
710
710
711 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
711 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
712 """
712 """
713 commit an individual file as part of a larger transaction
713 commit an individual file as part of a larger transaction
714 """
714 """
715
715
716 fname = fctx.path()
716 fname = fctx.path()
717 text = fctx.data()
717 text = fctx.data()
718 flog = self.file(fname)
718 flog = self.file(fname)
719 fparent1 = manifest1.get(fname, nullid)
719 fparent1 = manifest1.get(fname, nullid)
720 fparent2 = fparent2o = manifest2.get(fname, nullid)
720 fparent2 = fparent2o = manifest2.get(fname, nullid)
721
721
722 meta = {}
722 meta = {}
723 copy = fctx.renamed()
723 copy = fctx.renamed()
724 if copy and copy[0] != fname:
724 if copy and copy[0] != fname:
725 # Mark the new revision of this file as a copy of another
725 # Mark the new revision of this file as a copy of another
726 # file. This copy data will effectively act as a parent
726 # file. This copy data will effectively act as a parent
727 # of this new revision. If this is a merge, the first
727 # of this new revision. If this is a merge, the first
728 # parent will be the nullid (meaning "look up the copy data")
728 # parent will be the nullid (meaning "look up the copy data")
729 # and the second one will be the other parent. For example:
729 # and the second one will be the other parent. For example:
730 #
730 #
731 # 0 --- 1 --- 3 rev1 changes file foo
731 # 0 --- 1 --- 3 rev1 changes file foo
732 # \ / rev2 renames foo to bar and changes it
732 # \ / rev2 renames foo to bar and changes it
733 # \- 2 -/ rev3 should have bar with all changes and
733 # \- 2 -/ rev3 should have bar with all changes and
734 # should record that bar descends from
734 # should record that bar descends from
735 # bar in rev2 and foo in rev1
735 # bar in rev2 and foo in rev1
736 #
736 #
737 # this allows this merge to succeed:
737 # this allows this merge to succeed:
738 #
738 #
739 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
739 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
740 # \ / merging rev3 and rev4 should use bar@rev2
740 # \ / merging rev3 and rev4 should use bar@rev2
741 # \- 2 --- 4 as the merge base
741 # \- 2 --- 4 as the merge base
742 #
742 #
743
743
744 cfname = copy[0]
744 cfname = copy[0]
745 crev = manifest1.get(cfname)
745 crev = manifest1.get(cfname)
746 newfparent = fparent2
746 newfparent = fparent2
747
747
748 if manifest2: # branch merge
748 if manifest2: # branch merge
749 if fparent2 == nullid or crev is None: # copied on remote side
749 if fparent2 == nullid or crev is None: # copied on remote side
750 if cfname in manifest2:
750 if cfname in manifest2:
751 crev = manifest2[cfname]
751 crev = manifest2[cfname]
752 newfparent = fparent1
752 newfparent = fparent1
753
753
754 # find source in nearest ancestor if we've lost track
754 # find source in nearest ancestor if we've lost track
755 if not crev:
755 if not crev:
756 self.ui.debug(" %s: searching for copy revision for %s\n" %
756 self.ui.debug(" %s: searching for copy revision for %s\n" %
757 (fname, cfname))
757 (fname, cfname))
758 for ancestor in self['.'].ancestors():
758 for ancestor in self['.'].ancestors():
759 if cfname in ancestor:
759 if cfname in ancestor:
760 crev = ancestor[cfname].filenode()
760 crev = ancestor[cfname].filenode()
761 break
761 break
762
762
763 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
763 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
764 meta["copy"] = cfname
764 meta["copy"] = cfname
765 meta["copyrev"] = hex(crev)
765 meta["copyrev"] = hex(crev)
766 fparent1, fparent2 = nullid, newfparent
766 fparent1, fparent2 = nullid, newfparent
767 elif fparent2 != nullid:
767 elif fparent2 != nullid:
768 # is one parent an ancestor of the other?
768 # is one parent an ancestor of the other?
769 fparentancestor = flog.ancestor(fparent1, fparent2)
769 fparentancestor = flog.ancestor(fparent1, fparent2)
770 if fparentancestor == fparent1:
770 if fparentancestor == fparent1:
771 fparent1, fparent2 = fparent2, nullid
771 fparent1, fparent2 = fparent2, nullid
772 elif fparentancestor == fparent2:
772 elif fparentancestor == fparent2:
773 fparent2 = nullid
773 fparent2 = nullid
774
774
775 # is the file changed?
775 # is the file changed?
776 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
776 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
777 changelist.append(fname)
777 changelist.append(fname)
778 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
778 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
779
779
780 # are just the flags changed during merge?
780 # are just the flags changed during merge?
781 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
781 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
782 changelist.append(fname)
782 changelist.append(fname)
783
783
784 return fparent1
784 return fparent1
785
785
786 def commit(self, text="", user=None, date=None, match=None, force=False,
786 def commit(self, text="", user=None, date=None, match=None, force=False,
787 editor=False, extra={}):
787 editor=False, extra={}):
788 """Add a new revision to current repository.
788 """Add a new revision to current repository.
789
789
790 Revision information is gathered from the working directory,
790 Revision information is gathered from the working directory,
791 match can be used to filter the committed files. If editor is
791 match can be used to filter the committed files. If editor is
792 supplied, it is called to get a commit message.
792 supplied, it is called to get a commit message.
793 """
793 """
794
794
795 def fail(f, msg):
795 def fail(f, msg):
796 raise util.Abort('%s: %s' % (f, msg))
796 raise util.Abort('%s: %s' % (f, msg))
797
797
798 if not match:
798 if not match:
799 match = matchmod.always(self.root, '')
799 match = matchmod.always(self.root, '')
800
800
801 if not force:
801 if not force:
802 vdirs = []
802 vdirs = []
803 match.dir = vdirs.append
803 match.dir = vdirs.append
804 match.bad = fail
804 match.bad = fail
805
805
806 wlock = self.wlock()
806 wlock = self.wlock()
807 try:
807 try:
808 wctx = self[None]
808 wctx = self[None]
809 merge = len(wctx.parents()) > 1
809 merge = len(wctx.parents()) > 1
810
810
811 if (not force and merge and match and
811 if (not force and merge and match and
812 (match.files() or match.anypats())):
812 (match.files() or match.anypats())):
813 raise util.Abort(_('cannot partially commit a merge '
813 raise util.Abort(_('cannot partially commit a merge '
814 '(do not specify files or patterns)'))
814 '(do not specify files or patterns)'))
815
815
816 changes = self.status(match=match, clean=force)
816 changes = self.status(match=match, clean=force)
817 if force:
817 if force:
818 changes[0].extend(changes[6]) # mq may commit unchanged files
818 changes[0].extend(changes[6]) # mq may commit unchanged files
819
819
820 # check subrepos
820 # check subrepos
821 subs = []
821 subs = []
822 removedsubs = set()
822 removedsubs = set()
823 for p in wctx.parents():
823 for p in wctx.parents():
824 removedsubs.update(s for s in p.substate if match(s))
824 removedsubs.update(s for s in p.substate if match(s))
825 for s in wctx.substate:
825 for s in wctx.substate:
826 removedsubs.discard(s)
826 removedsubs.discard(s)
827 if match(s) and wctx.sub(s).dirty():
827 if match(s) and wctx.sub(s).dirty():
828 subs.append(s)
828 subs.append(s)
829 if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
829 if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
830 changes[0].insert(0, '.hgsubstate')
830 changes[0].insert(0, '.hgsubstate')
831
831
832 # make sure all explicit patterns are matched
832 # make sure all explicit patterns are matched
833 if not force and match.files():
833 if not force and match.files():
834 matched = set(changes[0] + changes[1] + changes[2])
834 matched = set(changes[0] + changes[1] + changes[2])
835
835
836 for f in match.files():
836 for f in match.files():
837 if f == '.' or f in matched or f in wctx.substate:
837 if f == '.' or f in matched or f in wctx.substate:
838 continue
838 continue
839 if f in changes[3]: # missing
839 if f in changes[3]: # missing
840 fail(f, _('file not found!'))
840 fail(f, _('file not found!'))
841 if f in vdirs: # visited directory
841 if f in vdirs: # visited directory
842 d = f + '/'
842 d = f + '/'
843 for mf in matched:
843 for mf in matched:
844 if mf.startswith(d):
844 if mf.startswith(d):
845 break
845 break
846 else:
846 else:
847 fail(f, _("no match under directory!"))
847 fail(f, _("no match under directory!"))
848 elif f not in self.dirstate:
848 elif f not in self.dirstate:
849 fail(f, _("file not tracked!"))
849 fail(f, _("file not tracked!"))
850
850
851 if (not force and not extra.get("close") and not merge
851 if (not force and not extra.get("close") and not merge
852 and not (changes[0] or changes[1] or changes[2])
852 and not (changes[0] or changes[1] or changes[2])
853 and wctx.branch() == wctx.p1().branch()):
853 and wctx.branch() == wctx.p1().branch()):
854 return None
854 return None
855
855
856 ms = mergemod.mergestate(self)
856 ms = mergemod.mergestate(self)
857 for f in changes[0]:
857 for f in changes[0]:
858 if f in ms and ms[f] == 'u':
858 if f in ms and ms[f] == 'u':
859 raise util.Abort(_("unresolved merge conflicts "
859 raise util.Abort(_("unresolved merge conflicts "
860 "(see hg resolve)"))
860 "(see hg resolve)"))
861
861
862 cctx = context.workingctx(self, text, user, date, extra, changes)
862 cctx = context.workingctx(self, text, user, date, extra, changes)
863 if editor:
863 if editor:
864 cctx._text = editor(self, cctx, subs)
864 cctx._text = editor(self, cctx, subs)
865 edited = (text != cctx._text)
865 edited = (text != cctx._text)
866
866
867 # commit subs
867 # commit subs
868 if subs or removedsubs:
868 if subs or removedsubs:
869 state = wctx.substate.copy()
869 state = wctx.substate.copy()
870 for s in subs:
870 for s in subs:
871 sub = wctx.sub(s)
871 sub = wctx.sub(s)
872 self.ui.status(_('committing subrepository %s\n') %
872 self.ui.status(_('committing subrepository %s\n') %
873 subrepo.relpath(sub))
873 subrepo.relpath(sub))
874 sr = sub.commit(cctx._text, user, date)
874 sr = sub.commit(cctx._text, user, date)
875 state[s] = (state[s][0], sr)
875 state[s] = (state[s][0], sr)
876 subrepo.writestate(self, state)
876 subrepo.writestate(self, state)
877
877
878 # Save commit message in case this transaction gets rolled back
878 # Save commit message in case this transaction gets rolled back
879 # (e.g. by a pretxncommit hook). Leave the content alone on
879 # (e.g. by a pretxncommit hook). Leave the content alone on
880 # the assumption that the user will use the same editor again.
880 # the assumption that the user will use the same editor again.
881 msgfile = self.opener('last-message.txt', 'wb')
881 msgfile = self.opener('last-message.txt', 'wb')
882 msgfile.write(cctx._text)
882 msgfile.write(cctx._text)
883 msgfile.close()
883 msgfile.close()
884
884
885 p1, p2 = self.dirstate.parents()
885 p1, p2 = self.dirstate.parents()
886 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
886 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
887 try:
887 try:
888 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
888 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
889 ret = self.commitctx(cctx, True)
889 ret = self.commitctx(cctx, True)
890 except:
890 except:
891 if edited:
891 if edited:
892 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
892 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
893 self.ui.write(
893 self.ui.write(
894 _('note: commit message saved in %s\n') % msgfn)
894 _('note: commit message saved in %s\n') % msgfn)
895 raise
895 raise
896
896
897 # update dirstate and mergestate
897 # update dirstate and mergestate
898 for f in changes[0] + changes[1]:
898 for f in changes[0] + changes[1]:
899 self.dirstate.normal(f)
899 self.dirstate.normal(f)
900 for f in changes[2]:
900 for f in changes[2]:
901 self.dirstate.forget(f)
901 self.dirstate.forget(f)
902 self.dirstate.setparents(ret)
902 self.dirstate.setparents(ret)
903 ms.reset()
903 ms.reset()
904 finally:
904 finally:
905 wlock.release()
905 wlock.release()
906
906
907 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
907 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
908 return ret
908 return ret
909
909
910 def commitctx(self, ctx, error=False):
910 def commitctx(self, ctx, error=False):
911 """Add a new revision to current repository.
911 """Add a new revision to current repository.
912 Revision information is passed via the context argument.
912 Revision information is passed via the context argument.
913 """
913 """
914
914
915 tr = lock = None
915 tr = lock = None
916 removed = ctx.removed()
916 removed = ctx.removed()
917 p1, p2 = ctx.p1(), ctx.p2()
917 p1, p2 = ctx.p1(), ctx.p2()
918 m1 = p1.manifest().copy()
918 m1 = p1.manifest().copy()
919 m2 = p2.manifest()
919 m2 = p2.manifest()
920 user = ctx.user()
920 user = ctx.user()
921
921
922 lock = self.lock()
922 lock = self.lock()
923 try:
923 try:
924 tr = self.transaction("commit")
924 tr = self.transaction("commit")
925 trp = weakref.proxy(tr)
925 trp = weakref.proxy(tr)
926
926
927 # check in files
927 # check in files
928 new = {}
928 new = {}
929 changed = []
929 changed = []
930 linkrev = len(self)
930 linkrev = len(self)
931 for f in sorted(ctx.modified() + ctx.added()):
931 for f in sorted(ctx.modified() + ctx.added()):
932 self.ui.note(f + "\n")
932 self.ui.note(f + "\n")
933 try:
933 try:
934 fctx = ctx[f]
934 fctx = ctx[f]
935 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
935 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
936 changed)
936 changed)
937 m1.set(f, fctx.flags())
937 m1.set(f, fctx.flags())
938 except OSError, inst:
938 except OSError, inst:
939 self.ui.warn(_("trouble committing %s!\n") % f)
939 self.ui.warn(_("trouble committing %s!\n") % f)
940 raise
940 raise
941 except IOError, inst:
941 except IOError, inst:
942 errcode = getattr(inst, 'errno', errno.ENOENT)
942 errcode = getattr(inst, 'errno', errno.ENOENT)
943 if error or errcode and errcode != errno.ENOENT:
943 if error or errcode and errcode != errno.ENOENT:
944 self.ui.warn(_("trouble committing %s!\n") % f)
944 self.ui.warn(_("trouble committing %s!\n") % f)
945 raise
945 raise
946 else:
946 else:
947 removed.append(f)
947 removed.append(f)
948
948
949 # update manifest
949 # update manifest
950 m1.update(new)
950 m1.update(new)
951 removed = [f for f in sorted(removed) if f in m1 or f in m2]
951 removed = [f for f in sorted(removed) if f in m1 or f in m2]
952 drop = [f for f in removed if f in m1]
952 drop = [f for f in removed if f in m1]
953 for f in drop:
953 for f in drop:
954 del m1[f]
954 del m1[f]
955 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
955 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
956 p2.manifestnode(), (new, drop))
956 p2.manifestnode(), (new, drop))
957
957
958 # update changelog
958 # update changelog
959 self.changelog.delayupdate()
959 self.changelog.delayupdate()
960 n = self.changelog.add(mn, changed + removed, ctx.description(),
960 n = self.changelog.add(mn, changed + removed, ctx.description(),
961 trp, p1.node(), p2.node(),
961 trp, p1.node(), p2.node(),
962 user, ctx.date(), ctx.extra().copy())
962 user, ctx.date(), ctx.extra().copy())
963 p = lambda: self.changelog.writepending() and self.root or ""
963 p = lambda: self.changelog.writepending() and self.root or ""
964 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
964 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
965 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
965 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
966 parent2=xp2, pending=p)
966 parent2=xp2, pending=p)
967 self.changelog.finalize(trp)
967 self.changelog.finalize(trp)
968 tr.close()
968 tr.close()
969
969
970 if self._branchcache:
970 if self._branchcache:
971 self.branchtags()
971 self.branchtags()
972 return n
972 return n
973 finally:
973 finally:
974 del tr
974 if tr:
975 tr.release()
975 lock.release()
976 lock.release()
976
977
977 def destroyed(self):
978 def destroyed(self):
978 '''Inform the repository that nodes have been destroyed.
979 '''Inform the repository that nodes have been destroyed.
979 Intended for use by strip and rollback, so there's a common
980 Intended for use by strip and rollback, so there's a common
980 place for anything that has to be done after destroying history.'''
981 place for anything that has to be done after destroying history.'''
981 # XXX it might be nice if we could take the list of destroyed
982 # XXX it might be nice if we could take the list of destroyed
982 # nodes, but I don't see an easy way for rollback() to do that
983 # nodes, but I don't see an easy way for rollback() to do that
983
984
984 # Ensure the persistent tag cache is updated. Doing it now
985 # Ensure the persistent tag cache is updated. Doing it now
985 # means that the tag cache only has to worry about destroyed
986 # means that the tag cache only has to worry about destroyed
986 # heads immediately after a strip/rollback. That in turn
987 # heads immediately after a strip/rollback. That in turn
987 # guarantees that "cachetip == currenttip" (comparing both rev
988 # guarantees that "cachetip == currenttip" (comparing both rev
988 # and node) always means no nodes have been added or destroyed.
989 # and node) always means no nodes have been added or destroyed.
989
990
990 # XXX this is suboptimal when qrefresh'ing: we strip the current
991 # XXX this is suboptimal when qrefresh'ing: we strip the current
991 # head, refresh the tag cache, then immediately add a new head.
992 # head, refresh the tag cache, then immediately add a new head.
992 # But I think doing it this way is necessary for the "instant
993 # But I think doing it this way is necessary for the "instant
993 # tag cache retrieval" case to work.
994 # tag cache retrieval" case to work.
994 self.invalidatecaches()
995 self.invalidatecaches()
995
996
996 def walk(self, match, node=None):
997 def walk(self, match, node=None):
997 '''
998 '''
998 walk recursively through the directory tree or a given
999 walk recursively through the directory tree or a given
999 changeset, finding all files matched by the match
1000 changeset, finding all files matched by the match
1000 function
1001 function
1001 '''
1002 '''
1002 return self[node].walk(match)
1003 return self[node].walk(match)
1003
1004
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists of file names:
    (modified, added, removed, deleted, unknown, ignored, clean)
    """

    def mfmatches(ctx):
        # manifest of ctx restricted to the files accepted by the matcher
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    # accept either changectx objects or things self[...] can resolve
    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx1.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        # 'uncertain' (previously named 'cmp', shadowing the builtin)
        # holds files the dirstate could not classify without a
        # content comparison
        uncertain, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and uncertain:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(uncertain):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f].data())):
                    modified.append(f)
                else:
                    fixup.append(f)

            if listclean:
                clean += fixup

            # update dirstate for files that are actually clean
            if fixup:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in uncertain + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean
    # sort each list in place (this used to be a list comprehension
    # executed purely for its side effects)
    for l in r:
        l.sort()
    return r
1113
1114
def add(self, list):
    """Schedule the given files for tracking.

    list: iterable of repository-relative file names.

    Returns the (repo-relative) names that could not be added:
    files missing on disk, or not regular files/symlinks.  Already
    tracked files only draw a warning; files marked removed are
    revived via normallookup.  Runs under the working-dir lock.
    """
    wlock = self.wlock()
    try:
        rejected = []
        for f in list:
            p = self.wjoin(f)
            try:
                st = os.lstat(p)
            except OSError:
                # was a bare except:, which also swallowed
                # KeyboardInterrupt/SystemExit; only a stat failure
                # means "missing"
                self.ui.warn(_("%s does not exist!\n") % f)
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                self.ui.warn(_("%s: up to %d MB of RAM may be required "
                               "to manage this file\n"
                               "(use 'hg revert %s' to cancel the "
                               "pending addition)\n")
                             % (f, 3 * st.st_size // 1000000, f))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                self.ui.warn(_("%s not added: only files and symlinks "
                               "supported currently\n") % f)
                # report the relative name, consistent with the
                # missing-file branch above (this used to append the
                # absolute path 'p')
                rejected.append(f)
            elif self.dirstate[f] in 'amn':
                self.ui.warn(_("%s already tracked!\n") % f)
            elif self.dirstate[f] == 'r':
                self.dirstate.normallookup(f)
            else:
                self.dirstate.add(f)
        return rejected
    finally:
        wlock.release()
1145
1146
def forget(self, list):
    """Unschedule pending adds for the given files.

    Only files in dirstate state 'a' (added) are forgotten; any
    other state draws a warning.  Runs under the working-dir lock.
    """
    wlock = self.wlock()
    try:
        for f in list:
            if self.dirstate[f] == 'a':
                self.dirstate.forget(f)
            else:
                self.ui.warn(_("%s not added!\n") % f)
    finally:
        wlock.release()
1156
1157
1157 def remove(self, list, unlink=False):
1158 def remove(self, list, unlink=False):
1158 if unlink:
1159 if unlink:
1159 for f in list:
1160 for f in list:
1160 try:
1161 try:
1161 util.unlink(self.wjoin(f))
1162 util.unlink(self.wjoin(f))
1162 except OSError, inst:
1163 except OSError, inst:
1163 if inst.errno != errno.ENOENT:
1164 if inst.errno != errno.ENOENT:
1164 raise
1165 raise
1165 wlock = self.wlock()
1166 wlock = self.wlock()
1166 try:
1167 try:
1167 for f in list:
1168 for f in list:
1168 if unlink and os.path.exists(self.wjoin(f)):
1169 if unlink and os.path.exists(self.wjoin(f)):
1169 self.ui.warn(_("%s still exists!\n") % f)
1170 self.ui.warn(_("%s still exists!\n") % f)
1170 elif self.dirstate[f] == 'a':
1171 elif self.dirstate[f] == 'a':
1171 self.dirstate.forget(f)
1172 self.dirstate.forget(f)
1172 elif f not in self.dirstate:
1173 elif f not in self.dirstate:
1173 self.ui.warn(_("%s not tracked!\n") % f)
1174 self.ui.warn(_("%s not tracked!\n") % f)
1174 else:
1175 else:
1175 self.dirstate.remove(f)
1176 self.dirstate.remove(f)
1176 finally:
1177 finally:
1177 wlock.release()
1178 wlock.release()
1178
1179
def undelete(self, list):
    """Restore files marked removed from the parent revisions' data.

    Each file in state 'r' is rewritten into the working directory
    from whichever dirstate parent's manifest contains it and then
    marked normal again; other files draw a warning.
    """
    # manifests of the non-null dirstate parents
    manifests = [self.manifest.read(self.changelog.read(p)[0])
                 for p in self.dirstate.parents() if p != nullid]
    wlock = self.wlock()
    try:
        for f in list:
            if self.dirstate[f] != 'r':
                self.ui.warn(_("%s not removed!\n") % f)
                continue
            # prefer the first parent's manifest when it has the file
            if f in manifests[0]:
                m = manifests[0]
            else:
                m = manifests[1]
            data = self.file(f).read(m[f])
            self.wwrite(f, data, m.flags(f))
            self.dirstate.normal(f)
    finally:
        wlock.release()
1194
1195
def copy(self, source, dest):
    """Record in the dirstate that dest is a copy of source.

    dest must already exist in the working directory as a regular
    file or symlink; otherwise a warning is issued and nothing is
    recorded.
    """
    p = self.wjoin(dest)
    # guard clauses: destination must exist and be a file or symlink
    if not (os.path.exists(p) or os.path.islink(p)):
        self.ui.warn(_("%s does not exist!\n") % dest)
        return
    if not (os.path.isfile(p) or os.path.islink(p)):
        self.ui.warn(_("copy failed: %s is not a file or a "
                       "symbolic link\n") % dest)
        return
    wlock = self.wlock()
    try:
        if self.dirstate[dest] in '?r':
            self.dirstate.add(dest)
        self.dirstate.copy(source, dest)
    finally:
        wlock.release()
1210
1211
def heads(self, start=None):
    """Return the repository's head nodes, highest revision first."""
    nodes = self.changelog.heads(start)
    # present the newest (highest-numbered) revisions first
    return sorted(nodes, key=self.changelog.rev, reverse=True)
1216
1217
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branchmap = self.branchmap()
    if branch not in branchmap:
        return []
    # the cache keeps heads ordered lowest to highest; callers want
    # the opposite order
    result = list(reversed(branchmap[branch]))
    if start is not None:
        # drop heads that cannot be reached from start
        reachable = set(self.changelog.nodesbetween([start], result)[2])
        result = [h for h in result if h in reachable]
    if not closed:
        # a head whose changelog entry carries 'close' in field 5 is
        # a closed branch head
        result = [h for h in result
                  if 'close' not in self.changelog.read(h)[5]]
    return result
1240
1241
def branches(self, nodes):
    """For each given node, follow first parents back to the nearest
    merge or root and return (head, root, parent1, parent2) tuples.

    With an empty nodes list, start from the changelog tip.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    out = []
    for head in nodes:
        n = head
        while True:
            p1, p2 = self.changelog.parents(n)
            if p2 != nullid or p1 == nullid:
                # stop at a merge (two parents) or at the root
                out.append((head, n, p1, p2))
                break
            n = p1
    return out
1254
1255
def between(self, pairs):
    """For each (top, bottom) pair, walk the first-parent chain from
    top towards bottom and collect the nodes found at exponentially
    growing distances (1, 2, 4, ...) from top.

    Returns one list of sampled nodes per input pair.
    """
    result = []
    for top, bottom in pairs:
        sample = []
        node = top
        step = 0
        nextmark = 1
        while node != bottom and node != nullid:
            if step == nextmark:
                sample.append(node)
                nextmark *= 2
            node = self.changelog.parents(node)[0]
            step += 1
        result.append(sample)
    return result
1273
1274
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return list of roots of the subsets of missing nodes from remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote, see
    outgoing)
    """
    # findcommonincoming returns (common, missing roots, remote heads);
    # this legacy wrapper exposes only the missing roots
    common, missingroots, rheads = self.findcommonincoming(
        remote, base, heads, force)
    return missingroots
1291
1292
def findcommonincoming(self, remote, base=None, heads=None, force=False):
    """Return a tuple (common, missing roots, heads) used to identify
    missing nodes from remote.

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    """
    nodemap = self.changelog.nodemap
    search = []
    fetch = set()
    seen = set()
    seenbranch = set()
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    if self.changelog.tip() == nullid:
        # local repo is empty: everything the remote has is missing
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    unknown = []
    for h in heads:
        if h in nodemap:
            base[h] = 1
        else:
            unknown.append(h)

    heads = unknown
    if not unknown:
        # every remote head is already known locally
        return base.keys(), [], []

    req = set(unknown)
    reqcnt = 0

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug("examining %s:%s\n"
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug("branch already found\n")
                continue
            elif n[1] and n[1] in nodemap: # do we know the base?
                self.ui.debug("found incomplete branch %s:%s\n"
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in nodemap and n[3] in nodemap:
                        self.ui.debug("found new changeset %s\n" %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in nodemap:
                            base[p] = 1 # latest known

                for p in n[2:4]:
                    if p not in req and p not in nodemap:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            self.ui.debug("request %d: %s\n" %
                          (reqcnt, " ".join(map(short, r))))
            # query the unknown parents in batches of ten
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    self.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in nodemap:
                    if f <= 2:
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.add(p)
                        base[i] = 1
                    else:
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in nodemap:
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug("found new changesets starting at " +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.progress(_('searching'), None)
    self.ui.debug("%d total queries\n" % reqcnt)

    return base.keys(), list(fetch), heads
1436
1437
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        # no common-node hints given: run discovery to fill them in
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug("common changesets up to "
                  + " ".join(map(short, base.keys())) + "\n")

    remain = set(self.changelog.nodemap)

    # prune everything remote has from the tree
    remain.remove(nullid)
    queue = base.keys()
    while queue:
        node = queue.pop(0)
        if node in remain:
            remain.remove(node)
            for parent in self.changelog.parents(node):
                queue.append(parent)

    # find every node whose parents have been pruned
    subset = []
    # find every remote head that will get new children
    updated_heads = set()
    for node in remain:
        p1, p2 = self.changelog.parents(node)
        if p1 not in remain and p2 not in remain:
            subset.append(node)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

    # this is the set of all roots we have to push
    if heads:
        return subset, list(updated_heads)
    return subset
1484
1485
def pull(self, remote, heads=None, force=False):
    """Fetch missing changesets from remote and add them locally.

    Returns 0 when there is nothing to pull; otherwise returns the
    result of addchangegroup().  Runs under the store lock.
    """
    lock = self.lock()
    try:
        common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                        force=force)
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        elif heads is None and remote.capable('changegroupsubset'):
            # issue1320, avoid a race if remote changed after discovery
            heads = rheads

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        else:
            if not remote.capable('changegroupsubset'):
                raise util.Abort(_("Partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url())
    finally:
        lock.release()
1511
1512
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
    - 0 means HTTP error *or* nothing to push
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).
    if remote.capable('unbundle'):
        return self.push_unbundle(remote, force, revs, newbranch)
    return self.push_addchangegroup(remote, force, revs, newbranch)
1531
1532
1532 def prepush(self, remote, force, revs, newbranch):
1533 def prepush(self, remote, force, revs, newbranch):
1533 '''Analyze the local and remote repositories and determine which
1534 '''Analyze the local and remote repositories and determine which
1534 changesets need to be pushed to the remote. Return value depends
1535 changesets need to be pushed to the remote. Return value depends
1535 on circumstances:
1536 on circumstances:
1536
1537
1537 If we are not going to push anything, return a tuple (None,
1538 If we are not going to push anything, return a tuple (None,
1538 outgoing) where outgoing is 0 if there are no outgoing
1539 outgoing) where outgoing is 0 if there are no outgoing
1539 changesets and 1 if there are, but we refuse to push them
1540 changesets and 1 if there are, but we refuse to push them
1540 (e.g. would create new remote heads).
1541 (e.g. would create new remote heads).
1541
1542
1542 Otherwise, return a tuple (changegroup, remoteheads), where
1543 Otherwise, return a tuple (changegroup, remoteheads), where
1543 changegroup is a readable file-like object whose read() returns
1544 changegroup is a readable file-like object whose read() returns
1544 successive changegroup chunks ready to be sent over the wire and
1545 successive changegroup chunks ready to be sent over the wire and
1545 remoteheads is the list of remote heads.'''
1546 remoteheads is the list of remote heads.'''
1546 common = {}
1547 common = {}
1547 remote_heads = remote.heads()
1548 remote_heads = remote.heads()
1548 inc = self.findincoming(remote, common, remote_heads, force=force)
1549 inc = self.findincoming(remote, common, remote_heads, force=force)
1549
1550
1550 cl = self.changelog
1551 cl = self.changelog
1551 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1552 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1552 outg, bases, heads = cl.nodesbetween(update, revs)
1553 outg, bases, heads = cl.nodesbetween(update, revs)
1553
1554
1554 if not bases:
1555 if not bases:
1555 self.ui.status(_("no changes found\n"))
1556 self.ui.status(_("no changes found\n"))
1556 return None, 1
1557 return None, 1
1557
1558
1558 if not force and remote_heads != [nullid]:
1559 if not force and remote_heads != [nullid]:
1559
1560
1560 def fail_multiple_heads(unsynced, branch=None):
1561 def fail_multiple_heads(unsynced, branch=None):
1561 if branch:
1562 if branch:
1562 msg = _("abort: push creates new remote heads"
1563 msg = _("abort: push creates new remote heads"
1563 " on branch '%s'!\n") % branch
1564 " on branch '%s'!\n") % branch
1564 else:
1565 else:
1565 msg = _("abort: push creates new remote heads!\n")
1566 msg = _("abort: push creates new remote heads!\n")
1566 self.ui.warn(msg)
1567 self.ui.warn(msg)
1567 if unsynced:
1568 if unsynced:
1568 self.ui.status(_("(you should pull and merge or"
1569 self.ui.status(_("(you should pull and merge or"
1569 " use push -f to force)\n"))
1570 " use push -f to force)\n"))
1570 else:
1571 else:
1571 self.ui.status(_("(did you forget to merge?"
1572 self.ui.status(_("(did you forget to merge?"
1572 " use push -f to force)\n"))
1573 " use push -f to force)\n"))
1573 return None, 0
1574 return None, 0
1574
1575
1575 if remote.capable('branchmap'):
1576 if remote.capable('branchmap'):
1576 # Check for each named branch if we're creating new remote heads.
1577 # Check for each named branch if we're creating new remote heads.
1577 # To be a remote head after push, node must be either:
1578 # To be a remote head after push, node must be either:
1578 # - unknown locally
1579 # - unknown locally
1579 # - a local outgoing head descended from update
1580 # - a local outgoing head descended from update
1580 # - a remote head that's known locally and not
1581 # - a remote head that's known locally and not
1581 # ancestral to an outgoing head
1582 # ancestral to an outgoing head
1582 #
1583 #
1583 # New named branches cannot be created without --force.
1584 # New named branches cannot be created without --force.
1584
1585
1585 # 1. Create set of branches involved in the push.
1586 # 1. Create set of branches involved in the push.
1586 branches = set(self[n].branch() for n in outg)
1587 branches = set(self[n].branch() for n in outg)
1587
1588
1588 # 2. Check for new branches on the remote.
1589 # 2. Check for new branches on the remote.
1589 remotemap = remote.branchmap()
1590 remotemap = remote.branchmap()
1590 newbranches = branches - set(remotemap)
1591 newbranches = branches - set(remotemap)
1591 if newbranches and not newbranch: # new branch requires --new-branch
1592 if newbranches and not newbranch: # new branch requires --new-branch
1592 branchnames = ', '.join("%s" % b for b in newbranches)
1593 branchnames = ', '.join("%s" % b for b in newbranches)
1593 self.ui.warn(_("abort: push creates "
1594 self.ui.warn(_("abort: push creates "
1594 "new remote branches: %s!\n")
1595 "new remote branches: %s!\n")
1595 % branchnames)
1596 % branchnames)
1596 self.ui.status(_("(use 'hg push --new-branch' to create new "
1597 self.ui.status(_("(use 'hg push --new-branch' to create new "
1597 "remote branches)\n"))
1598 "remote branches)\n"))
1598 return None, 0
1599 return None, 0
1599 branches.difference_update(newbranches)
1600 branches.difference_update(newbranches)
1600
1601
1601 # 3. Construct the initial oldmap and newmap dicts.
1602 # 3. Construct the initial oldmap and newmap dicts.
1602 # They contain information about the remote heads before and
1603 # They contain information about the remote heads before and
1603 # after the push, respectively.
1604 # after the push, respectively.
1604 # Heads not found locally are not included in either dict,
1605 # Heads not found locally are not included in either dict,
1605 # since they won't be affected by the push.
1606 # since they won't be affected by the push.
1606 # unsynced contains all branches with incoming changesets.
1607 # unsynced contains all branches with incoming changesets.
1607 oldmap = {}
1608 oldmap = {}
1608 newmap = {}
1609 newmap = {}
1609 unsynced = set()
1610 unsynced = set()
1610 for branch in branches:
1611 for branch in branches:
1611 remoteheads = remotemap[branch]
1612 remoteheads = remotemap[branch]
1612 prunedheads = [h for h in remoteheads if h in cl.nodemap]
1613 prunedheads = [h for h in remoteheads if h in cl.nodemap]
1613 oldmap[branch] = prunedheads
1614 oldmap[branch] = prunedheads
1614 newmap[branch] = list(prunedheads)
1615 newmap[branch] = list(prunedheads)
1615 if len(remoteheads) > len(prunedheads):
1616 if len(remoteheads) > len(prunedheads):
1616 unsynced.add(branch)
1617 unsynced.add(branch)
1617
1618
1618 # 4. Update newmap with outgoing changes.
1619 # 4. Update newmap with outgoing changes.
1619 # This will possibly add new heads and remove existing ones.
1620 # This will possibly add new heads and remove existing ones.
1620 ctxgen = (self[n] for n in outg)
1621 ctxgen = (self[n] for n in outg)
1621 self._updatebranchcache(newmap, ctxgen)
1622 self._updatebranchcache(newmap, ctxgen)
1622
1623
1623 # 5. Check for new heads.
1624 # 5. Check for new heads.
1624 # If there are more heads after the push than before, a suitable
1625 # If there are more heads after the push than before, a suitable
1625 # warning, depending on unsynced status, is displayed.
1626 # warning, depending on unsynced status, is displayed.
1626 for branch in branches:
1627 for branch in branches:
1627 if len(newmap[branch]) > len(oldmap[branch]):
1628 if len(newmap[branch]) > len(oldmap[branch]):
1628 return fail_multiple_heads(branch in unsynced, branch)
1629 return fail_multiple_heads(branch in unsynced, branch)
1629
1630
1630 # 6. Check for unsynced changes on involved branches.
1631 # 6. Check for unsynced changes on involved branches.
1631 if unsynced:
1632 if unsynced:
1632 self.ui.warn(_("note: unsynced remote changes!\n"))
1633 self.ui.warn(_("note: unsynced remote changes!\n"))
1633
1634
1634 else:
1635 else:
1635 # Old servers: Check for new topological heads.
1636 # Old servers: Check for new topological heads.
1636 # Code based on _updatebranchcache.
1637 # Code based on _updatebranchcache.
1637 newheads = set(h for h in remote_heads if h in cl.nodemap)
1638 newheads = set(h for h in remote_heads if h in cl.nodemap)
1638 oldheadcnt = len(newheads)
1639 oldheadcnt = len(newheads)
1639 newheads.update(outg)
1640 newheads.update(outg)
1640 if len(newheads) > 1:
1641 if len(newheads) > 1:
1641 for latest in reversed(outg):
1642 for latest in reversed(outg):
1642 if latest not in newheads:
1643 if latest not in newheads:
1643 continue
1644 continue
1644 minhrev = min(cl.rev(h) for h in newheads)
1645 minhrev = min(cl.rev(h) for h in newheads)
1645 reachable = cl.reachable(latest, cl.node(minhrev))
1646 reachable = cl.reachable(latest, cl.node(minhrev))
1646 reachable.remove(latest)
1647 reachable.remove(latest)
1647 newheads.difference_update(reachable)
1648 newheads.difference_update(reachable)
1648 if len(newheads) > oldheadcnt:
1649 if len(newheads) > oldheadcnt:
1649 return fail_multiple_heads(inc)
1650 return fail_multiple_heads(inc)
1650 if inc:
1651 if inc:
1651 self.ui.warn(_("note: unsynced remote changes!\n"))
1652 self.ui.warn(_("note: unsynced remote changes!\n"))
1652
1653
1653 if revs is None:
1654 if revs is None:
1654 # use the fast path, no race possible on push
1655 # use the fast path, no race possible on push
1655 nodes = self.changelog.findmissing(common.keys())
1656 nodes = self.changelog.findmissing(common.keys())
1656 cg = self._changegroup(nodes, 'push')
1657 cg = self._changegroup(nodes, 'push')
1657 else:
1658 else:
1658 cg = self.changegroupsubset(update, revs, 'push')
1659 cg = self.changegroupsubset(update, revs, 'push')
1659 return cg, remote_heads
1660 return cg, remote_heads
1660
1661
1661 def push_addchangegroup(self, remote, force, revs, newbranch):
1662 def push_addchangegroup(self, remote, force, revs, newbranch):
1662 '''Push a changegroup by locking the remote and sending the
1663 '''Push a changegroup by locking the remote and sending the
1663 addchangegroup command to it. Used for local and old SSH repos.
1664 addchangegroup command to it. Used for local and old SSH repos.
1664 Return an integer: see push().
1665 Return an integer: see push().
1665 '''
1666 '''
1666 lock = remote.lock()
1667 lock = remote.lock()
1667 try:
1668 try:
1668 ret = self.prepush(remote, force, revs, newbranch)
1669 ret = self.prepush(remote, force, revs, newbranch)
1669 if ret[0] is not None:
1670 if ret[0] is not None:
1670 cg, remote_heads = ret
1671 cg, remote_heads = ret
1671 # here, we return an integer indicating remote head count change
1672 # here, we return an integer indicating remote head count change
1672 return remote.addchangegroup(cg, 'push', self.url())
1673 return remote.addchangegroup(cg, 'push', self.url())
1673 # and here we return 0 for "nothing to push" or 1 for
1674 # and here we return 0 for "nothing to push" or 1 for
1674 # "something to push but I refuse"
1675 # "something to push but I refuse"
1675 return ret[1]
1676 return ret[1]
1676 finally:
1677 finally:
1677 lock.release()
1678 lock.release()
1678
1679
1679 def push_unbundle(self, remote, force, revs, newbranch):
1680 def push_unbundle(self, remote, force, revs, newbranch):
1680 '''Push a changegroup by unbundling it on the remote. Used for new
1681 '''Push a changegroup by unbundling it on the remote. Used for new
1681 SSH and HTTP repos. Return an integer: see push().'''
1682 SSH and HTTP repos. Return an integer: see push().'''
1682 # local repo finds heads on server, finds out what revs it
1683 # local repo finds heads on server, finds out what revs it
1683 # must push. once revs transferred, if server finds it has
1684 # must push. once revs transferred, if server finds it has
1684 # different heads (someone else won commit/push race), server
1685 # different heads (someone else won commit/push race), server
1685 # aborts.
1686 # aborts.
1686
1687
1687 ret = self.prepush(remote, force, revs, newbranch)
1688 ret = self.prepush(remote, force, revs, newbranch)
1688 if ret[0] is not None:
1689 if ret[0] is not None:
1689 cg, remote_heads = ret
1690 cg, remote_heads = ret
1690 if force:
1691 if force:
1691 remote_heads = ['force']
1692 remote_heads = ['force']
1692 # ssh: return remote's addchangegroup()
1693 # ssh: return remote's addchangegroup()
1693 # http: return remote's addchangegroup() or 0 for error
1694 # http: return remote's addchangegroup() or 0 for error
1694 return remote.unbundle(cg, remote_heads, 'push')
1695 return remote.unbundle(cg, remote_heads, 'push')
1695 # as in push_addchangegroup()
1696 # as in push_addchangegroup()
1696 return ret[1]
1697 return ret[1]
1697
1698
1698 def changegroupinfo(self, nodes, source):
1699 def changegroupinfo(self, nodes, source):
1699 if self.ui.verbose or source == 'bundle':
1700 if self.ui.verbose or source == 'bundle':
1700 self.ui.status(_("%d changesets found\n") % len(nodes))
1701 self.ui.status(_("%d changesets found\n") % len(nodes))
1701 if self.ui.debugflag:
1702 if self.ui.debugflag:
1702 self.ui.debug("list of changesets:\n")
1703 self.ui.debug("list of changesets:\n")
1703 for node in nodes:
1704 for node in nodes:
1704 self.ui.debug("%s\n" % hex(node))
1705 self.ui.debug("%s\n" % hex(node))
1705
1706
1706 def changegroupsubset(self, bases, heads, source, extranodes=None):
1707 def changegroupsubset(self, bases, heads, source, extranodes=None):
1707 """Compute a changegroup consisting of all the nodes that are
1708 """Compute a changegroup consisting of all the nodes that are
1708 descendents of any of the bases and ancestors of any of the heads.
1709 descendents of any of the bases and ancestors of any of the heads.
1709 Return a chunkbuffer object whose read() method will return
1710 Return a chunkbuffer object whose read() method will return
1710 successive changegroup chunks.
1711 successive changegroup chunks.
1711
1712
1712 It is fairly complex as determining which filenodes and which
1713 It is fairly complex as determining which filenodes and which
1713 manifest nodes need to be included for the changeset to be complete
1714 manifest nodes need to be included for the changeset to be complete
1714 is non-trivial.
1715 is non-trivial.
1715
1716
1716 Another wrinkle is doing the reverse, figuring out which changeset in
1717 Another wrinkle is doing the reverse, figuring out which changeset in
1717 the changegroup a particular filenode or manifestnode belongs to.
1718 the changegroup a particular filenode or manifestnode belongs to.
1718
1719
1719 The caller can specify some nodes that must be included in the
1720 The caller can specify some nodes that must be included in the
1720 changegroup using the extranodes argument. It should be a dict
1721 changegroup using the extranodes argument. It should be a dict
1721 where the keys are the filenames (or 1 for the manifest), and the
1722 where the keys are the filenames (or 1 for the manifest), and the
1722 values are lists of (node, linknode) tuples, where node is a wanted
1723 values are lists of (node, linknode) tuples, where node is a wanted
1723 node and linknode is the changelog node that should be transmitted as
1724 node and linknode is the changelog node that should be transmitted as
1724 the linkrev.
1725 the linkrev.
1725 """
1726 """
1726
1727
1727 # Set up some initial variables
1728 # Set up some initial variables
1728 # Make it easy to refer to self.changelog
1729 # Make it easy to refer to self.changelog
1729 cl = self.changelog
1730 cl = self.changelog
1730 # msng is short for missing - compute the list of changesets in this
1731 # msng is short for missing - compute the list of changesets in this
1731 # changegroup.
1732 # changegroup.
1732 if not bases:
1733 if not bases:
1733 bases = [nullid]
1734 bases = [nullid]
1734 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1735 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1735
1736
1736 if extranodes is None:
1737 if extranodes is None:
1737 # can we go through the fast path ?
1738 # can we go through the fast path ?
1738 heads.sort()
1739 heads.sort()
1739 allheads = self.heads()
1740 allheads = self.heads()
1740 allheads.sort()
1741 allheads.sort()
1741 if heads == allheads:
1742 if heads == allheads:
1742 return self._changegroup(msng_cl_lst, source)
1743 return self._changegroup(msng_cl_lst, source)
1743
1744
1744 # slow path
1745 # slow path
1745 self.hook('preoutgoing', throw=True, source=source)
1746 self.hook('preoutgoing', throw=True, source=source)
1746
1747
1747 self.changegroupinfo(msng_cl_lst, source)
1748 self.changegroupinfo(msng_cl_lst, source)
1748 # Some bases may turn out to be superfluous, and some heads may be
1749 # Some bases may turn out to be superfluous, and some heads may be
1749 # too. nodesbetween will return the minimal set of bases and heads
1750 # too. nodesbetween will return the minimal set of bases and heads
1750 # necessary to re-create the changegroup.
1751 # necessary to re-create the changegroup.
1751
1752
1752 # Known heads are the list of heads that it is assumed the recipient
1753 # Known heads are the list of heads that it is assumed the recipient
1753 # of this changegroup will know about.
1754 # of this changegroup will know about.
1754 knownheads = set()
1755 knownheads = set()
1755 # We assume that all parents of bases are known heads.
1756 # We assume that all parents of bases are known heads.
1756 for n in bases:
1757 for n in bases:
1757 knownheads.update(cl.parents(n))
1758 knownheads.update(cl.parents(n))
1758 knownheads.discard(nullid)
1759 knownheads.discard(nullid)
1759 knownheads = list(knownheads)
1760 knownheads = list(knownheads)
1760 if knownheads:
1761 if knownheads:
1761 # Now that we know what heads are known, we can compute which
1762 # Now that we know what heads are known, we can compute which
1762 # changesets are known. The recipient must know about all
1763 # changesets are known. The recipient must know about all
1763 # changesets required to reach the known heads from the null
1764 # changesets required to reach the known heads from the null
1764 # changeset.
1765 # changeset.
1765 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1766 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1766 junk = None
1767 junk = None
1767 # Transform the list into a set.
1768 # Transform the list into a set.
1768 has_cl_set = set(has_cl_set)
1769 has_cl_set = set(has_cl_set)
1769 else:
1770 else:
1770 # If there were no known heads, the recipient cannot be assumed to
1771 # If there were no known heads, the recipient cannot be assumed to
1771 # know about any changesets.
1772 # know about any changesets.
1772 has_cl_set = set()
1773 has_cl_set = set()
1773
1774
1774 # Make it easy to refer to self.manifest
1775 # Make it easy to refer to self.manifest
1775 mnfst = self.manifest
1776 mnfst = self.manifest
1776 # We don't know which manifests are missing yet
1777 # We don't know which manifests are missing yet
1777 msng_mnfst_set = {}
1778 msng_mnfst_set = {}
1778 # Nor do we know which filenodes are missing.
1779 # Nor do we know which filenodes are missing.
1779 msng_filenode_set = {}
1780 msng_filenode_set = {}
1780
1781
1781 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1782 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1782 junk = None
1783 junk = None
1783
1784
1784 # A changeset always belongs to itself, so the changenode lookup
1785 # A changeset always belongs to itself, so the changenode lookup
1785 # function for a changenode is identity.
1786 # function for a changenode is identity.
1786 def identity(x):
1787 def identity(x):
1787 return x
1788 return x
1788
1789
1789 # If we determine that a particular file or manifest node must be a
1790 # If we determine that a particular file or manifest node must be a
1790 # node that the recipient of the changegroup will already have, we can
1791 # node that the recipient of the changegroup will already have, we can
1791 # also assume the recipient will have all the parents. This function
1792 # also assume the recipient will have all the parents. This function
1792 # prunes them from the set of missing nodes.
1793 # prunes them from the set of missing nodes.
1793 def prune_parents(revlog, hasset, msngset):
1794 def prune_parents(revlog, hasset, msngset):
1794 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1795 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1795 msngset.pop(revlog.node(r), None)
1796 msngset.pop(revlog.node(r), None)
1796
1797
1797 # Use the information collected in collect_manifests_and_files to say
1798 # Use the information collected in collect_manifests_and_files to say
1798 # which changenode any manifestnode belongs to.
1799 # which changenode any manifestnode belongs to.
1799 def lookup_manifest_link(mnfstnode):
1800 def lookup_manifest_link(mnfstnode):
1800 return msng_mnfst_set[mnfstnode]
1801 return msng_mnfst_set[mnfstnode]
1801
1802
1802 # A function generating function that sets up the initial environment
1803 # A function generating function that sets up the initial environment
1803 # the inner function.
1804 # the inner function.
1804 def filenode_collector(changedfiles):
1805 def filenode_collector(changedfiles):
1805 # This gathers information from each manifestnode included in the
1806 # This gathers information from each manifestnode included in the
1806 # changegroup about which filenodes the manifest node references
1807 # changegroup about which filenodes the manifest node references
1807 # so we can include those in the changegroup too.
1808 # so we can include those in the changegroup too.
1808 #
1809 #
1809 # It also remembers which changenode each filenode belongs to. It
1810 # It also remembers which changenode each filenode belongs to. It
1810 # does this by assuming the a filenode belongs to the changenode
1811 # does this by assuming the a filenode belongs to the changenode
1811 # the first manifest that references it belongs to.
1812 # the first manifest that references it belongs to.
1812 def collect_msng_filenodes(mnfstnode):
1813 def collect_msng_filenodes(mnfstnode):
1813 r = mnfst.rev(mnfstnode)
1814 r = mnfst.rev(mnfstnode)
1814 if r - 1 in mnfst.parentrevs(r):
1815 if r - 1 in mnfst.parentrevs(r):
1815 # If the previous rev is one of the parents,
1816 # If the previous rev is one of the parents,
1816 # we only need to see a diff.
1817 # we only need to see a diff.
1817 deltamf = mnfst.readdelta(mnfstnode)
1818 deltamf = mnfst.readdelta(mnfstnode)
1818 # For each line in the delta
1819 # For each line in the delta
1819 for f, fnode in deltamf.iteritems():
1820 for f, fnode in deltamf.iteritems():
1820 f = changedfiles.get(f, None)
1821 f = changedfiles.get(f, None)
1821 # And if the file is in the list of files we care
1822 # And if the file is in the list of files we care
1822 # about.
1823 # about.
1823 if f is not None:
1824 if f is not None:
1824 # Get the changenode this manifest belongs to
1825 # Get the changenode this manifest belongs to
1825 clnode = msng_mnfst_set[mnfstnode]
1826 clnode = msng_mnfst_set[mnfstnode]
1826 # Create the set of filenodes for the file if
1827 # Create the set of filenodes for the file if
1827 # there isn't one already.
1828 # there isn't one already.
1828 ndset = msng_filenode_set.setdefault(f, {})
1829 ndset = msng_filenode_set.setdefault(f, {})
1829 # And set the filenode's changelog node to the
1830 # And set the filenode's changelog node to the
1830 # manifest's if it hasn't been set already.
1831 # manifest's if it hasn't been set already.
1831 ndset.setdefault(fnode, clnode)
1832 ndset.setdefault(fnode, clnode)
1832 else:
1833 else:
1833 # Otherwise we need a full manifest.
1834 # Otherwise we need a full manifest.
1834 m = mnfst.read(mnfstnode)
1835 m = mnfst.read(mnfstnode)
1835 # For every file in we care about.
1836 # For every file in we care about.
1836 for f in changedfiles:
1837 for f in changedfiles:
1837 fnode = m.get(f, None)
1838 fnode = m.get(f, None)
1838 # If it's in the manifest
1839 # If it's in the manifest
1839 if fnode is not None:
1840 if fnode is not None:
1840 # See comments above.
1841 # See comments above.
1841 clnode = msng_mnfst_set[mnfstnode]
1842 clnode = msng_mnfst_set[mnfstnode]
1842 ndset = msng_filenode_set.setdefault(f, {})
1843 ndset = msng_filenode_set.setdefault(f, {})
1843 ndset.setdefault(fnode, clnode)
1844 ndset.setdefault(fnode, clnode)
1844 return collect_msng_filenodes
1845 return collect_msng_filenodes
1845
1846
1846 # We have a list of filenodes we think we need for a file, lets remove
1847 # We have a list of filenodes we think we need for a file, lets remove
1847 # all those we know the recipient must have.
1848 # all those we know the recipient must have.
1848 def prune_filenodes(f, filerevlog):
1849 def prune_filenodes(f, filerevlog):
1849 msngset = msng_filenode_set[f]
1850 msngset = msng_filenode_set[f]
1850 hasset = set()
1851 hasset = set()
1851 # If a 'missing' filenode thinks it belongs to a changenode we
1852 # If a 'missing' filenode thinks it belongs to a changenode we
1852 # assume the recipient must have, then the recipient must have
1853 # assume the recipient must have, then the recipient must have
1853 # that filenode.
1854 # that filenode.
1854 for n in msngset:
1855 for n in msngset:
1855 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1856 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1856 if clnode in has_cl_set:
1857 if clnode in has_cl_set:
1857 hasset.add(n)
1858 hasset.add(n)
1858 prune_parents(filerevlog, hasset, msngset)
1859 prune_parents(filerevlog, hasset, msngset)
1859
1860
1860 # A function generator function that sets up the a context for the
1861 # A function generator function that sets up the a context for the
1861 # inner function.
1862 # inner function.
1862 def lookup_filenode_link_func(fname):
1863 def lookup_filenode_link_func(fname):
1863 msngset = msng_filenode_set[fname]
1864 msngset = msng_filenode_set[fname]
1864 # Lookup the changenode the filenode belongs to.
1865 # Lookup the changenode the filenode belongs to.
1865 def lookup_filenode_link(fnode):
1866 def lookup_filenode_link(fnode):
1866 return msngset[fnode]
1867 return msngset[fnode]
1867 return lookup_filenode_link
1868 return lookup_filenode_link
1868
1869
1869 # Add the nodes that were explicitly requested.
1870 # Add the nodes that were explicitly requested.
1870 def add_extra_nodes(name, nodes):
1871 def add_extra_nodes(name, nodes):
1871 if not extranodes or name not in extranodes:
1872 if not extranodes or name not in extranodes:
1872 return
1873 return
1873
1874
1874 for node, linknode in extranodes[name]:
1875 for node, linknode in extranodes[name]:
1875 if node not in nodes:
1876 if node not in nodes:
1876 nodes[node] = linknode
1877 nodes[node] = linknode
1877
1878
1878 # Now that we have all theses utility functions to help out and
1879 # Now that we have all theses utility functions to help out and
1879 # logically divide up the task, generate the group.
1880 # logically divide up the task, generate the group.
1880 def gengroup():
1881 def gengroup():
1881 # The set of changed files starts empty.
1882 # The set of changed files starts empty.
1882 changedfiles = {}
1883 changedfiles = {}
1883 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1884 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1884
1885
1885 # Create a changenode group generator that will call our functions
1886 # Create a changenode group generator that will call our functions
1886 # back to lookup the owning changenode and collect information.
1887 # back to lookup the owning changenode and collect information.
1887 group = cl.group(msng_cl_lst, identity, collect)
1888 group = cl.group(msng_cl_lst, identity, collect)
1888 cnt = 0
1889 cnt = 0
1889 for chnk in group:
1890 for chnk in group:
1890 yield chnk
1891 yield chnk
1891 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1892 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1892 cnt += 1
1893 cnt += 1
1893 self.ui.progress(_('bundling changes'), None)
1894 self.ui.progress(_('bundling changes'), None)
1894
1895
1895
1896
1896 # Figure out which manifest nodes (of the ones we think might be
1897 # Figure out which manifest nodes (of the ones we think might be
1897 # part of the changegroup) the recipient must know about and
1898 # part of the changegroup) the recipient must know about and
1898 # remove them from the changegroup.
1899 # remove them from the changegroup.
1899 has_mnfst_set = set()
1900 has_mnfst_set = set()
1900 for n in msng_mnfst_set:
1901 for n in msng_mnfst_set:
1901 # If a 'missing' manifest thinks it belongs to a changenode
1902 # If a 'missing' manifest thinks it belongs to a changenode
1902 # the recipient is assumed to have, obviously the recipient
1903 # the recipient is assumed to have, obviously the recipient
1903 # must have that manifest.
1904 # must have that manifest.
1904 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1905 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1905 if linknode in has_cl_set:
1906 if linknode in has_cl_set:
1906 has_mnfst_set.add(n)
1907 has_mnfst_set.add(n)
1907 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1908 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1908 add_extra_nodes(1, msng_mnfst_set)
1909 add_extra_nodes(1, msng_mnfst_set)
1909 msng_mnfst_lst = msng_mnfst_set.keys()
1910 msng_mnfst_lst = msng_mnfst_set.keys()
1910 # Sort the manifestnodes by revision number.
1911 # Sort the manifestnodes by revision number.
1911 msng_mnfst_lst.sort(key=mnfst.rev)
1912 msng_mnfst_lst.sort(key=mnfst.rev)
1912 # Create a generator for the manifestnodes that calls our lookup
1913 # Create a generator for the manifestnodes that calls our lookup
1913 # and data collection functions back.
1914 # and data collection functions back.
1914 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1915 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1915 filenode_collector(changedfiles))
1916 filenode_collector(changedfiles))
1916 cnt = 0
1917 cnt = 0
1917 for chnk in group:
1918 for chnk in group:
1918 yield chnk
1919 yield chnk
1919 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1920 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1920 cnt += 1
1921 cnt += 1
1921 self.ui.progress(_('bundling manifests'), None)
1922 self.ui.progress(_('bundling manifests'), None)
1922
1923
1923 # These are no longer needed, dereference and toss the memory for
1924 # These are no longer needed, dereference and toss the memory for
1924 # them.
1925 # them.
1925 msng_mnfst_lst = None
1926 msng_mnfst_lst = None
1926 msng_mnfst_set.clear()
1927 msng_mnfst_set.clear()
1927
1928
1928 if extranodes:
1929 if extranodes:
1929 for fname in extranodes:
1930 for fname in extranodes:
1930 if isinstance(fname, int):
1931 if isinstance(fname, int):
1931 continue
1932 continue
1932 msng_filenode_set.setdefault(fname, {})
1933 msng_filenode_set.setdefault(fname, {})
1933 changedfiles[fname] = 1
1934 changedfiles[fname] = 1
1934 # Go through all our files in order sorted by name.
1935 # Go through all our files in order sorted by name.
1935 cnt = 0
1936 cnt = 0
1936 for fname in sorted(changedfiles):
1937 for fname in sorted(changedfiles):
1937 filerevlog = self.file(fname)
1938 filerevlog = self.file(fname)
1938 if not len(filerevlog):
1939 if not len(filerevlog):
1939 raise util.Abort(_("empty or missing revlog for %s") % fname)
1940 raise util.Abort(_("empty or missing revlog for %s") % fname)
1940 # Toss out the filenodes that the recipient isn't really
1941 # Toss out the filenodes that the recipient isn't really
1941 # missing.
1942 # missing.
1942 if fname in msng_filenode_set:
1943 if fname in msng_filenode_set:
1943 prune_filenodes(fname, filerevlog)
1944 prune_filenodes(fname, filerevlog)
1944 add_extra_nodes(fname, msng_filenode_set[fname])
1945 add_extra_nodes(fname, msng_filenode_set[fname])
1945 msng_filenode_lst = msng_filenode_set[fname].keys()
1946 msng_filenode_lst = msng_filenode_set[fname].keys()
1946 else:
1947 else:
1947 msng_filenode_lst = []
1948 msng_filenode_lst = []
1948 # If any filenodes are left, generate the group for them,
1949 # If any filenodes are left, generate the group for them,
1949 # otherwise don't bother.
1950 # otherwise don't bother.
1950 if len(msng_filenode_lst) > 0:
1951 if len(msng_filenode_lst) > 0:
1951 yield changegroup.chunkheader(len(fname))
1952 yield changegroup.chunkheader(len(fname))
1952 yield fname
1953 yield fname
1953 # Sort the filenodes by their revision #
1954 # Sort the filenodes by their revision #
1954 msng_filenode_lst.sort(key=filerevlog.rev)
1955 msng_filenode_lst.sort(key=filerevlog.rev)
1955 # Create a group generator and only pass in a changenode
1956 # Create a group generator and only pass in a changenode
1956 # lookup function as we need to collect no information
1957 # lookup function as we need to collect no information
1957 # from filenodes.
1958 # from filenodes.
1958 group = filerevlog.group(msng_filenode_lst,
1959 group = filerevlog.group(msng_filenode_lst,
1959 lookup_filenode_link_func(fname))
1960 lookup_filenode_link_func(fname))
1960 for chnk in group:
1961 for chnk in group:
1961 self.ui.progress(
1962 self.ui.progress(
1962 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1963 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1963 cnt += 1
1964 cnt += 1
1964 yield chnk
1965 yield chnk
1965 if fname in msng_filenode_set:
1966 if fname in msng_filenode_set:
1966 # Don't need this anymore, toss it to free memory.
1967 # Don't need this anymore, toss it to free memory.
1967 del msng_filenode_set[fname]
1968 del msng_filenode_set[fname]
1968 # Signal that no more groups are left.
1969 # Signal that no more groups are left.
1969 yield changegroup.closechunk()
1970 yield changegroup.closechunk()
1970 self.ui.progress(_('bundling files'), None)
1971 self.ui.progress(_('bundling files'), None)
1971
1972
1972 if msng_cl_lst:
1973 if msng_cl_lst:
1973 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1974 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1974
1975
1975 return util.chunkbuffer(gengroup())
1976 return util.chunkbuffer(gengroup())
1976
1977
def changegroup(self, basenodes, source):
    """Return a changegroup of every node descending from basenodes.

    Delegates to changegroupsubset() with the repo's current heads so a
    concurrent commit cannot race extra heads into the bundle (issue1320).
    """
    heads = self.heads()
    return self.changegroupsubset(basenodes, heads, source)
1980
1981
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    Unlike changegroupsubset(), we may assume the recipient already has
    every changenode we aren't sending.

    nodes is the set of nodes to send"""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    revset = set(cl.rev(n) for n in nodes)
    self.changegroupinfo(nodes, source)

    def ident(x):
        # changelog nodes are their own link nodes
        return x

    def wantednodes(log):
        # yield the nodes of *log* whose linked changeset is being sent
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def linklookup(revlog):
        # build a node -> linked-changelog-node mapper for *revlog*
        def lookup(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookup

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # changeset chunks; 'collect' also records which files and
        # manifests were touched as a side effect of iterating
        changedfiles = {}
        mmfs = {}
        collect = changegroup.collector(cl, mmfs, changedfiles)

        cnt = 0
        for chnk in cl.group(nodes, ident, collect):
            self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            cnt += 1
            yield chnk
        self.ui.progress(_('bundling changes'), None)

        # manifest chunks
        mnfst = self.manifest
        cnt = 0
        for chnk in mnfst.group(wantednodes(mnfst), linklookup(mnfst)):
            self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            cnt += 1
            yield chnk
        self.ui.progress(_('bundling manifests'), None)

        # one group per touched file, each preceded by its name chunk
        cnt = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodelist = list(wantednodes(filerevlog))
            if nodelist:
                yield changegroup.chunkheader(len(fname))
                yield fname
                for chnk in filerevlog.group(nodelist, linklookup(filerevlog)):
                    self.ui.progress(
                        _('bundling files'), cnt, item=fname, unit=_('chunks'))
                    cnt += 1
                    yield chnk
        self.ui.progress(_('bundling files'), None)

        # terminator: signal that no more groups are left
        yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
2057
2058
def addchangegroup(self, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    if not source:
        return 0

    def onchangeset(x):
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def torev(x):
        return cl.rev(x)

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
    try:
        trproxy = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)

        class _progress(object):
            # small callable that advances a ui progress bar per chunk
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = _progress()

        chunks = changegroup.chunkiter(source, progress=pr)
        if cl.addgroup(chunks, onchangeset, trproxy) is None and not emptyok:
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for rev in xrange(clstart, clend):
            efiles.update(self[rev].files())
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        chunks = changegroup.chunkiter(source, progress=pr)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunks, torev, trproxy)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        pr.total = efiles
        while True:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            chunks = changegroup.chunkiter(source)
            if fl.addgroup(chunks, torev, trproxy) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # tick off the filenodes the validation pass demanded
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        newheads = len(cl.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trproxy)

        tr.close()
    finally:
        # explicit release() (instead of relying on 'del tr' + refcounting)
        # so an unclosed transaction is aborted even on non-refcounted
        # Python implementations
        tr.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.branchtags()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
2215
2216
2216
2217
def stream_in(self, remote):
    """Perform a streaming clone: copy raw store files from *remote*."""
    fp = remote.stream_out()
    line = fp.readline()
    try:
        resp = int(line)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), line)
    # a nonzero status code means the server refused the request
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))

    self.ui.status(_('streaming all changes\n'))
    line = fp.readline()
    try:
        total_files, total_bytes = map(int, line.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), line)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))

    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        line = fp.readline()
        try:
            name, size = line.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), line)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()

    elapsed = time.time() - start
    if elapsed <= 0:
        elapsed = 0.001  # guard the rate division below against zero
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
2264
2265
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # A streaming clone copies raw store data, so it is only possible
    # when the whole repo is wanted (no head subset) and the server
    # advertises the 'stream' capability.  Clients able to request
    # uncompressed clones can read every repo format such servers can
    # serve; if the revlog format ever changes, version/format flags
    # will be added to the 'stream' capability for compatibility checks.
    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2283
2284
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    The pairs are copied into plain tuples and captured by a standalone
    closure, so the transaction holding the callback keeps no reference
    back to the repository object.
    """
    pairs = [tuple(t) for t in files]
    def renameall():
        for src, dest in pairs:
            util.rename(src, dest)
    return renameall
2291
2292
def instance(ui, path, create):
    """Repository factory: open (or create) a localrepository at *path*."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2294
2295
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,166 +1,176 b''
1 # transaction.py - simple journalling scheme for mercurial
1 # transaction.py - simple journalling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os, errno
15 import os, errno
16 import error
16 import error
17
17
def active(func):
    """Decorator guarding methods that require a live transaction."""
    def checked(self, *args, **kwds):
        # count drops to zero once the transaction is committed or aborted
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return checked
25
25
26 def _playback(journal, report, opener, entries, unlink=True):
26 def _playback(journal, report, opener, entries, unlink=True):
27 for f, o, ignore in entries:
27 for f, o, ignore in entries:
28 if o or not unlink:
28 if o or not unlink:
29 try:
29 try:
30 opener(f, 'a').truncate(o)
30 opener(f, 'a').truncate(o)
31 except IOError:
31 except IOError:
32 report(_("failed to truncate %s\n") % f)
32 report(_("failed to truncate %s\n") % f)
33 raise
33 raise
34 else:
34 else:
35 try:
35 try:
36 fn = opener(f).name
36 fn = opener(f).name
37 os.unlink(fn)
37 os.unlink(fn)
38 except (IOError, OSError), inst:
38 except (IOError, OSError), inst:
39 if inst.errno != errno.ENOENT:
39 if inst.errno != errno.ENOENT:
40 raise
40 raise
41 os.unlink(journal)
41 os.unlink(journal)
42
42
43 class transaction(object):
43 class transaction(object):
44 def __init__(self, report, opener, journal, after=None, createmode=None):
44 def __init__(self, report, opener, journal, after=None, createmode=None):
45 self.count = 1
45 self.count = 1
46 self.usages = 1
46 self.report = report
47 self.report = report
47 self.opener = opener
48 self.opener = opener
48 self.after = after
49 self.after = after
49 self.entries = []
50 self.entries = []
50 self.map = {}
51 self.map = {}
51 self.journal = journal
52 self.journal = journal
52 self._queue = []
53 self._queue = []
53
54
54 self.file = open(self.journal, "w")
55 self.file = open(self.journal, "w")
55 if createmode is not None:
56 if createmode is not None:
56 os.chmod(self.journal, createmode & 0666)
57 os.chmod(self.journal, createmode & 0666)
57
58
58 def __del__(self):
59 def __del__(self):
59 if self.journal:
60 if self.journal:
60 self._abort()
61 self._abort()
61
62
62 @active
63 @active
63 def startgroup(self):
64 def startgroup(self):
64 self._queue.append([])
65 self._queue.append([])
65
66
66 @active
67 @active
67 def endgroup(self):
68 def endgroup(self):
68 q = self._queue.pop()
69 q = self._queue.pop()
69 d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q])
70 d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q])
70 self.entries.extend(q)
71 self.entries.extend(q)
71 self.file.write(d)
72 self.file.write(d)
72 self.file.flush()
73 self.file.flush()
73
74
74 @active
75 @active
75 def add(self, file, offset, data=None):
76 def add(self, file, offset, data=None):
76 if file in self.map:
77 if file in self.map:
77 return
78 return
78 if self._queue:
79 if self._queue:
79 self._queue[-1].append((file, offset, data))
80 self._queue[-1].append((file, offset, data))
80 return
81 return
81
82
82 self.entries.append((file, offset, data))
83 self.entries.append((file, offset, data))
83 self.map[file] = len(self.entries) - 1
84 self.map[file] = len(self.entries) - 1
84 # add enough data to the journal to do the truncate
85 # add enough data to the journal to do the truncate
85 self.file.write("%s\0%d\n" % (file, offset))
86 self.file.write("%s\0%d\n" % (file, offset))
86 self.file.flush()
87 self.file.flush()
87
88
88 @active
89 @active
89 def find(self, file):
90 def find(self, file):
90 if file in self.map:
91 if file in self.map:
91 return self.entries[self.map[file]]
92 return self.entries[self.map[file]]
92 return None
93 return None
93
94
94 @active
95 @active
95 def replace(self, file, offset, data=None):
96 def replace(self, file, offset, data=None):
96 '''
97 '''
97 replace can only replace already committed entries
98 replace can only replace already committed entries
98 that are not pending in the queue
99 that are not pending in the queue
99 '''
100 '''
100
101
101 if file not in self.map:
102 if file not in self.map:
102 raise KeyError(file)
103 raise KeyError(file)
103 index = self.map[file]
104 index = self.map[file]
104 self.entries[index] = (file, offset, data)
105 self.entries[index] = (file, offset, data)
105 self.file.write("%s\0%d\n" % (file, offset))
106 self.file.write("%s\0%d\n" % (file, offset))
106 self.file.flush()
107 self.file.flush()
107
108
108 @active
109 @active
109 def nest(self):
110 def nest(self):
110 self.count += 1
111 self.count += 1
112 self.usages += 1
111 return self
113 return self
112
114
115 def release(self):
116 if self.count > 0:
117 self.usages -= 1
118 # of the transaction scopes are left without being closed, fail
119 if self.count > 0 and self.usages == 0:
120 self._abort()
121
113 def running(self):
122 def running(self):
114 return self.count > 0
123 return self.count > 0
115
124
116 @active
125 @active
117 def close(self):
126 def close(self):
118 '''commit the transaction'''
127 '''commit the transaction'''
119 self.count -= 1
128 self.count -= 1
120 if self.count != 0:
129 if self.count != 0:
121 return
130 return
122 self.file.close()
131 self.file.close()
123 self.entries = []
132 self.entries = []
124 if self.after:
133 if self.after:
125 self.after()
134 self.after()
126 if os.path.isfile(self.journal):
135 if os.path.isfile(self.journal):
127 os.unlink(self.journal)
136 os.unlink(self.journal)
128 self.journal = None
137 self.journal = None
129
138
130 @active
139 @active
131 def abort(self):
140 def abort(self):
132 '''abort the transaction (generally called on error, or when the
141 '''abort the transaction (generally called on error, or when the
133 transaction is not explicitly committed before going out of
142 transaction is not explicitly committed before going out of
134 scope)'''
143 scope)'''
135 self._abort()
144 self._abort()
136
145
137 def _abort(self):
146 def _abort(self):
138 self.count = 0
147 self.count = 0
148 self.usages = 0
139 self.file.close()
149 self.file.close()
140
150
141 try:
151 try:
142 if not self.entries:
152 if not self.entries:
143 if self.journal:
153 if self.journal:
144 os.unlink(self.journal)
154 os.unlink(self.journal)
145 return
155 return
146
156
147 self.report(_("transaction abort!\n"))
157 self.report(_("transaction abort!\n"))
148
158
149 try:
159 try:
150 _playback(self.journal, self.report, self.opener,
160 _playback(self.journal, self.report, self.opener,
151 self.entries, False)
161 self.entries, False)
152 self.report(_("rollback completed\n"))
162 self.report(_("rollback completed\n"))
153 except:
163 except:
154 self.report(_("rollback failed - please run hg recover\n"))
164 self.report(_("rollback failed - please run hg recover\n"))
155 finally:
165 finally:
156 self.journal = None
166 self.journal = None
157
167
158
168
def rollback(opener, file, report):
    """Recover from an interrupted transaction described by journal *file*."""
    entries = []
    for line in open(file):
        name, offset = line.split('\0')
        entries.append((name, int(offset), None))

    _playback(file, report, opener, entries)
General Comments 0
You need to be logged in to leave comments. Login now