##// END OF EJS Templates
localrepo: add desc parameter to transaction...
Steve Borho -
r10881:a685011e default
parent child Browse files
Show More
@@ -1,2818 +1,2818 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details)::
17 Common tasks (use "hg help command" for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40 '''
40 '''
41
41
42 from mercurial.i18n import _
42 from mercurial.i18n import _
43 from mercurial.node import bin, hex, short, nullid, nullrev
43 from mercurial.node import bin, hex, short, nullid, nullrev
44 from mercurial.lock import release
44 from mercurial.lock import release
45 from mercurial import commands, cmdutil, hg, patch, util
45 from mercurial import commands, cmdutil, hg, patch, util
46 from mercurial import repair, extensions, url, error
46 from mercurial import repair, extensions, url, error
47 import os, sys, re, errno
47 import os, sys, re, errno
48
48
commands.norepo += " qclone"

# Patch names look like unix file names: they must be joinable with the
# queue directory to produce the on-disk patch path.
normname = util.normpath
54
54
class statusentry(object):
    """One line of the mq status file: an applied patch.

    Pairs the changeset node of the applied patch with the patch name;
    rendered back as ``<hex-node>:<name>`` by str().
    """

    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __str__(self):
        return '%s:%s' % (hex(self.node), self.name)
61
61
class patchheader(object):
    """Parsed header of an mq patch file.

    Splits the text before the first diff hunk into a commit message
    (self.message) and the raw header lines (self.comments), and picks
    out the fields of 'hg export' style headers ('# User ', '# Date ',
    '# Parent ') or mail-style headers ('From: ', 'Date: ',
    'Subject: ').  The set* methods edit both the parsed field and the
    raw comment lines so the file can be rewritten via str().
    """

    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file to parse.
        # plainmode: when adding fields later, prefer mail-style headers
        # over a '# HG changeset patch' block.
        def eatdiff(lines):
            # drop trailing diff separator lines (diff/Index/=====)
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        diffstart = 0

        # was "for line in file(pf)": the file object was never closed
        # explicitly and leaked on non-refcounting interpreters; close
        # it deterministically instead.
        fp = open(pf)
        try:
            for line in fp:
                line = line.rstrip()
                if (line.startswith('diff --git')
                    or (diffstart and line.startswith('+++ '))):
                    # found the start of the diff body; header is done
                    diffstart = 2
                    break
                diffstart = 0 # reset
                if line.startswith("--- "):
                    diffstart = 1
                    continue
                elif format == "hgpatch":
                    # parse values when importing the result of an hg export
                    if line.startswith("# User "):
                        user = line[7:]
                    elif line.startswith("# Date "):
                        date = line[7:]
                    elif line.startswith("# Parent "):
                        parent = line[9:]
                    elif not line.startswith("# ") and line:
                        message.append(line)
                        format = None
                elif line == '# HG changeset patch':
                    message = []
                    format = "hgpatch"
                elif (format != "tagdone" and (line.startswith("Subject: ") or
                                               line.startswith("subject: "))):
                    subject = line[9:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("From: ") or
                                               line.startswith("from: "))):
                    user = line[6:]
                    format = "tag"
                elif (format != "tagdone" and (line.startswith("Date: ") or
                                               line.startswith("date: "))):
                    date = line[6:]
                    format = "tag"
                elif format == "tag" and line == "":
                    # when looking for tags (subject: from: etc) they
                    # end once you find a blank line in the source
                    format = "tagdone"
                elif message or line:
                    message.append(line)
                comments.append(line)
        finally:
            fp.close()

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # True when a diff body followed the header lines
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        """Set the user field, updating the raw header lines too."""
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set the date field, updating the raw header lines too."""
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        """Set the parent field; only written to the header lines when
        an hg-style header block already exists."""
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        """Replace the commit message, keeping the other header lines."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in range(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        # full header text, ready to prepend to the diff body
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in range(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
233
233
234 class queue(object):
234 class queue(object):
235 def __init__(self, ui, path, patchdir=None):
235 def __init__(self, ui, path, patchdir=None):
236 self.basepath = path
236 self.basepath = path
237 self.path = patchdir or os.path.join(path, "patches")
237 self.path = patchdir or os.path.join(path, "patches")
238 self.opener = util.opener(self.path)
238 self.opener = util.opener(self.path)
239 self.ui = ui
239 self.ui = ui
240 self.applied_dirty = 0
240 self.applied_dirty = 0
241 self.series_dirty = 0
241 self.series_dirty = 0
242 self.series_path = "series"
242 self.series_path = "series"
243 self.status_path = "status"
243 self.status_path = "status"
244 self.guards_path = "guards"
244 self.guards_path = "guards"
245 self.active_guards = None
245 self.active_guards = None
246 self.guards_dirty = False
246 self.guards_dirty = False
247 # Handle mq.git as a bool with extended values
247 # Handle mq.git as a bool with extended values
248 try:
248 try:
249 gitmode = ui.configbool('mq', 'git', None)
249 gitmode = ui.configbool('mq', 'git', None)
250 if gitmode is None:
250 if gitmode is None:
251 raise error.ConfigError()
251 raise error.ConfigError()
252 self.gitmode = gitmode and 'yes' or 'no'
252 self.gitmode = gitmode and 'yes' or 'no'
253 except error.ConfigError:
253 except error.ConfigError:
254 self.gitmode = ui.config('mq', 'git', 'auto').lower()
254 self.gitmode = ui.config('mq', 'git', 'auto').lower()
255 self.plainmode = ui.configbool('mq', 'plain', False)
255 self.plainmode = ui.configbool('mq', 'plain', False)
256
256
257 @util.propertycache
257 @util.propertycache
258 def applied(self):
258 def applied(self):
259 if os.path.exists(self.join(self.status_path)):
259 if os.path.exists(self.join(self.status_path)):
260 def parse(l):
260 def parse(l):
261 n, name = l.split(':', 1)
261 n, name = l.split(':', 1)
262 return statusentry(bin(n), name)
262 return statusentry(bin(n), name)
263 lines = self.opener(self.status_path).read().splitlines()
263 lines = self.opener(self.status_path).read().splitlines()
264 return [parse(l) for l in lines]
264 return [parse(l) for l in lines]
265 return []
265 return []
266
266
267 @util.propertycache
267 @util.propertycache
268 def full_series(self):
268 def full_series(self):
269 if os.path.exists(self.join(self.series_path)):
269 if os.path.exists(self.join(self.series_path)):
270 return self.opener(self.series_path).read().splitlines()
270 return self.opener(self.series_path).read().splitlines()
271 return []
271 return []
272
272
273 @util.propertycache
273 @util.propertycache
274 def series(self):
274 def series(self):
275 self.parse_series()
275 self.parse_series()
276 return self.series
276 return self.series
277
277
278 @util.propertycache
278 @util.propertycache
279 def series_guards(self):
279 def series_guards(self):
280 self.parse_series()
280 self.parse_series()
281 return self.series_guards
281 return self.series_guards
282
282
283 def invalidate(self):
283 def invalidate(self):
284 for a in 'applied full_series series series_guards'.split():
284 for a in 'applied full_series series series_guards'.split():
285 if a in self.__dict__:
285 if a in self.__dict__:
286 delattr(self, a)
286 delattr(self, a)
287 self.applied_dirty = 0
287 self.applied_dirty = 0
288 self.series_dirty = 0
288 self.series_dirty = 0
289 self.guards_dirty = False
289 self.guards_dirty = False
290 self.active_guards = None
290 self.active_guards = None
291
291
292 def diffopts(self, opts={}, patchfn=None):
292 def diffopts(self, opts={}, patchfn=None):
293 diffopts = patch.diffopts(self.ui, opts)
293 diffopts = patch.diffopts(self.ui, opts)
294 if self.gitmode == 'auto':
294 if self.gitmode == 'auto':
295 diffopts.upgrade = True
295 diffopts.upgrade = True
296 elif self.gitmode == 'keep':
296 elif self.gitmode == 'keep':
297 pass
297 pass
298 elif self.gitmode in ('yes', 'no'):
298 elif self.gitmode in ('yes', 'no'):
299 diffopts.git = self.gitmode == 'yes'
299 diffopts.git = self.gitmode == 'yes'
300 else:
300 else:
301 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
301 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
302 ' got %s') % self.gitmode)
302 ' got %s') % self.gitmode)
303 if patchfn:
303 if patchfn:
304 diffopts = self.patchopts(diffopts, patchfn)
304 diffopts = self.patchopts(diffopts, patchfn)
305 return diffopts
305 return diffopts
306
306
307 def patchopts(self, diffopts, *patches):
307 def patchopts(self, diffopts, *patches):
308 """Return a copy of input diff options with git set to true if
308 """Return a copy of input diff options with git set to true if
309 referenced patch is a git patch and should be preserved as such.
309 referenced patch is a git patch and should be preserved as such.
310 """
310 """
311 diffopts = diffopts.copy()
311 diffopts = diffopts.copy()
312 if not diffopts.git and self.gitmode == 'keep':
312 if not diffopts.git and self.gitmode == 'keep':
313 for patchfn in patches:
313 for patchfn in patches:
314 patchf = self.opener(patchfn, 'r')
314 patchf = self.opener(patchfn, 'r')
315 # if the patch was a git patch, refresh it as a git patch
315 # if the patch was a git patch, refresh it as a git patch
316 for line in patchf:
316 for line in patchf:
317 if line.startswith('diff --git'):
317 if line.startswith('diff --git'):
318 diffopts.git = True
318 diffopts.git = True
319 break
319 break
320 patchf.close()
320 patchf.close()
321 return diffopts
321 return diffopts
322
322
323 def join(self, *p):
323 def join(self, *p):
324 return os.path.join(self.path, *p)
324 return os.path.join(self.path, *p)
325
325
326 def find_series(self, patch):
326 def find_series(self, patch):
327 def matchpatch(l):
327 def matchpatch(l):
328 l = l.split('#', 1)[0]
328 l = l.split('#', 1)[0]
329 return l.strip() == patch
329 return l.strip() == patch
330 for index, l in enumerate(self.full_series):
330 for index, l in enumerate(self.full_series):
331 if matchpatch(l):
331 if matchpatch(l):
332 return index
332 return index
333 return None
333 return None
334
334
335 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
335 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
336
336
337 def parse_series(self):
337 def parse_series(self):
338 self.series = []
338 self.series = []
339 self.series_guards = []
339 self.series_guards = []
340 for l in self.full_series:
340 for l in self.full_series:
341 h = l.find('#')
341 h = l.find('#')
342 if h == -1:
342 if h == -1:
343 patch = l
343 patch = l
344 comment = ''
344 comment = ''
345 elif h == 0:
345 elif h == 0:
346 continue
346 continue
347 else:
347 else:
348 patch = l[:h]
348 patch = l[:h]
349 comment = l[h:]
349 comment = l[h:]
350 patch = patch.strip()
350 patch = patch.strip()
351 if patch:
351 if patch:
352 if patch in self.series:
352 if patch in self.series:
353 raise util.Abort(_('%s appears more than once in %s') %
353 raise util.Abort(_('%s appears more than once in %s') %
354 (patch, self.join(self.series_path)))
354 (patch, self.join(self.series_path)))
355 self.series.append(patch)
355 self.series.append(patch)
356 self.series_guards.append(self.guard_re.findall(comment))
356 self.series_guards.append(self.guard_re.findall(comment))
357
357
358 def check_guard(self, guard):
358 def check_guard(self, guard):
359 if not guard:
359 if not guard:
360 return _('guard cannot be an empty string')
360 return _('guard cannot be an empty string')
361 bad_chars = '# \t\r\n\f'
361 bad_chars = '# \t\r\n\f'
362 first = guard[0]
362 first = guard[0]
363 if first in '-+':
363 if first in '-+':
364 return (_('guard %r starts with invalid character: %r') %
364 return (_('guard %r starts with invalid character: %r') %
365 (guard, first))
365 (guard, first))
366 for c in bad_chars:
366 for c in bad_chars:
367 if c in guard:
367 if c in guard:
368 return _('invalid character in guard %r: %r') % (guard, c)
368 return _('invalid character in guard %r: %r') % (guard, c)
369
369
370 def set_active(self, guards):
370 def set_active(self, guards):
371 for guard in guards:
371 for guard in guards:
372 bad = self.check_guard(guard)
372 bad = self.check_guard(guard)
373 if bad:
373 if bad:
374 raise util.Abort(bad)
374 raise util.Abort(bad)
375 guards = sorted(set(guards))
375 guards = sorted(set(guards))
376 self.ui.debug('active guards: %s\n' % ' '.join(guards))
376 self.ui.debug('active guards: %s\n' % ' '.join(guards))
377 self.active_guards = guards
377 self.active_guards = guards
378 self.guards_dirty = True
378 self.guards_dirty = True
379
379
380 def active(self):
380 def active(self):
381 if self.active_guards is None:
381 if self.active_guards is None:
382 self.active_guards = []
382 self.active_guards = []
383 try:
383 try:
384 guards = self.opener(self.guards_path).read().split()
384 guards = self.opener(self.guards_path).read().split()
385 except IOError, err:
385 except IOError, err:
386 if err.errno != errno.ENOENT:
386 if err.errno != errno.ENOENT:
387 raise
387 raise
388 guards = []
388 guards = []
389 for i, guard in enumerate(guards):
389 for i, guard in enumerate(guards):
390 bad = self.check_guard(guard)
390 bad = self.check_guard(guard)
391 if bad:
391 if bad:
392 self.ui.warn('%s:%d: %s\n' %
392 self.ui.warn('%s:%d: %s\n' %
393 (self.join(self.guards_path), i + 1, bad))
393 (self.join(self.guards_path), i + 1, bad))
394 else:
394 else:
395 self.active_guards.append(guard)
395 self.active_guards.append(guard)
396 return self.active_guards
396 return self.active_guards
397
397
398 def set_guards(self, idx, guards):
398 def set_guards(self, idx, guards):
399 for g in guards:
399 for g in guards:
400 if len(g) < 2:
400 if len(g) < 2:
401 raise util.Abort(_('guard %r too short') % g)
401 raise util.Abort(_('guard %r too short') % g)
402 if g[0] not in '-+':
402 if g[0] not in '-+':
403 raise util.Abort(_('guard %r starts with invalid char') % g)
403 raise util.Abort(_('guard %r starts with invalid char') % g)
404 bad = self.check_guard(g[1:])
404 bad = self.check_guard(g[1:])
405 if bad:
405 if bad:
406 raise util.Abort(bad)
406 raise util.Abort(bad)
407 drop = self.guard_re.sub('', self.full_series[idx])
407 drop = self.guard_re.sub('', self.full_series[idx])
408 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
408 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
409 self.parse_series()
409 self.parse_series()
410 self.series_dirty = True
410 self.series_dirty = True
411
411
412 def pushable(self, idx):
412 def pushable(self, idx):
413 if isinstance(idx, str):
413 if isinstance(idx, str):
414 idx = self.series.index(idx)
414 idx = self.series.index(idx)
415 patchguards = self.series_guards[idx]
415 patchguards = self.series_guards[idx]
416 if not patchguards:
416 if not patchguards:
417 return True, None
417 return True, None
418 guards = self.active()
418 guards = self.active()
419 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
419 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
420 if exactneg:
420 if exactneg:
421 return False, exactneg[0]
421 return False, exactneg[0]
422 pos = [g for g in patchguards if g[0] == '+']
422 pos = [g for g in patchguards if g[0] == '+']
423 exactpos = [g for g in pos if g[1:] in guards]
423 exactpos = [g for g in pos if g[1:] in guards]
424 if pos:
424 if pos:
425 if exactpos:
425 if exactpos:
426 return True, exactpos[0]
426 return True, exactpos[0]
427 return False, pos
427 return False, pos
428 return True, ''
428 return True, ''
429
429
430 def explain_pushable(self, idx, all_patches=False):
430 def explain_pushable(self, idx, all_patches=False):
431 write = all_patches and self.ui.write or self.ui.warn
431 write = all_patches and self.ui.write or self.ui.warn
432 if all_patches or self.ui.verbose:
432 if all_patches or self.ui.verbose:
433 if isinstance(idx, str):
433 if isinstance(idx, str):
434 idx = self.series.index(idx)
434 idx = self.series.index(idx)
435 pushable, why = self.pushable(idx)
435 pushable, why = self.pushable(idx)
436 if all_patches and pushable:
436 if all_patches and pushable:
437 if why is None:
437 if why is None:
438 write(_('allowing %s - no guards in effect\n') %
438 write(_('allowing %s - no guards in effect\n') %
439 self.series[idx])
439 self.series[idx])
440 else:
440 else:
441 if not why:
441 if not why:
442 write(_('allowing %s - no matching negative guards\n') %
442 write(_('allowing %s - no matching negative guards\n') %
443 self.series[idx])
443 self.series[idx])
444 else:
444 else:
445 write(_('allowing %s - guarded by %r\n') %
445 write(_('allowing %s - guarded by %r\n') %
446 (self.series[idx], why))
446 (self.series[idx], why))
447 if not pushable:
447 if not pushable:
448 if why:
448 if why:
449 write(_('skipping %s - guarded by %r\n') %
449 write(_('skipping %s - guarded by %r\n') %
450 (self.series[idx], why))
450 (self.series[idx], why))
451 else:
451 else:
452 write(_('skipping %s - no matching guards\n') %
452 write(_('skipping %s - no matching guards\n') %
453 self.series[idx])
453 self.series[idx])
454
454
455 def save_dirty(self):
455 def save_dirty(self):
456 def write_list(items, path):
456 def write_list(items, path):
457 fp = self.opener(path, 'w')
457 fp = self.opener(path, 'w')
458 for i in items:
458 for i in items:
459 fp.write("%s\n" % i)
459 fp.write("%s\n" % i)
460 fp.close()
460 fp.close()
461 if self.applied_dirty:
461 if self.applied_dirty:
462 write_list(map(str, self.applied), self.status_path)
462 write_list(map(str, self.applied), self.status_path)
463 if self.series_dirty:
463 if self.series_dirty:
464 write_list(self.full_series, self.series_path)
464 write_list(self.full_series, self.series_path)
465 if self.guards_dirty:
465 if self.guards_dirty:
466 write_list(self.active_guards, self.guards_path)
466 write_list(self.active_guards, self.guards_path)
467
467
468 def removeundo(self, repo):
468 def removeundo(self, repo):
469 undo = repo.sjoin('undo')
469 undo = repo.sjoin('undo')
470 if not os.path.exists(undo):
470 if not os.path.exists(undo):
471 return
471 return
472 try:
472 try:
473 os.unlink(undo)
473 os.unlink(undo)
474 except OSError, inst:
474 except OSError, inst:
475 self.ui.warn(_('error removing undo: %s\n') % str(inst))
475 self.ui.warn(_('error removing undo: %s\n') % str(inst))
476
476
477 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
477 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
478 fp=None, changes=None, opts={}):
478 fp=None, changes=None, opts={}):
479 stat = opts.get('stat')
479 stat = opts.get('stat')
480 if stat:
480 if stat:
481 opts['unified'] = '0'
481 opts['unified'] = '0'
482
482
483 m = cmdutil.match(repo, files, opts)
483 m = cmdutil.match(repo, files, opts)
484 if fp is None:
484 if fp is None:
485 write = repo.ui.write
485 write = repo.ui.write
486 else:
486 else:
487 def write(s, **kw):
487 def write(s, **kw):
488 fp.write(s)
488 fp.write(s)
489 if stat:
489 if stat:
490 width = self.ui.interactive() and util.termwidth() or 80
490 width = self.ui.interactive() and util.termwidth() or 80
491 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
491 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
492 for chunk, label in patch.diffstatui(util.iterlines(chunks),
492 for chunk, label in patch.diffstatui(util.iterlines(chunks),
493 width=width,
493 width=width,
494 git=diffopts.git):
494 git=diffopts.git):
495 write(chunk, label=label)
495 write(chunk, label=label)
496 else:
496 else:
497 for chunk, label in patch.diffui(repo, node1, node2, m, changes,
497 for chunk, label in patch.diffui(repo, node1, node2, m, changes,
498 diffopts):
498 diffopts):
499 write(chunk, label=label)
499 write(chunk, label=label)
500
500
501 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
501 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
502 # first try just applying the patch
502 # first try just applying the patch
503 (err, n) = self.apply(repo, [patch], update_status=False,
503 (err, n) = self.apply(repo, [patch], update_status=False,
504 strict=True, merge=rev)
504 strict=True, merge=rev)
505
505
506 if err == 0:
506 if err == 0:
507 return (err, n)
507 return (err, n)
508
508
509 if n is None:
509 if n is None:
510 raise util.Abort(_("apply failed for patch %s") % patch)
510 raise util.Abort(_("apply failed for patch %s") % patch)
511
511
512 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
512 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
513
513
514 # apply failed, strip away that rev and merge.
514 # apply failed, strip away that rev and merge.
515 hg.clean(repo, head)
515 hg.clean(repo, head)
516 self.strip(repo, n, update=False, backup='strip')
516 self.strip(repo, n, update=False, backup='strip')
517
517
518 ctx = repo[rev]
518 ctx = repo[rev]
519 ret = hg.merge(repo, rev)
519 ret = hg.merge(repo, rev)
520 if ret:
520 if ret:
521 raise util.Abort(_("update returned %d") % ret)
521 raise util.Abort(_("update returned %d") % ret)
522 n = repo.commit(ctx.description(), ctx.user(), force=True)
522 n = repo.commit(ctx.description(), ctx.user(), force=True)
523 if n is None:
523 if n is None:
524 raise util.Abort(_("repo commit failed"))
524 raise util.Abort(_("repo commit failed"))
525 try:
525 try:
526 ph = patchheader(mergeq.join(patch), self.plainmode)
526 ph = patchheader(mergeq.join(patch), self.plainmode)
527 except:
527 except:
528 raise util.Abort(_("unable to read %s") % patch)
528 raise util.Abort(_("unable to read %s") % patch)
529
529
530 diffopts = self.patchopts(diffopts, patch)
530 diffopts = self.patchopts(diffopts, patch)
531 patchf = self.opener(patch, "w")
531 patchf = self.opener(patch, "w")
532 comments = str(ph)
532 comments = str(ph)
533 if comments:
533 if comments:
534 patchf.write(comments)
534 patchf.write(comments)
535 self.printdiff(repo, diffopts, head, n, fp=patchf)
535 self.printdiff(repo, diffopts, head, n, fp=patchf)
536 patchf.close()
536 patchf.close()
537 self.removeundo(repo)
537 self.removeundo(repo)
538 return (0, n)
538 return (0, n)
539
539
540 def qparents(self, repo, rev=None):
540 def qparents(self, repo, rev=None):
541 if rev is None:
541 if rev is None:
542 (p1, p2) = repo.dirstate.parents()
542 (p1, p2) = repo.dirstate.parents()
543 if p2 == nullid:
543 if p2 == nullid:
544 return p1
544 return p1
545 if not self.applied:
545 if not self.applied:
546 return None
546 return None
547 return self.applied[-1].node
547 return self.applied[-1].node
548 p1, p2 = repo.changelog.parents(rev)
548 p1, p2 = repo.changelog.parents(rev)
549 if p2 != nullid and p2 in [x.node for x in self.applied]:
549 if p2 != nullid and p2 in [x.node for x in self.applied]:
550 return p2
550 return p2
551 return p1
551 return p1
552
552
553 def mergepatch(self, repo, mergeq, series, diffopts):
553 def mergepatch(self, repo, mergeq, series, diffopts):
554 if not self.applied:
554 if not self.applied:
555 # each of the patches merged in will have two parents. This
555 # each of the patches merged in will have two parents. This
556 # can confuse the qrefresh, qdiff, and strip code because it
556 # can confuse the qrefresh, qdiff, and strip code because it
557 # needs to know which parent is actually in the patch queue.
557 # needs to know which parent is actually in the patch queue.
558 # so, we insert a merge marker with only one parent. This way
558 # so, we insert a merge marker with only one parent. This way
559 # the first patch in the queue is never a merge patch
559 # the first patch in the queue is never a merge patch
560 #
560 #
561 pname = ".hg.patches.merge.marker"
561 pname = ".hg.patches.merge.marker"
562 n = repo.commit('[mq]: merge marker', force=True)
562 n = repo.commit('[mq]: merge marker', force=True)
563 self.removeundo(repo)
563 self.removeundo(repo)
564 self.applied.append(statusentry(n, pname))
564 self.applied.append(statusentry(n, pname))
565 self.applied_dirty = 1
565 self.applied_dirty = 1
566
566
567 head = self.qparents(repo)
567 head = self.qparents(repo)
568
568
569 for patch in series:
569 for patch in series:
570 patch = mergeq.lookup(patch, strict=True)
570 patch = mergeq.lookup(patch, strict=True)
571 if not patch:
571 if not patch:
572 self.ui.warn(_("patch %s does not exist\n") % patch)
572 self.ui.warn(_("patch %s does not exist\n") % patch)
573 return (1, None)
573 return (1, None)
574 pushable, reason = self.pushable(patch)
574 pushable, reason = self.pushable(patch)
575 if not pushable:
575 if not pushable:
576 self.explain_pushable(patch, all_patches=True)
576 self.explain_pushable(patch, all_patches=True)
577 continue
577 continue
578 info = mergeq.isapplied(patch)
578 info = mergeq.isapplied(patch)
579 if not info:
579 if not info:
580 self.ui.warn(_("patch %s is not applied\n") % patch)
580 self.ui.warn(_("patch %s is not applied\n") % patch)
581 return (1, None)
581 return (1, None)
582 rev = info[1]
582 rev = info[1]
583 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
583 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
584 if head:
584 if head:
585 self.applied.append(statusentry(head, patch))
585 self.applied.append(statusentry(head, patch))
586 self.applied_dirty = 1
586 self.applied_dirty = 1
587 if err:
587 if err:
588 return (err, head)
588 return (err, head)
589 self.save_dirty()
589 self.save_dirty()
590 return (0, head)
590 return (0, head)
591
591
592 def patch(self, repo, patchfile):
592 def patch(self, repo, patchfile):
593 '''Apply patchfile to the working directory.
593 '''Apply patchfile to the working directory.
594 patchfile: name of patch file'''
594 patchfile: name of patch file'''
595 files = {}
595 files = {}
596 try:
596 try:
597 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
597 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
598 files=files, eolmode=None)
598 files=files, eolmode=None)
599 except Exception, inst:
599 except Exception, inst:
600 self.ui.note(str(inst) + '\n')
600 self.ui.note(str(inst) + '\n')
601 if not self.ui.verbose:
601 if not self.ui.verbose:
602 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
602 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
603 return (False, files, False)
603 return (False, files, False)
604
604
605 return (True, files, fuzz)
605 return (True, files, fuzz)
606
606
607 def apply(self, repo, series, list=False, update_status=True,
607 def apply(self, repo, series, list=False, update_status=True,
608 strict=False, patchdir=None, merge=None, all_files=None):
608 strict=False, patchdir=None, merge=None, all_files=None):
609 wlock = lock = tr = None
609 wlock = lock = tr = None
610 try:
610 try:
611 wlock = repo.wlock()
611 wlock = repo.wlock()
612 lock = repo.lock()
612 lock = repo.lock()
613 tr = repo.transaction()
613 tr = repo.transaction("qpush")
614 try:
614 try:
615 ret = self._apply(repo, series, list, update_status,
615 ret = self._apply(repo, series, list, update_status,
616 strict, patchdir, merge, all_files=all_files)
616 strict, patchdir, merge, all_files=all_files)
617 tr.close()
617 tr.close()
618 self.save_dirty()
618 self.save_dirty()
619 return ret
619 return ret
620 except:
620 except:
621 try:
621 try:
622 tr.abort()
622 tr.abort()
623 finally:
623 finally:
624 repo.invalidate()
624 repo.invalidate()
625 repo.dirstate.invalidate()
625 repo.dirstate.invalidate()
626 raise
626 raise
627 finally:
627 finally:
628 del tr
628 del tr
629 release(lock, wlock)
629 release(lock, wlock)
630 self.removeundo(repo)
630 self.removeundo(repo)
631
631
632 def _apply(self, repo, series, list=False, update_status=True,
632 def _apply(self, repo, series, list=False, update_status=True,
633 strict=False, patchdir=None, merge=None, all_files=None):
633 strict=False, patchdir=None, merge=None, all_files=None):
634 '''returns (error, hash)
634 '''returns (error, hash)
635 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
635 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
636 # TODO unify with commands.py
636 # TODO unify with commands.py
637 if not patchdir:
637 if not patchdir:
638 patchdir = self.path
638 patchdir = self.path
639 err = 0
639 err = 0
640 n = None
640 n = None
641 for patchname in series:
641 for patchname in series:
642 pushable, reason = self.pushable(patchname)
642 pushable, reason = self.pushable(patchname)
643 if not pushable:
643 if not pushable:
644 self.explain_pushable(patchname, all_patches=True)
644 self.explain_pushable(patchname, all_patches=True)
645 continue
645 continue
646 self.ui.status(_("applying %s\n") % patchname)
646 self.ui.status(_("applying %s\n") % patchname)
647 pf = os.path.join(patchdir, patchname)
647 pf = os.path.join(patchdir, patchname)
648
648
649 try:
649 try:
650 ph = patchheader(self.join(patchname), self.plainmode)
650 ph = patchheader(self.join(patchname), self.plainmode)
651 except:
651 except:
652 self.ui.warn(_("unable to read %s\n") % patchname)
652 self.ui.warn(_("unable to read %s\n") % patchname)
653 err = 1
653 err = 1
654 break
654 break
655
655
656 message = ph.message
656 message = ph.message
657 if not message:
657 if not message:
658 message = "imported patch %s\n" % patchname
658 message = "imported patch %s\n" % patchname
659 else:
659 else:
660 if list:
660 if list:
661 message.append("\nimported patch %s" % patchname)
661 message.append("\nimported patch %s" % patchname)
662 message = '\n'.join(message)
662 message = '\n'.join(message)
663
663
664 if ph.haspatch:
664 if ph.haspatch:
665 (patcherr, files, fuzz) = self.patch(repo, pf)
665 (patcherr, files, fuzz) = self.patch(repo, pf)
666 if all_files is not None:
666 if all_files is not None:
667 all_files.update(files)
667 all_files.update(files)
668 patcherr = not patcherr
668 patcherr = not patcherr
669 else:
669 else:
670 self.ui.warn(_("patch %s is empty\n") % patchname)
670 self.ui.warn(_("patch %s is empty\n") % patchname)
671 patcherr, files, fuzz = 0, [], 0
671 patcherr, files, fuzz = 0, [], 0
672
672
673 if merge and files:
673 if merge and files:
674 # Mark as removed/merged and update dirstate parent info
674 # Mark as removed/merged and update dirstate parent info
675 removed = []
675 removed = []
676 merged = []
676 merged = []
677 for f in files:
677 for f in files:
678 if os.path.exists(repo.wjoin(f)):
678 if os.path.exists(repo.wjoin(f)):
679 merged.append(f)
679 merged.append(f)
680 else:
680 else:
681 removed.append(f)
681 removed.append(f)
682 for f in removed:
682 for f in removed:
683 repo.dirstate.remove(f)
683 repo.dirstate.remove(f)
684 for f in merged:
684 for f in merged:
685 repo.dirstate.merge(f)
685 repo.dirstate.merge(f)
686 p1, p2 = repo.dirstate.parents()
686 p1, p2 = repo.dirstate.parents()
687 repo.dirstate.setparents(p1, merge)
687 repo.dirstate.setparents(p1, merge)
688
688
689 files = patch.updatedir(self.ui, repo, files)
689 files = patch.updatedir(self.ui, repo, files)
690 match = cmdutil.matchfiles(repo, files or [])
690 match = cmdutil.matchfiles(repo, files or [])
691 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
691 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
692
692
693 if n is None:
693 if n is None:
694 raise util.Abort(_("repo commit failed"))
694 raise util.Abort(_("repo commit failed"))
695
695
696 if update_status:
696 if update_status:
697 self.applied.append(statusentry(n, patchname))
697 self.applied.append(statusentry(n, patchname))
698
698
699 if patcherr:
699 if patcherr:
700 self.ui.warn(_("patch failed, rejects left in working dir\n"))
700 self.ui.warn(_("patch failed, rejects left in working dir\n"))
701 err = 2
701 err = 2
702 break
702 break
703
703
704 if fuzz and strict:
704 if fuzz and strict:
705 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
705 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
706 err = 3
706 err = 3
707 break
707 break
708 return (err, n)
708 return (err, n)
709
709
710 def _cleanup(self, patches, numrevs, keep=False):
710 def _cleanup(self, patches, numrevs, keep=False):
711 if not keep:
711 if not keep:
712 r = self.qrepo()
712 r = self.qrepo()
713 if r:
713 if r:
714 r.remove(patches, True)
714 r.remove(patches, True)
715 else:
715 else:
716 for p in patches:
716 for p in patches:
717 os.unlink(self.join(p))
717 os.unlink(self.join(p))
718
718
719 if numrevs:
719 if numrevs:
720 del self.applied[:numrevs]
720 del self.applied[:numrevs]
721 self.applied_dirty = 1
721 self.applied_dirty = 1
722
722
723 for i in sorted([self.find_series(p) for p in patches], reverse=True):
723 for i in sorted([self.find_series(p) for p in patches], reverse=True):
724 del self.full_series[i]
724 del self.full_series[i]
725 self.parse_series()
725 self.parse_series()
726 self.series_dirty = 1
726 self.series_dirty = 1
727
727
728 def _revpatches(self, repo, revs):
728 def _revpatches(self, repo, revs):
729 firstrev = repo[self.applied[0].node].rev()
729 firstrev = repo[self.applied[0].node].rev()
730 patches = []
730 patches = []
731 for i, rev in enumerate(revs):
731 for i, rev in enumerate(revs):
732
732
733 if rev < firstrev:
733 if rev < firstrev:
734 raise util.Abort(_('revision %d is not managed') % rev)
734 raise util.Abort(_('revision %d is not managed') % rev)
735
735
736 ctx = repo[rev]
736 ctx = repo[rev]
737 base = self.applied[i].node
737 base = self.applied[i].node
738 if ctx.node() != base:
738 if ctx.node() != base:
739 msg = _('cannot delete revision %d above applied patches')
739 msg = _('cannot delete revision %d above applied patches')
740 raise util.Abort(msg % rev)
740 raise util.Abort(msg % rev)
741
741
742 patch = self.applied[i].name
742 patch = self.applied[i].name
743 for fmt in ('[mq]: %s', 'imported patch %s'):
743 for fmt in ('[mq]: %s', 'imported patch %s'):
744 if ctx.description() == fmt % patch:
744 if ctx.description() == fmt % patch:
745 msg = _('patch %s finalized without changeset message\n')
745 msg = _('patch %s finalized without changeset message\n')
746 repo.ui.status(msg % patch)
746 repo.ui.status(msg % patch)
747 break
747 break
748
748
749 patches.append(patch)
749 patches.append(patch)
750 return patches
750 return patches
751
751
752 def finish(self, repo, revs):
752 def finish(self, repo, revs):
753 patches = self._revpatches(repo, sorted(revs))
753 patches = self._revpatches(repo, sorted(revs))
754 self._cleanup(patches, len(patches))
754 self._cleanup(patches, len(patches))
755
755
756 def delete(self, repo, patches, opts):
756 def delete(self, repo, patches, opts):
757 if not patches and not opts.get('rev'):
757 if not patches and not opts.get('rev'):
758 raise util.Abort(_('qdelete requires at least one revision or '
758 raise util.Abort(_('qdelete requires at least one revision or '
759 'patch name'))
759 'patch name'))
760
760
761 realpatches = []
761 realpatches = []
762 for patch in patches:
762 for patch in patches:
763 patch = self.lookup(patch, strict=True)
763 patch = self.lookup(patch, strict=True)
764 info = self.isapplied(patch)
764 info = self.isapplied(patch)
765 if info:
765 if info:
766 raise util.Abort(_("cannot delete applied patch %s") % patch)
766 raise util.Abort(_("cannot delete applied patch %s") % patch)
767 if patch not in self.series:
767 if patch not in self.series:
768 raise util.Abort(_("patch %s not in series file") % patch)
768 raise util.Abort(_("patch %s not in series file") % patch)
769 realpatches.append(patch)
769 realpatches.append(patch)
770
770
771 numrevs = 0
771 numrevs = 0
772 if opts.get('rev'):
772 if opts.get('rev'):
773 if not self.applied:
773 if not self.applied:
774 raise util.Abort(_('no patches applied'))
774 raise util.Abort(_('no patches applied'))
775 revs = cmdutil.revrange(repo, opts['rev'])
775 revs = cmdutil.revrange(repo, opts['rev'])
776 if len(revs) > 1 and revs[0] > revs[1]:
776 if len(revs) > 1 and revs[0] > revs[1]:
777 revs.reverse()
777 revs.reverse()
778 revpatches = self._revpatches(repo, revs)
778 revpatches = self._revpatches(repo, revs)
779 realpatches += revpatches
779 realpatches += revpatches
780 numrevs = len(revpatches)
780 numrevs = len(revpatches)
781
781
782 self._cleanup(realpatches, numrevs, opts.get('keep'))
782 self._cleanup(realpatches, numrevs, opts.get('keep'))
783
783
784 def check_toppatch(self, repo):
784 def check_toppatch(self, repo):
785 if self.applied:
785 if self.applied:
786 top = self.applied[-1].node
786 top = self.applied[-1].node
787 patch = self.applied[-1].name
787 patch = self.applied[-1].name
788 pp = repo.dirstate.parents()
788 pp = repo.dirstate.parents()
789 if top not in pp:
789 if top not in pp:
790 raise util.Abort(_("working directory revision is not qtip"))
790 raise util.Abort(_("working directory revision is not qtip"))
791 return top, patch
791 return top, patch
792 return None, None
792 return None, None
793
793
794 def check_localchanges(self, repo, force=False, refresh=True):
794 def check_localchanges(self, repo, force=False, refresh=True):
795 m, a, r, d = repo.status()[:4]
795 m, a, r, d = repo.status()[:4]
796 if (m or a or r or d) and not force:
796 if (m or a or r or d) and not force:
797 if refresh:
797 if refresh:
798 raise util.Abort(_("local changes found, refresh first"))
798 raise util.Abort(_("local changes found, refresh first"))
799 else:
799 else:
800 raise util.Abort(_("local changes found"))
800 raise util.Abort(_("local changes found"))
801 return m, a, r, d
801 return m, a, r, d
802
802
803 _reserved = ('series', 'status', 'guards')
803 _reserved = ('series', 'status', 'guards')
804 def check_reserved_name(self, name):
804 def check_reserved_name(self, name):
805 if (name in self._reserved or name.startswith('.hg')
805 if (name in self._reserved or name.startswith('.hg')
806 or name.startswith('.mq') or '#' in name or ':' in name):
806 or name.startswith('.mq') or '#' in name or ':' in name):
807 raise util.Abort(_('"%s" cannot be used as the name of a patch')
807 raise util.Abort(_('"%s" cannot be used as the name of a patch')
808 % name)
808 % name)
809
809
810 def new(self, repo, patchfn, *pats, **opts):
810 def new(self, repo, patchfn, *pats, **opts):
811 """options:
811 """options:
812 msg: a string or a no-argument function returning a string
812 msg: a string or a no-argument function returning a string
813 """
813 """
814 msg = opts.get('msg')
814 msg = opts.get('msg')
815 user = opts.get('user')
815 user = opts.get('user')
816 date = opts.get('date')
816 date = opts.get('date')
817 if date:
817 if date:
818 date = util.parsedate(date)
818 date = util.parsedate(date)
819 diffopts = self.diffopts({'git': opts.get('git')})
819 diffopts = self.diffopts({'git': opts.get('git')})
820 self.check_reserved_name(patchfn)
820 self.check_reserved_name(patchfn)
821 if os.path.exists(self.join(patchfn)):
821 if os.path.exists(self.join(patchfn)):
822 raise util.Abort(_('patch "%s" already exists') % patchfn)
822 raise util.Abort(_('patch "%s" already exists') % patchfn)
823 if opts.get('include') or opts.get('exclude') or pats:
823 if opts.get('include') or opts.get('exclude') or pats:
824 match = cmdutil.match(repo, pats, opts)
824 match = cmdutil.match(repo, pats, opts)
825 # detect missing files in pats
825 # detect missing files in pats
826 def badfn(f, msg):
826 def badfn(f, msg):
827 raise util.Abort('%s: %s' % (f, msg))
827 raise util.Abort('%s: %s' % (f, msg))
828 match.bad = badfn
828 match.bad = badfn
829 m, a, r, d = repo.status(match=match)[:4]
829 m, a, r, d = repo.status(match=match)[:4]
830 else:
830 else:
831 m, a, r, d = self.check_localchanges(repo, force=True)
831 m, a, r, d = self.check_localchanges(repo, force=True)
832 match = cmdutil.matchfiles(repo, m + a + r)
832 match = cmdutil.matchfiles(repo, m + a + r)
833 if len(repo[None].parents()) > 1:
833 if len(repo[None].parents()) > 1:
834 raise util.Abort(_('cannot manage merge changesets'))
834 raise util.Abort(_('cannot manage merge changesets'))
835 commitfiles = m + a + r
835 commitfiles = m + a + r
836 self.check_toppatch(repo)
836 self.check_toppatch(repo)
837 insert = self.full_series_end()
837 insert = self.full_series_end()
838 wlock = repo.wlock()
838 wlock = repo.wlock()
839 try:
839 try:
840 # if patch file write fails, abort early
840 # if patch file write fails, abort early
841 p = self.opener(patchfn, "w")
841 p = self.opener(patchfn, "w")
842 try:
842 try:
843 if self.plainmode:
843 if self.plainmode:
844 if user:
844 if user:
845 p.write("From: " + user + "\n")
845 p.write("From: " + user + "\n")
846 if not date:
846 if not date:
847 p.write("\n")
847 p.write("\n")
848 if date:
848 if date:
849 p.write("Date: %d %d\n\n" % date)
849 p.write("Date: %d %d\n\n" % date)
850 else:
850 else:
851 p.write("# HG changeset patch\n")
851 p.write("# HG changeset patch\n")
852 p.write("# Parent "
852 p.write("# Parent "
853 + hex(repo[None].parents()[0].node()) + "\n")
853 + hex(repo[None].parents()[0].node()) + "\n")
854 if user:
854 if user:
855 p.write("# User " + user + "\n")
855 p.write("# User " + user + "\n")
856 if date:
856 if date:
857 p.write("# Date %s %s\n\n" % date)
857 p.write("# Date %s %s\n\n" % date)
858 if hasattr(msg, '__call__'):
858 if hasattr(msg, '__call__'):
859 msg = msg()
859 msg = msg()
860 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
860 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
861 n = repo.commit(commitmsg, user, date, match=match, force=True)
861 n = repo.commit(commitmsg, user, date, match=match, force=True)
862 if n is None:
862 if n is None:
863 raise util.Abort(_("repo commit failed"))
863 raise util.Abort(_("repo commit failed"))
864 try:
864 try:
865 self.full_series[insert:insert] = [patchfn]
865 self.full_series[insert:insert] = [patchfn]
866 self.applied.append(statusentry(n, patchfn))
866 self.applied.append(statusentry(n, patchfn))
867 self.parse_series()
867 self.parse_series()
868 self.series_dirty = 1
868 self.series_dirty = 1
869 self.applied_dirty = 1
869 self.applied_dirty = 1
870 if msg:
870 if msg:
871 msg = msg + "\n\n"
871 msg = msg + "\n\n"
872 p.write(msg)
872 p.write(msg)
873 if commitfiles:
873 if commitfiles:
874 parent = self.qparents(repo, n)
874 parent = self.qparents(repo, n)
875 chunks = patch.diff(repo, node1=parent, node2=n,
875 chunks = patch.diff(repo, node1=parent, node2=n,
876 match=match, opts=diffopts)
876 match=match, opts=diffopts)
877 for chunk in chunks:
877 for chunk in chunks:
878 p.write(chunk)
878 p.write(chunk)
879 p.close()
879 p.close()
880 wlock.release()
880 wlock.release()
881 wlock = None
881 wlock = None
882 r = self.qrepo()
882 r = self.qrepo()
883 if r:
883 if r:
884 r.add([patchfn])
884 r.add([patchfn])
885 except:
885 except:
886 repo.rollback()
886 repo.rollback()
887 raise
887 raise
888 except Exception:
888 except Exception:
889 patchpath = self.join(patchfn)
889 patchpath = self.join(patchfn)
890 try:
890 try:
891 os.unlink(patchpath)
891 os.unlink(patchpath)
892 except:
892 except:
893 self.ui.warn(_('error unlinking %s\n') % patchpath)
893 self.ui.warn(_('error unlinking %s\n') % patchpath)
894 raise
894 raise
895 self.removeundo(repo)
895 self.removeundo(repo)
896 finally:
896 finally:
897 release(wlock)
897 release(wlock)
898
898
899 def strip(self, repo, rev, update=True, backup="all", force=None):
899 def strip(self, repo, rev, update=True, backup="all", force=None):
900 wlock = lock = None
900 wlock = lock = None
901 try:
901 try:
902 wlock = repo.wlock()
902 wlock = repo.wlock()
903 lock = repo.lock()
903 lock = repo.lock()
904
904
905 if update:
905 if update:
906 self.check_localchanges(repo, force=force, refresh=False)
906 self.check_localchanges(repo, force=force, refresh=False)
907 urev = self.qparents(repo, rev)
907 urev = self.qparents(repo, rev)
908 hg.clean(repo, urev)
908 hg.clean(repo, urev)
909 repo.dirstate.write()
909 repo.dirstate.write()
910
910
911 self.removeundo(repo)
911 self.removeundo(repo)
912 repair.strip(self.ui, repo, rev, backup)
912 repair.strip(self.ui, repo, rev, backup)
913 # strip may have unbundled a set of backed up revisions after
913 # strip may have unbundled a set of backed up revisions after
914 # the actual strip
914 # the actual strip
915 self.removeundo(repo)
915 self.removeundo(repo)
916 finally:
916 finally:
917 release(lock, wlock)
917 release(lock, wlock)
918
918
919 def isapplied(self, patch):
919 def isapplied(self, patch):
920 """returns (index, rev, patch)"""
920 """returns (index, rev, patch)"""
921 for i, a in enumerate(self.applied):
921 for i, a in enumerate(self.applied):
922 if a.name == patch:
922 if a.name == patch:
923 return (i, a.node, a.name)
923 return (i, a.node, a.name)
924 return None
924 return None
925
925
926 # if the exact patch name does not exist, we try a few
926 # if the exact patch name does not exist, we try a few
927 # variations. If strict is passed, we try only #1
927 # variations. If strict is passed, we try only #1
928 #
928 #
929 # 1) a number to indicate an offset in the series file
929 # 1) a number to indicate an offset in the series file
930 # 2) a unique substring of the patch name was given
930 # 2) a unique substring of the patch name was given
931 # 3) patchname[-+]num to indicate an offset in the series file
931 # 3) patchname[-+]num to indicate an offset in the series file
932 def lookup(self, patch, strict=False):
932 def lookup(self, patch, strict=False):
933 patch = patch and str(patch)
933 patch = patch and str(patch)
934
934
935 def partial_name(s):
935 def partial_name(s):
936 if s in self.series:
936 if s in self.series:
937 return s
937 return s
938 matches = [x for x in self.series if s in x]
938 matches = [x for x in self.series if s in x]
939 if len(matches) > 1:
939 if len(matches) > 1:
940 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
940 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
941 for m in matches:
941 for m in matches:
942 self.ui.warn(' %s\n' % m)
942 self.ui.warn(' %s\n' % m)
943 return None
943 return None
944 if matches:
944 if matches:
945 return matches[0]
945 return matches[0]
946 if self.series and self.applied:
946 if self.series and self.applied:
947 if s == 'qtip':
947 if s == 'qtip':
948 return self.series[self.series_end(True)-1]
948 return self.series[self.series_end(True)-1]
949 if s == 'qbase':
949 if s == 'qbase':
950 return self.series[0]
950 return self.series[0]
951 return None
951 return None
952
952
953 if patch is None:
953 if patch is None:
954 return None
954 return None
955 if patch in self.series:
955 if patch in self.series:
956 return patch
956 return patch
957
957
958 if not os.path.isfile(self.join(patch)):
958 if not os.path.isfile(self.join(patch)):
959 try:
959 try:
960 sno = int(patch)
960 sno = int(patch)
961 except (ValueError, OverflowError):
961 except (ValueError, OverflowError):
962 pass
962 pass
963 else:
963 else:
964 if -len(self.series) <= sno < len(self.series):
964 if -len(self.series) <= sno < len(self.series):
965 return self.series[sno]
965 return self.series[sno]
966
966
967 if not strict:
967 if not strict:
968 res = partial_name(patch)
968 res = partial_name(patch)
969 if res:
969 if res:
970 return res
970 return res
971 minus = patch.rfind('-')
971 minus = patch.rfind('-')
972 if minus >= 0:
972 if minus >= 0:
973 res = partial_name(patch[:minus])
973 res = partial_name(patch[:minus])
974 if res:
974 if res:
975 i = self.series.index(res)
975 i = self.series.index(res)
976 try:
976 try:
977 off = int(patch[minus + 1:] or 1)
977 off = int(patch[minus + 1:] or 1)
978 except (ValueError, OverflowError):
978 except (ValueError, OverflowError):
979 pass
979 pass
980 else:
980 else:
981 if i - off >= 0:
981 if i - off >= 0:
982 return self.series[i - off]
982 return self.series[i - off]
983 plus = patch.rfind('+')
983 plus = patch.rfind('+')
984 if plus >= 0:
984 if plus >= 0:
985 res = partial_name(patch[:plus])
985 res = partial_name(patch[:plus])
986 if res:
986 if res:
987 i = self.series.index(res)
987 i = self.series.index(res)
988 try:
988 try:
989 off = int(patch[plus + 1:] or 1)
989 off = int(patch[plus + 1:] or 1)
990 except (ValueError, OverflowError):
990 except (ValueError, OverflowError):
991 pass
991 pass
992 else:
992 else:
993 if i + off < len(self.series):
993 if i + off < len(self.series):
994 return self.series[i + off]
994 return self.series[i + off]
995 raise util.Abort(_("patch %s not in series") % patch)
995 raise util.Abort(_("patch %s not in series") % patch)
996
996
997 def push(self, repo, patch=None, force=False, list=False,
997 def push(self, repo, patch=None, force=False, list=False,
998 mergeq=None, all=False):
998 mergeq=None, all=False):
999 diffopts = self.diffopts()
999 diffopts = self.diffopts()
1000 wlock = repo.wlock()
1000 wlock = repo.wlock()
1001 try:
1001 try:
1002 heads = []
1002 heads = []
1003 for b, ls in repo.branchmap().iteritems():
1003 for b, ls in repo.branchmap().iteritems():
1004 heads += ls
1004 heads += ls
1005 if not heads:
1005 if not heads:
1006 heads = [nullid]
1006 heads = [nullid]
1007 if repo.dirstate.parents()[0] not in heads:
1007 if repo.dirstate.parents()[0] not in heads:
1008 self.ui.status(_("(working directory not at a head)\n"))
1008 self.ui.status(_("(working directory not at a head)\n"))
1009
1009
1010 if not self.series:
1010 if not self.series:
1011 self.ui.warn(_('no patches in series\n'))
1011 self.ui.warn(_('no patches in series\n'))
1012 return 0
1012 return 0
1013
1013
1014 patch = self.lookup(patch)
1014 patch = self.lookup(patch)
1015 # Suppose our series file is: A B C and the current 'top'
1015 # Suppose our series file is: A B C and the current 'top'
1016 # patch is B. qpush C should be performed (moving forward)
1016 # patch is B. qpush C should be performed (moving forward)
1017 # qpush B is a NOP (no change) qpush A is an error (can't
1017 # qpush B is a NOP (no change) qpush A is an error (can't
1018 # go backwards with qpush)
1018 # go backwards with qpush)
1019 if patch:
1019 if patch:
1020 info = self.isapplied(patch)
1020 info = self.isapplied(patch)
1021 if info:
1021 if info:
1022 if info[0] < len(self.applied) - 1:
1022 if info[0] < len(self.applied) - 1:
1023 raise util.Abort(
1023 raise util.Abort(
1024 _("cannot push to a previous patch: %s") % patch)
1024 _("cannot push to a previous patch: %s") % patch)
1025 self.ui.warn(
1025 self.ui.warn(
1026 _('qpush: %s is already at the top\n') % patch)
1026 _('qpush: %s is already at the top\n') % patch)
1027 return
1027 return
1028 pushable, reason = self.pushable(patch)
1028 pushable, reason = self.pushable(patch)
1029 if not pushable:
1029 if not pushable:
1030 if reason:
1030 if reason:
1031 reason = _('guarded by %r') % reason
1031 reason = _('guarded by %r') % reason
1032 else:
1032 else:
1033 reason = _('no matching guards')
1033 reason = _('no matching guards')
1034 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1034 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1035 return 1
1035 return 1
1036 elif all:
1036 elif all:
1037 patch = self.series[-1]
1037 patch = self.series[-1]
1038 if self.isapplied(patch):
1038 if self.isapplied(patch):
1039 self.ui.warn(_('all patches are currently applied\n'))
1039 self.ui.warn(_('all patches are currently applied\n'))
1040 return 0
1040 return 0
1041
1041
1042 # Following the above example, starting at 'top' of B:
1042 # Following the above example, starting at 'top' of B:
1043 # qpush should be performed (pushes C), but a subsequent
1043 # qpush should be performed (pushes C), but a subsequent
1044 # qpush without an argument is an error (nothing to
1044 # qpush without an argument is an error (nothing to
1045 # apply). This allows a loop of "...while hg qpush..." to
1045 # apply). This allows a loop of "...while hg qpush..." to
1046 # work as it detects an error when done
1046 # work as it detects an error when done
1047 start = self.series_end()
1047 start = self.series_end()
1048 if start == len(self.series):
1048 if start == len(self.series):
1049 self.ui.warn(_('patch series already fully applied\n'))
1049 self.ui.warn(_('patch series already fully applied\n'))
1050 return 1
1050 return 1
1051 if not force:
1051 if not force:
1052 self.check_localchanges(repo)
1052 self.check_localchanges(repo)
1053
1053
1054 self.applied_dirty = 1
1054 self.applied_dirty = 1
1055 if start > 0:
1055 if start > 0:
1056 self.check_toppatch(repo)
1056 self.check_toppatch(repo)
1057 if not patch:
1057 if not patch:
1058 patch = self.series[start]
1058 patch = self.series[start]
1059 end = start + 1
1059 end = start + 1
1060 else:
1060 else:
1061 end = self.series.index(patch, start) + 1
1061 end = self.series.index(patch, start) + 1
1062
1062
1063 s = self.series[start:end]
1063 s = self.series[start:end]
1064 all_files = set()
1064 all_files = set()
1065 try:
1065 try:
1066 if mergeq:
1066 if mergeq:
1067 ret = self.mergepatch(repo, mergeq, s, diffopts)
1067 ret = self.mergepatch(repo, mergeq, s, diffopts)
1068 else:
1068 else:
1069 ret = self.apply(repo, s, list, all_files=all_files)
1069 ret = self.apply(repo, s, list, all_files=all_files)
1070 except:
1070 except:
1071 self.ui.warn(_('cleaning up working directory...'))
1071 self.ui.warn(_('cleaning up working directory...'))
1072 node = repo.dirstate.parents()[0]
1072 node = repo.dirstate.parents()[0]
1073 hg.revert(repo, node, None)
1073 hg.revert(repo, node, None)
1074 # only remove unknown files that we know we touched or
1074 # only remove unknown files that we know we touched or
1075 # created while patching
1075 # created while patching
1076 for f in all_files:
1076 for f in all_files:
1077 if f not in repo.dirstate:
1077 if f not in repo.dirstate:
1078 try:
1078 try:
1079 util.unlink(repo.wjoin(f))
1079 util.unlink(repo.wjoin(f))
1080 except OSError, inst:
1080 except OSError, inst:
1081 if inst.errno != errno.ENOENT:
1081 if inst.errno != errno.ENOENT:
1082 raise
1082 raise
1083 self.ui.warn(_('done\n'))
1083 self.ui.warn(_('done\n'))
1084 raise
1084 raise
1085
1085
1086 if not self.applied:
1086 if not self.applied:
1087 return ret[0]
1087 return ret[0]
1088 top = self.applied[-1].name
1088 top = self.applied[-1].name
1089 if ret[0] and ret[0] > 1:
1089 if ret[0] and ret[0] > 1:
1090 msg = _("errors during apply, please fix and refresh %s\n")
1090 msg = _("errors during apply, please fix and refresh %s\n")
1091 self.ui.write(msg % top)
1091 self.ui.write(msg % top)
1092 else:
1092 else:
1093 self.ui.write(_("now at: %s\n") % top)
1093 self.ui.write(_("now at: %s\n") % top)
1094 return ret[0]
1094 return ret[0]
1095
1095
1096 finally:
1096 finally:
1097 wlock.release()
1097 wlock.release()
1098
1098
1099 def pop(self, repo, patch=None, force=False, update=True, all=False):
1099 def pop(self, repo, patch=None, force=False, update=True, all=False):
1100 wlock = repo.wlock()
1100 wlock = repo.wlock()
1101 try:
1101 try:
1102 if patch:
1102 if patch:
1103 # index, rev, patch
1103 # index, rev, patch
1104 info = self.isapplied(patch)
1104 info = self.isapplied(patch)
1105 if not info:
1105 if not info:
1106 patch = self.lookup(patch)
1106 patch = self.lookup(patch)
1107 info = self.isapplied(patch)
1107 info = self.isapplied(patch)
1108 if not info:
1108 if not info:
1109 raise util.Abort(_("patch %s is not applied") % patch)
1109 raise util.Abort(_("patch %s is not applied") % patch)
1110
1110
1111 if not self.applied:
1111 if not self.applied:
1112 # Allow qpop -a to work repeatedly,
1112 # Allow qpop -a to work repeatedly,
1113 # but not qpop without an argument
1113 # but not qpop without an argument
1114 self.ui.warn(_("no patches applied\n"))
1114 self.ui.warn(_("no patches applied\n"))
1115 return not all
1115 return not all
1116
1116
1117 if all:
1117 if all:
1118 start = 0
1118 start = 0
1119 elif patch:
1119 elif patch:
1120 start = info[0] + 1
1120 start = info[0] + 1
1121 else:
1121 else:
1122 start = len(self.applied) - 1
1122 start = len(self.applied) - 1
1123
1123
1124 if start >= len(self.applied):
1124 if start >= len(self.applied):
1125 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1125 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1126 return
1126 return
1127
1127
1128 if not update:
1128 if not update:
1129 parents = repo.dirstate.parents()
1129 parents = repo.dirstate.parents()
1130 rr = [x.node for x in self.applied]
1130 rr = [x.node for x in self.applied]
1131 for p in parents:
1131 for p in parents:
1132 if p in rr:
1132 if p in rr:
1133 self.ui.warn(_("qpop: forcing dirstate update\n"))
1133 self.ui.warn(_("qpop: forcing dirstate update\n"))
1134 update = True
1134 update = True
1135 else:
1135 else:
1136 parents = [p.node() for p in repo[None].parents()]
1136 parents = [p.node() for p in repo[None].parents()]
1137 needupdate = False
1137 needupdate = False
1138 for entry in self.applied[start:]:
1138 for entry in self.applied[start:]:
1139 if entry.node in parents:
1139 if entry.node in parents:
1140 needupdate = True
1140 needupdate = True
1141 break
1141 break
1142 update = needupdate
1142 update = needupdate
1143
1143
1144 if not force and update:
1144 if not force and update:
1145 self.check_localchanges(repo)
1145 self.check_localchanges(repo)
1146
1146
1147 self.applied_dirty = 1
1147 self.applied_dirty = 1
1148 end = len(self.applied)
1148 end = len(self.applied)
1149 rev = self.applied[start].node
1149 rev = self.applied[start].node
1150 if update:
1150 if update:
1151 top = self.check_toppatch(repo)[0]
1151 top = self.check_toppatch(repo)[0]
1152
1152
1153 try:
1153 try:
1154 heads = repo.changelog.heads(rev)
1154 heads = repo.changelog.heads(rev)
1155 except error.LookupError:
1155 except error.LookupError:
1156 node = short(rev)
1156 node = short(rev)
1157 raise util.Abort(_('trying to pop unknown node %s') % node)
1157 raise util.Abort(_('trying to pop unknown node %s') % node)
1158
1158
1159 if heads != [self.applied[-1].node]:
1159 if heads != [self.applied[-1].node]:
1160 raise util.Abort(_("popping would remove a revision not "
1160 raise util.Abort(_("popping would remove a revision not "
1161 "managed by this patch queue"))
1161 "managed by this patch queue"))
1162
1162
1163 # we know there are no local changes, so we can make a simplified
1163 # we know there are no local changes, so we can make a simplified
1164 # form of hg.update.
1164 # form of hg.update.
1165 if update:
1165 if update:
1166 qp = self.qparents(repo, rev)
1166 qp = self.qparents(repo, rev)
1167 ctx = repo[qp]
1167 ctx = repo[qp]
1168 m, a, r, d = repo.status(qp, top)[:4]
1168 m, a, r, d = repo.status(qp, top)[:4]
1169 if d:
1169 if d:
1170 raise util.Abort(_("deletions found between repo revs"))
1170 raise util.Abort(_("deletions found between repo revs"))
1171 for f in a:
1171 for f in a:
1172 try:
1172 try:
1173 util.unlink(repo.wjoin(f))
1173 util.unlink(repo.wjoin(f))
1174 except OSError, e:
1174 except OSError, e:
1175 if e.errno != errno.ENOENT:
1175 if e.errno != errno.ENOENT:
1176 raise
1176 raise
1177 repo.dirstate.forget(f)
1177 repo.dirstate.forget(f)
1178 for f in m + r:
1178 for f in m + r:
1179 fctx = ctx[f]
1179 fctx = ctx[f]
1180 repo.wwrite(f, fctx.data(), fctx.flags())
1180 repo.wwrite(f, fctx.data(), fctx.flags())
1181 repo.dirstate.normal(f)
1181 repo.dirstate.normal(f)
1182 repo.dirstate.setparents(qp, nullid)
1182 repo.dirstate.setparents(qp, nullid)
1183 for patch in reversed(self.applied[start:end]):
1183 for patch in reversed(self.applied[start:end]):
1184 self.ui.status(_("popping %s\n") % patch.name)
1184 self.ui.status(_("popping %s\n") % patch.name)
1185 del self.applied[start:end]
1185 del self.applied[start:end]
1186 self.strip(repo, rev, update=False, backup='strip')
1186 self.strip(repo, rev, update=False, backup='strip')
1187 if self.applied:
1187 if self.applied:
1188 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1188 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1189 else:
1189 else:
1190 self.ui.write(_("patch queue now empty\n"))
1190 self.ui.write(_("patch queue now empty\n"))
1191 finally:
1191 finally:
1192 wlock.release()
1192 wlock.release()
1193
1193
1194 def diff(self, repo, pats, opts):
1194 def diff(self, repo, pats, opts):
1195 top, patch = self.check_toppatch(repo)
1195 top, patch = self.check_toppatch(repo)
1196 if not top:
1196 if not top:
1197 self.ui.write(_("no patches applied\n"))
1197 self.ui.write(_("no patches applied\n"))
1198 return
1198 return
1199 qp = self.qparents(repo, top)
1199 qp = self.qparents(repo, top)
1200 if opts.get('reverse'):
1200 if opts.get('reverse'):
1201 node1, node2 = None, qp
1201 node1, node2 = None, qp
1202 else:
1202 else:
1203 node1, node2 = qp, None
1203 node1, node2 = qp, None
1204 diffopts = self.diffopts(opts, patch)
1204 diffopts = self.diffopts(opts, patch)
1205 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1205 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1206
1206
1207 def refresh(self, repo, pats=None, **opts):
1207 def refresh(self, repo, pats=None, **opts):
1208 if not self.applied:
1208 if not self.applied:
1209 self.ui.write(_("no patches applied\n"))
1209 self.ui.write(_("no patches applied\n"))
1210 return 1
1210 return 1
1211 msg = opts.get('msg', '').rstrip()
1211 msg = opts.get('msg', '').rstrip()
1212 newuser = opts.get('user')
1212 newuser = opts.get('user')
1213 newdate = opts.get('date')
1213 newdate = opts.get('date')
1214 if newdate:
1214 if newdate:
1215 newdate = '%d %d' % util.parsedate(newdate)
1215 newdate = '%d %d' % util.parsedate(newdate)
1216 wlock = repo.wlock()
1216 wlock = repo.wlock()
1217
1217
1218 try:
1218 try:
1219 self.check_toppatch(repo)
1219 self.check_toppatch(repo)
1220 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1220 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1221 if repo.changelog.heads(top) != [top]:
1221 if repo.changelog.heads(top) != [top]:
1222 raise util.Abort(_("cannot refresh a revision with children"))
1222 raise util.Abort(_("cannot refresh a revision with children"))
1223
1223
1224 cparents = repo.changelog.parents(top)
1224 cparents = repo.changelog.parents(top)
1225 patchparent = self.qparents(repo, top)
1225 patchparent = self.qparents(repo, top)
1226 ph = patchheader(self.join(patchfn), self.plainmode)
1226 ph = patchheader(self.join(patchfn), self.plainmode)
1227 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1227 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1228 if msg:
1228 if msg:
1229 ph.setmessage(msg)
1229 ph.setmessage(msg)
1230 if newuser:
1230 if newuser:
1231 ph.setuser(newuser)
1231 ph.setuser(newuser)
1232 if newdate:
1232 if newdate:
1233 ph.setdate(newdate)
1233 ph.setdate(newdate)
1234 ph.setparent(hex(patchparent))
1234 ph.setparent(hex(patchparent))
1235
1235
1236 # only commit new patch when write is complete
1236 # only commit new patch when write is complete
1237 patchf = self.opener(patchfn, 'w', atomictemp=True)
1237 patchf = self.opener(patchfn, 'w', atomictemp=True)
1238
1238
1239 comments = str(ph)
1239 comments = str(ph)
1240 if comments:
1240 if comments:
1241 patchf.write(comments)
1241 patchf.write(comments)
1242
1242
1243 # update the dirstate in place, strip off the qtip commit
1243 # update the dirstate in place, strip off the qtip commit
1244 # and then commit.
1244 # and then commit.
1245 #
1245 #
1246 # this should really read:
1246 # this should really read:
1247 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1247 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1248 # but we do it backwards to take advantage of manifest/chlog
1248 # but we do it backwards to take advantage of manifest/chlog
1249 # caching against the next repo.status call
1249 # caching against the next repo.status call
1250 mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
1250 mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
1251 changes = repo.changelog.read(top)
1251 changes = repo.changelog.read(top)
1252 man = repo.manifest.read(changes[0])
1252 man = repo.manifest.read(changes[0])
1253 aaa = aa[:]
1253 aaa = aa[:]
1254 matchfn = cmdutil.match(repo, pats, opts)
1254 matchfn = cmdutil.match(repo, pats, opts)
1255 # in short mode, we only diff the files included in the
1255 # in short mode, we only diff the files included in the
1256 # patch already plus specified files
1256 # patch already plus specified files
1257 if opts.get('short'):
1257 if opts.get('short'):
1258 # if amending a patch, we start with existing
1258 # if amending a patch, we start with existing
1259 # files plus specified files - unfiltered
1259 # files plus specified files - unfiltered
1260 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1260 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1261 # filter with inc/exl options
1261 # filter with inc/exl options
1262 matchfn = cmdutil.match(repo, opts=opts)
1262 matchfn = cmdutil.match(repo, opts=opts)
1263 else:
1263 else:
1264 match = cmdutil.matchall(repo)
1264 match = cmdutil.matchall(repo)
1265 m, a, r, d = repo.status(match=match)[:4]
1265 m, a, r, d = repo.status(match=match)[:4]
1266
1266
1267 # we might end up with files that were added between
1267 # we might end up with files that were added between
1268 # qtip and the dirstate parent, but then changed in the
1268 # qtip and the dirstate parent, but then changed in the
1269 # local dirstate. in this case, we want them to only
1269 # local dirstate. in this case, we want them to only
1270 # show up in the added section
1270 # show up in the added section
1271 for x in m:
1271 for x in m:
1272 if x not in aa:
1272 if x not in aa:
1273 mm.append(x)
1273 mm.append(x)
1274 # we might end up with files added by the local dirstate that
1274 # we might end up with files added by the local dirstate that
1275 # were deleted by the patch. In this case, they should only
1275 # were deleted by the patch. In this case, they should only
1276 # show up in the changed section.
1276 # show up in the changed section.
1277 for x in a:
1277 for x in a:
1278 if x in dd:
1278 if x in dd:
1279 del dd[dd.index(x)]
1279 del dd[dd.index(x)]
1280 mm.append(x)
1280 mm.append(x)
1281 else:
1281 else:
1282 aa.append(x)
1282 aa.append(x)
1283 # make sure any files deleted in the local dirstate
1283 # make sure any files deleted in the local dirstate
1284 # are not in the add or change column of the patch
1284 # are not in the add or change column of the patch
1285 forget = []
1285 forget = []
1286 for x in d + r:
1286 for x in d + r:
1287 if x in aa:
1287 if x in aa:
1288 del aa[aa.index(x)]
1288 del aa[aa.index(x)]
1289 forget.append(x)
1289 forget.append(x)
1290 continue
1290 continue
1291 elif x in mm:
1291 elif x in mm:
1292 del mm[mm.index(x)]
1292 del mm[mm.index(x)]
1293 dd.append(x)
1293 dd.append(x)
1294
1294
1295 m = list(set(mm))
1295 m = list(set(mm))
1296 r = list(set(dd))
1296 r = list(set(dd))
1297 a = list(set(aa))
1297 a = list(set(aa))
1298 c = [filter(matchfn, l) for l in (m, a, r)]
1298 c = [filter(matchfn, l) for l in (m, a, r)]
1299 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1299 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1300 chunks = patch.diff(repo, patchparent, match=match,
1300 chunks = patch.diff(repo, patchparent, match=match,
1301 changes=c, opts=diffopts)
1301 changes=c, opts=diffopts)
1302 for chunk in chunks:
1302 for chunk in chunks:
1303 patchf.write(chunk)
1303 patchf.write(chunk)
1304
1304
1305 try:
1305 try:
1306 if diffopts.git or diffopts.upgrade:
1306 if diffopts.git or diffopts.upgrade:
1307 copies = {}
1307 copies = {}
1308 for dst in a:
1308 for dst in a:
1309 src = repo.dirstate.copied(dst)
1309 src = repo.dirstate.copied(dst)
1310 # during qfold, the source file for copies may
1310 # during qfold, the source file for copies may
1311 # be removed. Treat this as a simple add.
1311 # be removed. Treat this as a simple add.
1312 if src is not None and src in repo.dirstate:
1312 if src is not None and src in repo.dirstate:
1313 copies.setdefault(src, []).append(dst)
1313 copies.setdefault(src, []).append(dst)
1314 repo.dirstate.add(dst)
1314 repo.dirstate.add(dst)
1315 # remember the copies between patchparent and qtip
1315 # remember the copies between patchparent and qtip
1316 for dst in aaa:
1316 for dst in aaa:
1317 f = repo.file(dst)
1317 f = repo.file(dst)
1318 src = f.renamed(man[dst])
1318 src = f.renamed(man[dst])
1319 if src:
1319 if src:
1320 copies.setdefault(src[0], []).extend(
1320 copies.setdefault(src[0], []).extend(
1321 copies.get(dst, []))
1321 copies.get(dst, []))
1322 if dst in a:
1322 if dst in a:
1323 copies[src[0]].append(dst)
1323 copies[src[0]].append(dst)
1324 # we can't copy a file created by the patch itself
1324 # we can't copy a file created by the patch itself
1325 if dst in copies:
1325 if dst in copies:
1326 del copies[dst]
1326 del copies[dst]
1327 for src, dsts in copies.iteritems():
1327 for src, dsts in copies.iteritems():
1328 for dst in dsts:
1328 for dst in dsts:
1329 repo.dirstate.copy(src, dst)
1329 repo.dirstate.copy(src, dst)
1330 else:
1330 else:
1331 for dst in a:
1331 for dst in a:
1332 repo.dirstate.add(dst)
1332 repo.dirstate.add(dst)
1333 # Drop useless copy information
1333 # Drop useless copy information
1334 for f in list(repo.dirstate.copies()):
1334 for f in list(repo.dirstate.copies()):
1335 repo.dirstate.copy(None, f)
1335 repo.dirstate.copy(None, f)
1336 for f in r:
1336 for f in r:
1337 repo.dirstate.remove(f)
1337 repo.dirstate.remove(f)
1338 # if the patch excludes a modified file, mark that
1338 # if the patch excludes a modified file, mark that
1339 # file with mtime=0 so status can see it.
1339 # file with mtime=0 so status can see it.
1340 mm = []
1340 mm = []
1341 for i in xrange(len(m)-1, -1, -1):
1341 for i in xrange(len(m)-1, -1, -1):
1342 if not matchfn(m[i]):
1342 if not matchfn(m[i]):
1343 mm.append(m[i])
1343 mm.append(m[i])
1344 del m[i]
1344 del m[i]
1345 for f in m:
1345 for f in m:
1346 repo.dirstate.normal(f)
1346 repo.dirstate.normal(f)
1347 for f in mm:
1347 for f in mm:
1348 repo.dirstate.normallookup(f)
1348 repo.dirstate.normallookup(f)
1349 for f in forget:
1349 for f in forget:
1350 repo.dirstate.forget(f)
1350 repo.dirstate.forget(f)
1351
1351
1352 if not msg:
1352 if not msg:
1353 if not ph.message:
1353 if not ph.message:
1354 message = "[mq]: %s\n" % patchfn
1354 message = "[mq]: %s\n" % patchfn
1355 else:
1355 else:
1356 message = "\n".join(ph.message)
1356 message = "\n".join(ph.message)
1357 else:
1357 else:
1358 message = msg
1358 message = msg
1359
1359
1360 user = ph.user or changes[1]
1360 user = ph.user or changes[1]
1361
1361
1362 # assumes strip can roll itself back if interrupted
1362 # assumes strip can roll itself back if interrupted
1363 repo.dirstate.setparents(*cparents)
1363 repo.dirstate.setparents(*cparents)
1364 self.applied.pop()
1364 self.applied.pop()
1365 self.applied_dirty = 1
1365 self.applied_dirty = 1
1366 self.strip(repo, top, update=False,
1366 self.strip(repo, top, update=False,
1367 backup='strip')
1367 backup='strip')
1368 except:
1368 except:
1369 repo.dirstate.invalidate()
1369 repo.dirstate.invalidate()
1370 raise
1370 raise
1371
1371
1372 try:
1372 try:
1373 # might be nice to attempt to roll back strip after this
1373 # might be nice to attempt to roll back strip after this
1374 patchf.rename()
1374 patchf.rename()
1375 n = repo.commit(message, user, ph.date, match=match,
1375 n = repo.commit(message, user, ph.date, match=match,
1376 force=True)
1376 force=True)
1377 self.applied.append(statusentry(n, patchfn))
1377 self.applied.append(statusentry(n, patchfn))
1378 except:
1378 except:
1379 ctx = repo[cparents[0]]
1379 ctx = repo[cparents[0]]
1380 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1380 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1381 self.save_dirty()
1381 self.save_dirty()
1382 self.ui.warn(_('refresh interrupted while patch was popped! '
1382 self.ui.warn(_('refresh interrupted while patch was popped! '
1383 '(revert --all, qpush to recover)\n'))
1383 '(revert --all, qpush to recover)\n'))
1384 raise
1384 raise
1385 finally:
1385 finally:
1386 wlock.release()
1386 wlock.release()
1387 self.removeundo(repo)
1387 self.removeundo(repo)
1388
1388
1389 def init(self, repo, create=False):
1389 def init(self, repo, create=False):
1390 if not create and os.path.isdir(self.path):
1390 if not create and os.path.isdir(self.path):
1391 raise util.Abort(_("patch queue directory already exists"))
1391 raise util.Abort(_("patch queue directory already exists"))
1392 try:
1392 try:
1393 os.mkdir(self.path)
1393 os.mkdir(self.path)
1394 except OSError, inst:
1394 except OSError, inst:
1395 if inst.errno != errno.EEXIST or not create:
1395 if inst.errno != errno.EEXIST or not create:
1396 raise
1396 raise
1397 if create:
1397 if create:
1398 return self.qrepo(create=True)
1398 return self.qrepo(create=True)
1399
1399
1400 def unapplied(self, repo, patch=None):
1400 def unapplied(self, repo, patch=None):
1401 if patch and patch not in self.series:
1401 if patch and patch not in self.series:
1402 raise util.Abort(_("patch %s is not in series file") % patch)
1402 raise util.Abort(_("patch %s is not in series file") % patch)
1403 if not patch:
1403 if not patch:
1404 start = self.series_end()
1404 start = self.series_end()
1405 else:
1405 else:
1406 start = self.series.index(patch) + 1
1406 start = self.series.index(patch) + 1
1407 unapplied = []
1407 unapplied = []
1408 for i in xrange(start, len(self.series)):
1408 for i in xrange(start, len(self.series)):
1409 pushable, reason = self.pushable(i)
1409 pushable, reason = self.pushable(i)
1410 if pushable:
1410 if pushable:
1411 unapplied.append((i, self.series[i]))
1411 unapplied.append((i, self.series[i]))
1412 self.explain_pushable(i)
1412 self.explain_pushable(i)
1413 return unapplied
1413 return unapplied
1414
1414
1415 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1415 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1416 summary=False):
1416 summary=False):
1417 def displayname(pfx, patchname, state):
1417 def displayname(pfx, patchname, state):
1418 if summary:
1418 if summary:
1419 ph = patchheader(self.join(patchname), self.plainmode)
1419 ph = patchheader(self.join(patchname), self.plainmode)
1420 msg = ph.message and ph.message[0] or ''
1420 msg = ph.message and ph.message[0] or ''
1421 if self.ui.interactive():
1421 if self.ui.interactive():
1422 width = util.termwidth() - len(pfx) - len(patchname) - 2
1422 width = util.termwidth() - len(pfx) - len(patchname) - 2
1423 if width > 0:
1423 if width > 0:
1424 msg = util.ellipsis(msg, width)
1424 msg = util.ellipsis(msg, width)
1425 else:
1425 else:
1426 msg = ''
1426 msg = ''
1427 msg = "%s%s: %s" % (pfx, patchname, msg)
1427 msg = "%s%s: %s" % (pfx, patchname, msg)
1428 else:
1428 else:
1429 msg = pfx + patchname
1429 msg = pfx + patchname
1430 self.ui.write(msg + '\n', label='qseries.' + state)
1430 self.ui.write(msg + '\n', label='qseries.' + state)
1431
1431
1432 applied = set([p.name for p in self.applied])
1432 applied = set([p.name for p in self.applied])
1433 if length is None:
1433 if length is None:
1434 length = len(self.series) - start
1434 length = len(self.series) - start
1435 if not missing:
1435 if not missing:
1436 if self.ui.verbose:
1436 if self.ui.verbose:
1437 idxwidth = len(str(start + length - 1))
1437 idxwidth = len(str(start + length - 1))
1438 for i in xrange(start, start + length):
1438 for i in xrange(start, start + length):
1439 patch = self.series[i]
1439 patch = self.series[i]
1440 if patch in applied:
1440 if patch in applied:
1441 char, state = 'A', 'applied'
1441 char, state = 'A', 'applied'
1442 elif self.pushable(i)[0]:
1442 elif self.pushable(i)[0]:
1443 char, state = 'U', 'unapplied'
1443 char, state = 'U', 'unapplied'
1444 else:
1444 else:
1445 char, state = 'G', 'guarded'
1445 char, state = 'G', 'guarded'
1446 pfx = ''
1446 pfx = ''
1447 if self.ui.verbose:
1447 if self.ui.verbose:
1448 pfx = '%*d %s ' % (idxwidth, i, char)
1448 pfx = '%*d %s ' % (idxwidth, i, char)
1449 elif status and status != char:
1449 elif status and status != char:
1450 continue
1450 continue
1451 displayname(pfx, patch, state)
1451 displayname(pfx, patch, state)
1452 else:
1452 else:
1453 msng_list = []
1453 msng_list = []
1454 for root, dirs, files in os.walk(self.path):
1454 for root, dirs, files in os.walk(self.path):
1455 d = root[len(self.path) + 1:]
1455 d = root[len(self.path) + 1:]
1456 for f in files:
1456 for f in files:
1457 fl = os.path.join(d, f)
1457 fl = os.path.join(d, f)
1458 if (fl not in self.series and
1458 if (fl not in self.series and
1459 fl not in (self.status_path, self.series_path,
1459 fl not in (self.status_path, self.series_path,
1460 self.guards_path)
1460 self.guards_path)
1461 and not fl.startswith('.')):
1461 and not fl.startswith('.')):
1462 msng_list.append(fl)
1462 msng_list.append(fl)
1463 for x in sorted(msng_list):
1463 for x in sorted(msng_list):
1464 pfx = self.ui.verbose and ('D ') or ''
1464 pfx = self.ui.verbose and ('D ') or ''
1465 displayname(pfx, x, 'missing')
1465 displayname(pfx, x, 'missing')
1466
1466
1467 def issaveline(self, l):
1467 def issaveline(self, l):
1468 if l.name == '.hg.patches.save.line':
1468 if l.name == '.hg.patches.save.line':
1469 return True
1469 return True
1470
1470
1471 def qrepo(self, create=False):
1471 def qrepo(self, create=False):
1472 if create or os.path.isdir(self.join(".hg")):
1472 if create or os.path.isdir(self.join(".hg")):
1473 return hg.repository(self.ui, path=self.path, create=create)
1473 return hg.repository(self.ui, path=self.path, create=create)
1474
1474
1475 def restore(self, repo, rev, delete=None, qupdate=None):
1475 def restore(self, repo, rev, delete=None, qupdate=None):
1476 desc = repo[rev].description().strip()
1476 desc = repo[rev].description().strip()
1477 lines = desc.splitlines()
1477 lines = desc.splitlines()
1478 i = 0
1478 i = 0
1479 datastart = None
1479 datastart = None
1480 series = []
1480 series = []
1481 applied = []
1481 applied = []
1482 qpp = None
1482 qpp = None
1483 for i, line in enumerate(lines):
1483 for i, line in enumerate(lines):
1484 if line == 'Patch Data:':
1484 if line == 'Patch Data:':
1485 datastart = i + 1
1485 datastart = i + 1
1486 elif line.startswith('Dirstate:'):
1486 elif line.startswith('Dirstate:'):
1487 l = line.rstrip()
1487 l = line.rstrip()
1488 l = l[10:].split(' ')
1488 l = l[10:].split(' ')
1489 qpp = [bin(x) for x in l]
1489 qpp = [bin(x) for x in l]
1490 elif datastart != None:
1490 elif datastart != None:
1491 l = line.rstrip()
1491 l = line.rstrip()
1492 n, name = l.split(':', 1)
1492 n, name = l.split(':', 1)
1493 if n:
1493 if n:
1494 applied.append(statusentry(bin(n), name))
1494 applied.append(statusentry(bin(n), name))
1495 else:
1495 else:
1496 series.append(l)
1496 series.append(l)
1497 if datastart is None:
1497 if datastart is None:
1498 self.ui.warn(_("No saved patch data found\n"))
1498 self.ui.warn(_("No saved patch data found\n"))
1499 return 1
1499 return 1
1500 self.ui.warn(_("restoring status: %s\n") % lines[0])
1500 self.ui.warn(_("restoring status: %s\n") % lines[0])
1501 self.full_series = series
1501 self.full_series = series
1502 self.applied = applied
1502 self.applied = applied
1503 self.parse_series()
1503 self.parse_series()
1504 self.series_dirty = 1
1504 self.series_dirty = 1
1505 self.applied_dirty = 1
1505 self.applied_dirty = 1
1506 heads = repo.changelog.heads()
1506 heads = repo.changelog.heads()
1507 if delete:
1507 if delete:
1508 if rev not in heads:
1508 if rev not in heads:
1509 self.ui.warn(_("save entry has children, leaving it alone\n"))
1509 self.ui.warn(_("save entry has children, leaving it alone\n"))
1510 else:
1510 else:
1511 self.ui.warn(_("removing save entry %s\n") % short(rev))
1511 self.ui.warn(_("removing save entry %s\n") % short(rev))
1512 pp = repo.dirstate.parents()
1512 pp = repo.dirstate.parents()
1513 if rev in pp:
1513 if rev in pp:
1514 update = True
1514 update = True
1515 else:
1515 else:
1516 update = False
1516 update = False
1517 self.strip(repo, rev, update=update, backup='strip')
1517 self.strip(repo, rev, update=update, backup='strip')
1518 if qpp:
1518 if qpp:
1519 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1519 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1520 (short(qpp[0]), short(qpp[1])))
1520 (short(qpp[0]), short(qpp[1])))
1521 if qupdate:
1521 if qupdate:
1522 self.ui.status(_("queue directory updating\n"))
1522 self.ui.status(_("queue directory updating\n"))
1523 r = self.qrepo()
1523 r = self.qrepo()
1524 if not r:
1524 if not r:
1525 self.ui.warn(_("Unable to load queue repository\n"))
1525 self.ui.warn(_("Unable to load queue repository\n"))
1526 return 1
1526 return 1
1527 hg.clean(r, qpp[0])
1527 hg.clean(r, qpp[0])
1528
1528
1529 def save(self, repo, msg=None):
1529 def save(self, repo, msg=None):
1530 if not self.applied:
1530 if not self.applied:
1531 self.ui.warn(_("save: no patches applied, exiting\n"))
1531 self.ui.warn(_("save: no patches applied, exiting\n"))
1532 return 1
1532 return 1
1533 if self.issaveline(self.applied[-1]):
1533 if self.issaveline(self.applied[-1]):
1534 self.ui.warn(_("status is already saved\n"))
1534 self.ui.warn(_("status is already saved\n"))
1535 return 1
1535 return 1
1536
1536
1537 if not msg:
1537 if not msg:
1538 msg = _("hg patches saved state")
1538 msg = _("hg patches saved state")
1539 else:
1539 else:
1540 msg = "hg patches: " + msg.rstrip('\r\n')
1540 msg = "hg patches: " + msg.rstrip('\r\n')
1541 r = self.qrepo()
1541 r = self.qrepo()
1542 if r:
1542 if r:
1543 pp = r.dirstate.parents()
1543 pp = r.dirstate.parents()
1544 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1544 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1545 msg += "\n\nPatch Data:\n"
1545 msg += "\n\nPatch Data:\n"
1546 msg += ''.join('%s\n' % x for x in self.applied)
1546 msg += ''.join('%s\n' % x for x in self.applied)
1547 msg += ''.join(':%s\n' % x for x in self.full_series)
1547 msg += ''.join(':%s\n' % x for x in self.full_series)
1548 n = repo.commit(msg, force=True)
1548 n = repo.commit(msg, force=True)
1549 if not n:
1549 if not n:
1550 self.ui.warn(_("repo commit failed\n"))
1550 self.ui.warn(_("repo commit failed\n"))
1551 return 1
1551 return 1
1552 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1552 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1553 self.applied_dirty = 1
1553 self.applied_dirty = 1
1554 self.removeundo(repo)
1554 self.removeundo(repo)
1555
1555
1556 def full_series_end(self):
1556 def full_series_end(self):
1557 if self.applied:
1557 if self.applied:
1558 p = self.applied[-1].name
1558 p = self.applied[-1].name
1559 end = self.find_series(p)
1559 end = self.find_series(p)
1560 if end is None:
1560 if end is None:
1561 return len(self.full_series)
1561 return len(self.full_series)
1562 return end + 1
1562 return end + 1
1563 return 0
1563 return 0
1564
1564
1565 def series_end(self, all_patches=False):
1565 def series_end(self, all_patches=False):
1566 """If all_patches is False, return the index of the next pushable patch
1566 """If all_patches is False, return the index of the next pushable patch
1567 in the series, or the series length. If all_patches is True, return the
1567 in the series, or the series length. If all_patches is True, return the
1568 index of the first patch past the last applied one.
1568 index of the first patch past the last applied one.
1569 """
1569 """
1570 end = 0
1570 end = 0
1571 def next(start):
1571 def next(start):
1572 if all_patches or start >= len(self.series):
1572 if all_patches or start >= len(self.series):
1573 return start
1573 return start
1574 for i in xrange(start, len(self.series)):
1574 for i in xrange(start, len(self.series)):
1575 p, reason = self.pushable(i)
1575 p, reason = self.pushable(i)
1576 if p:
1576 if p:
1577 break
1577 break
1578 self.explain_pushable(i)
1578 self.explain_pushable(i)
1579 return i
1579 return i
1580 if self.applied:
1580 if self.applied:
1581 p = self.applied[-1].name
1581 p = self.applied[-1].name
1582 try:
1582 try:
1583 end = self.series.index(p)
1583 end = self.series.index(p)
1584 except ValueError:
1584 except ValueError:
1585 return 0
1585 return 0
1586 return next(end + 1)
1586 return next(end + 1)
1587 return next(end)
1587 return next(end)
1588
1588
1589 def appliedname(self, index):
1589 def appliedname(self, index):
1590 pname = self.applied[index].name
1590 pname = self.applied[index].name
1591 if not self.ui.verbose:
1591 if not self.ui.verbose:
1592 p = pname
1592 p = pname
1593 else:
1593 else:
1594 p = str(self.series.index(pname)) + " " + pname
1594 p = str(self.series.index(pname)) + " " + pname
1595 return p
1595 return p
1596
1596
1597 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1597 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1598 force=None, git=False):
1598 force=None, git=False):
1599 def checkseries(patchname):
1599 def checkseries(patchname):
1600 if patchname in self.series:
1600 if patchname in self.series:
1601 raise util.Abort(_('patch %s is already in the series file')
1601 raise util.Abort(_('patch %s is already in the series file')
1602 % patchname)
1602 % patchname)
1603 def checkfile(patchname):
1603 def checkfile(patchname):
1604 if not force and os.path.exists(self.join(patchname)):
1604 if not force and os.path.exists(self.join(patchname)):
1605 raise util.Abort(_('patch "%s" already exists')
1605 raise util.Abort(_('patch "%s" already exists')
1606 % patchname)
1606 % patchname)
1607
1607
1608 if rev:
1608 if rev:
1609 if files:
1609 if files:
1610 raise util.Abort(_('option "-r" not valid when importing '
1610 raise util.Abort(_('option "-r" not valid when importing '
1611 'files'))
1611 'files'))
1612 rev = cmdutil.revrange(repo, rev)
1612 rev = cmdutil.revrange(repo, rev)
1613 rev.sort(reverse=True)
1613 rev.sort(reverse=True)
1614 if (len(files) > 1 or len(rev) > 1) and patchname:
1614 if (len(files) > 1 or len(rev) > 1) and patchname:
1615 raise util.Abort(_('option "-n" not valid when importing multiple '
1615 raise util.Abort(_('option "-n" not valid when importing multiple '
1616 'patches'))
1616 'patches'))
1617 added = []
1617 added = []
1618 if rev:
1618 if rev:
1619 # If mq patches are applied, we can only import revisions
1619 # If mq patches are applied, we can only import revisions
1620 # that form a linear path to qbase.
1620 # that form a linear path to qbase.
1621 # Otherwise, they should form a linear path to a head.
1621 # Otherwise, they should form a linear path to a head.
1622 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1622 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1623 if len(heads) > 1:
1623 if len(heads) > 1:
1624 raise util.Abort(_('revision %d is the root of more than one '
1624 raise util.Abort(_('revision %d is the root of more than one '
1625 'branch') % rev[-1])
1625 'branch') % rev[-1])
1626 if self.applied:
1626 if self.applied:
1627 base = repo.changelog.node(rev[0])
1627 base = repo.changelog.node(rev[0])
1628 if base in [n.node for n in self.applied]:
1628 if base in [n.node for n in self.applied]:
1629 raise util.Abort(_('revision %d is already managed')
1629 raise util.Abort(_('revision %d is already managed')
1630 % rev[0])
1630 % rev[0])
1631 if heads != [self.applied[-1].node]:
1631 if heads != [self.applied[-1].node]:
1632 raise util.Abort(_('revision %d is not the parent of '
1632 raise util.Abort(_('revision %d is not the parent of '
1633 'the queue') % rev[0])
1633 'the queue') % rev[0])
1634 base = repo.changelog.rev(self.applied[0].node)
1634 base = repo.changelog.rev(self.applied[0].node)
1635 lastparent = repo.changelog.parentrevs(base)[0]
1635 lastparent = repo.changelog.parentrevs(base)[0]
1636 else:
1636 else:
1637 if heads != [repo.changelog.node(rev[0])]:
1637 if heads != [repo.changelog.node(rev[0])]:
1638 raise util.Abort(_('revision %d has unmanaged children')
1638 raise util.Abort(_('revision %d has unmanaged children')
1639 % rev[0])
1639 % rev[0])
1640 lastparent = None
1640 lastparent = None
1641
1641
1642 diffopts = self.diffopts({'git': git})
1642 diffopts = self.diffopts({'git': git})
1643 for r in rev:
1643 for r in rev:
1644 p1, p2 = repo.changelog.parentrevs(r)
1644 p1, p2 = repo.changelog.parentrevs(r)
1645 n = repo.changelog.node(r)
1645 n = repo.changelog.node(r)
1646 if p2 != nullrev:
1646 if p2 != nullrev:
1647 raise util.Abort(_('cannot import merge revision %d') % r)
1647 raise util.Abort(_('cannot import merge revision %d') % r)
1648 if lastparent and lastparent != r:
1648 if lastparent and lastparent != r:
1649 raise util.Abort(_('revision %d is not the parent of %d')
1649 raise util.Abort(_('revision %d is not the parent of %d')
1650 % (r, lastparent))
1650 % (r, lastparent))
1651 lastparent = p1
1651 lastparent = p1
1652
1652
1653 if not patchname:
1653 if not patchname:
1654 patchname = normname('%d.diff' % r)
1654 patchname = normname('%d.diff' % r)
1655 self.check_reserved_name(patchname)
1655 self.check_reserved_name(patchname)
1656 checkseries(patchname)
1656 checkseries(patchname)
1657 checkfile(patchname)
1657 checkfile(patchname)
1658 self.full_series.insert(0, patchname)
1658 self.full_series.insert(0, patchname)
1659
1659
1660 patchf = self.opener(patchname, "w")
1660 patchf = self.opener(patchname, "w")
1661 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1661 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1662 patchf.close()
1662 patchf.close()
1663
1663
1664 se = statusentry(n, patchname)
1664 se = statusentry(n, patchname)
1665 self.applied.insert(0, se)
1665 self.applied.insert(0, se)
1666
1666
1667 added.append(patchname)
1667 added.append(patchname)
1668 patchname = None
1668 patchname = None
1669 self.parse_series()
1669 self.parse_series()
1670 self.applied_dirty = 1
1670 self.applied_dirty = 1
1671
1671
1672 for i, filename in enumerate(files):
1672 for i, filename in enumerate(files):
1673 if existing:
1673 if existing:
1674 if filename == '-':
1674 if filename == '-':
1675 raise util.Abort(_('-e is incompatible with import from -'))
1675 raise util.Abort(_('-e is incompatible with import from -'))
1676 if not patchname:
1676 if not patchname:
1677 patchname = normname(filename)
1677 patchname = normname(filename)
1678 self.check_reserved_name(patchname)
1678 self.check_reserved_name(patchname)
1679 if not os.path.isfile(self.join(patchname)):
1679 if not os.path.isfile(self.join(patchname)):
1680 raise util.Abort(_("patch %s does not exist") % patchname)
1680 raise util.Abort(_("patch %s does not exist") % patchname)
1681 else:
1681 else:
1682 try:
1682 try:
1683 if filename == '-':
1683 if filename == '-':
1684 if not patchname:
1684 if not patchname:
1685 raise util.Abort(
1685 raise util.Abort(
1686 _('need --name to import a patch from -'))
1686 _('need --name to import a patch from -'))
1687 text = sys.stdin.read()
1687 text = sys.stdin.read()
1688 else:
1688 else:
1689 text = url.open(self.ui, filename).read()
1689 text = url.open(self.ui, filename).read()
1690 except (OSError, IOError):
1690 except (OSError, IOError):
1691 raise util.Abort(_("unable to read %s") % filename)
1691 raise util.Abort(_("unable to read %s") % filename)
1692 if not patchname:
1692 if not patchname:
1693 patchname = normname(os.path.basename(filename))
1693 patchname = normname(os.path.basename(filename))
1694 self.check_reserved_name(patchname)
1694 self.check_reserved_name(patchname)
1695 checkfile(patchname)
1695 checkfile(patchname)
1696 patchf = self.opener(patchname, "w")
1696 patchf = self.opener(patchname, "w")
1697 patchf.write(text)
1697 patchf.write(text)
1698 if not force:
1698 if not force:
1699 checkseries(patchname)
1699 checkseries(patchname)
1700 if patchname not in self.series:
1700 if patchname not in self.series:
1701 index = self.full_series_end() + i
1701 index = self.full_series_end() + i
1702 self.full_series[index:index] = [patchname]
1702 self.full_series[index:index] = [patchname]
1703 self.parse_series()
1703 self.parse_series()
1704 self.ui.warn(_("adding %s to series file\n") % patchname)
1704 self.ui.warn(_("adding %s to series file\n") % patchname)
1705 added.append(patchname)
1705 added.append(patchname)
1706 patchname = None
1706 patchname = None
1707 self.series_dirty = 1
1707 self.series_dirty = 1
1708 qrepo = self.qrepo()
1708 qrepo = self.qrepo()
1709 if qrepo:
1709 if qrepo:
1710 qrepo.add(added)
1710 qrepo.add(added)
1711
1711
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    q = repo.mq
    q.delete(repo, patches, opts)
    # Persist series/status changes made by the delete.
    q.save_dirty()
    return 0
1724
1724
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""

    q = repo.mq

    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)

    if opts.get('last') and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif opts.get('last') and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif opts.get('last'):
        # Show only the patch below the current top.
        start = end - 2
        end = 1
    else:
        start = 0

    return q.qseries(repo, length=end, start=start, status='A',
                     summary=opts.get('summary'))
1752
1752
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    # --first limits output to the single next unapplied patch.
    length = opts.get('first') and 1 or None
    return q.qseries(repo, start=start, length=length, status='U',
                     summary=opts.get('summary'))
1771
1771
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    q = repo.mq
    q.qimport(repo, filename, patchname=opts['name'],
              existing=opts['existing'], force=opts['force'], rev=opts['rev'],
              git=opts['git'])
    q.save_dirty()

    # --push only makes sense for file imports; --rev already applies.
    if opts.get('push') and not opts.get('rev'):
        return q.push(repo, None)
    return 0
1808
1808
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state)."""
    q = repo.mq
    r = q.init(repo, create)
    q.save_dirty()
    if r:
        # Seed a fresh versioned patch repository with an .hgignore
        # (status/guards are transient) and an empty series file.
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1832
1832
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use hg init --mq instead."""
    return qinit(ui, repo, create=opts['create_repo'])
1845
1845
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by init --mq.
    '''
    def patchdir(repo):
        # NB: local renamed from 'url' to avoid shadowing the url module.
        u = repo.url()
        if u.endswith('/'):
            u = u[:-1]
        return u + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                # For a remote destination we cannot strip afterwards, so
                # clone only the revisions below qbase plus other heads.
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1909
1909
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use hg commit --mq instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # _() added for consistency with every other abort message here.
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1919
1919
def series(ui, repo, **opts):
    """print the entire series file"""
    repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1924
1924
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # series_end(True) is the index just past the applied patches.
    t = q.applied and q.series_end(True) or 0
    if t:
        return q.qseries(repo, start=t - 1, length=1, status='A',
                         summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1
1935
1935
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    end = q.series_end()
    if end == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
1944
1944
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not l:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=l - 2, length=1, status='A',
                     summary=opts.get('summary'))
1957
1957
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from -U/-D flags, in place.

    -U/--currentuser and -D/--currentdate only apply when an explicit
    -u/-d value was not given.
    """
    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % util.makedate()
1963
1963
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        # Deferred so the editor is only launched when q.new needs it.
        return ui.edit(msg, ui.username())
    q = repo.mq
    # With --edit, pass the callable; q.new invokes it lazily.
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
2000
2000
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Seed the editor with the current top patch's header.
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
2031
2031
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    repo.mq.diff(repo, pats, opts)
    return 0
2047
2047
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo)[0]:
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # Fixed: warning was missing the trailing newline required by
            # the ui one-message-per-line convention used everywhere else.
            # NOTE(review): the duplicate is still appended and folded below;
            # confirm whether a 'continue' was intended here.
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's message so the headers can be
            # concatenated afterwards.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # Start from the current top patch's header and append the folded
        # patches' messages, separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    q.refresh(repo, msg=message, git=diffopts.git)
    q.delete(repo, patches, opts)
    q.save_dirty()
2111
2111
def goto(ui, repo, patch, **opts):
    """push or pop patches until named patch is at top of stack"""
    # Resolve the user-supplied name to a concrete patch, then move the
    # applied stack in whichever direction reaches it.
    q = repo.mq
    target = q.lookup(patch)
    force = opts['force']
    if q.isapplied(target):
        result = q.pop(repo, target, force=force)
    else:
        result = q.push(repo, target, force=force)
    q.save_dirty()
    return result
2122
2122
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable
    '''
    q = repo.mq

    def display(idx):
        # Print one series entry with its guards, colorized per guard sign.
        gs = q.series_guards[idx] or ['unguarded']
        ui.write('%s: ' % ui.label(q.series[idx], 'qguard.patch'))
        last = len(gs) - 1
        for i, g in enumerate(gs):
            if g.startswith('+'):
                style = 'qguard.positive'
            elif g.startswith('-'):
                style = 'qguard.negative'
            else:
                style = 'qguard.unguarded'
            ui.write(g, label=style)
            if i != last:
                ui.write(' ')
        ui.write('\n')

    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            display(i)
        return
    # No explicit patch name (or the first argument is a guard): operate on
    # the topmost applied patch.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        display(q.series.index(q.lookup(patch)))
2178
2178
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    # Default to the top applied patch when no name is given.
    if not patch:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        name = q.lookup('qtip')
    else:
        name = q.lookup(patch)

    ph = patchheader(q.join(name), q.plainmode)
    ui.write('\n'.join(ph.message) + '\n')
2193
2193
def lastsavename(path):
    """Return (filename, index) of the highest-numbered save of path.

    Scans the directory containing path for entries named
    '<base>.<number>' and returns the full path and integer index of the
    one with the largest number, or (None, None) when none exist.
    """
    directory, base = os.path.split(path)
    pattern = re.compile("%s.([0-9]+)" % base)
    best_index = None
    best_name = None
    for entry in os.listdir(directory):
        m = pattern.match(entry)
        if not m:
            continue
        idx = int(m.group(1))
        if best_index is None or idx > best_index:
            best_index = idx
            best_name = entry
    if best_name:
        return (os.path.join(directory, best_name), best_index)
    return (None, None)
2209 return (None, None)
2210
2210
def savename(path):
    """Return the next unused save name for path ('path.N+1')."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save exists; start numbering from 1.
        index = 0
    return path + ".%d" % (index + 1)
2217
2217
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # Merging pushes against a previously saved queue: either the one
        # named with -n, or the most recent automatic save.
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)

    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2240
2240
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    # With -n, operate on the named alternate queue and skip updating the
    # working directory, which belongs to the active queue.
    if opts['name']:
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True

    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2259
2259
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # With a single argument it is the destination; the source defaults
    # to the topmost applied patch.
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # Renaming into an existing directory keeps the original basename.
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(
            _('A patch named %s already exists in the series file') % name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.find_series(patch)
    # Preserve any guards ('#...' annotations) attached to the series entry.
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # If the patch is applied, the status file entry must track the new name.
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # Mirror the rename in the versioned patch-queue repository,
        # keeping copy/rename information where the file is tracked.
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # Source was only 'added': drop it and add the new name.
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2319
2319
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    node = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, node, delete=opts['delete'], qupdate=opts['update'])
    q.save_dirty()
    return 0
2330
2330
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # Fixed: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit. Only filesystem errors
            # (e.g. a missing status file) should be ignored here.
            pass
    return 0
2362
2362
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped
    revision.
    """
    # --backup wins over --nobackup; default is to back up everything.
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # Only update the working directory when one of its parents is being
    # stripped; otherwise leave it alone.
    update = True
    if p[0] == nullid:
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        update = False

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2389
2389
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

      qguard foo.patch -stable    (negative guard)
      qguard bar.patch +stable    (positive guard)
      qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # Changing the active guard set: remember how many patches were
        # unapplied/guarded before so we can report the delta afterwards.
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: count how many series entries use each guard.
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        # Sort by guard name, ignoring the leading '+'/'-' sign.
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # No arguments: just report the currently active guards.
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # Remember the current top patch name so --reapply can push back to it.
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # Pop down to just below the first applied patch that is now guarded.
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.save_dirty()
2492
2492
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    # --applied expands to the whole applied stack; otherwise an explicit
    # revision range is mandatory.
    if opts['applied']:
        revrange = ('qbase:qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = cmdutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.save_dirty()
    return 0
2523
2523
def reposetup(ui, repo):
    # Wrap the repository class so mq can veto commits/pushes over applied
    # patches and expose patch names as tags.
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            # Lazily construct the patch queue the first time it is needed.
            return queue(self.ui, self.join(""))

        def abort_if_wdir_patched(self, errmsg, force=False):
            # Abort when the working directory parent is an applied mq
            # patch, unless force is given.
            if self.mq.applied and not force:
                parent = self.dirstate.parents()[0]
                if parent in [s.node for s in self.mq.applied]:
                    raise util.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abort_if_wdir_patched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def push(self, remote, force=False, revs=None):
            # Refuse to push applied patches to a remote repository unless
            # forced or an explicit revision set is given.
            if self.mq.applied and not force and not revs:
                raise util.Abort(_('source has mq patches applied'))
            return super(mqrepo, self).push(remote, force, revs)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            # A stale status file can reference nodes that no longer exist
            # (e.g. after a strip); warn and skip the patch tags then.
            if mqtags[-1][0] not in self.changelog.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            # Synthetic tags for the stack boundaries.
            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                # Real tags take precedence over patch names.
                if patch[1] in tags:
                    self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
                                 % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

        def _branchtags(self, partial, lrev):
            # Keep mq patches out of the persistent branch cache: only
            # revisions below qbase are written to disk.
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = q.applied[0].node
            if qbasenode not in cl.nodemap:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            qbase = cl.rev(qbasenode)
            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
                self._updatebranchcache(partial, ctxgen)
                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            ctxgen = (self[r] for r in xrange(start, len(cl)))
            self._updatebranchcache(partial, ctxgen)

            return partial

    if repo.local():
        repo.__class__ = mqrepo
2610
2610
def mqimport(orig, ui, repo, *args, **kwargs):
    """Wrap 'hg import' so it refuses to commit over applied mq patches."""
    # repo only grows abort_if_wdir_patched when the mqrepo class is mixed in
    mqaware = hasattr(repo, 'abort_if_wdir_patched')
    if mqaware and not kwargs.get('no_commit', False):
        repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
                                   kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)
2617
2617
def mqinit(orig, ui, *args, **kwargs):
    """Wrap 'hg init' so that --mq initializes the patch queue repository."""
    mq = kwargs.pop('mq', None)

    if not mq:
        # no --mq flag: plain init
        return orig(ui, *args, **kwargs)

    if not args:
        # no path given: locate the enclosing repository
        repopath = cmdutil.findrepo(os.getcwd())
        if not repopath:
            raise util.Abort(_('There is no Mercurial repository here '
                               '(.hg not found)'))
    else:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise util.Abort(_('only a local queue repository '
                               'may be initialized'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)
2636
2636
def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # some commands do not like getting unknown options
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, repo, *args, **kwargs)

    queuerepo = repo.mq.qrepo()
    if not queuerepo:
        raise util.Abort('no queue repository')
    # re-dispatch the command against the queue repository
    return orig(queuerepo.ui, queuerepo, *args, **kwargs)
2651
2651
def uisetup(ui):
    """Wrap core commands: guard 'import', extend 'init' and every
    repo-taking command with the --mq option."""
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    # commands that take no repository cannot meaningfully take --mq
    norepo = set(commands.norepo.split(" "))
    for cmd in commands.table.keys():
        cmd = cmdutil.parsealiases(cmd)[0]
        if cmd in norepo:
            continue
        entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
        entry[1].extend(mqopt)
2667
2667
# option shared by all commands that print the patch series
seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2669
2669
# command table: maps "name|aliases" (a leading '^' marks the command as
# shown in short help) to (function, options, synopsis)
cmdtable = {
    "qapplied":
        (applied,
         [('1', 'last', None, _('show only the last patch'))] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]')),
    "qclone":
        (clone,
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None, _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '', _('location of source patch repository')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]')),
    "qcommit|qci":
        (commit,
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...')),
    "^qdiff":
        (diff,
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...')),
    "qdelete|qremove|qrm":
        (delete,
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
         _('hg qdelete [-k] [-r REV]... [PATCH]...')),
    'qfold':
        (fold,
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
         ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
    'qgoto':
        (goto,
         [('f', 'force', None, _('overwrite any local changes'))],
         _('hg qgoto [OPTION]... PATCH')),
    'qguard':
        (guard,
         [('l', 'list', None, _('list all patches and guards')),
          ('n', 'none', None, _('drop all guards'))],
         _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
    'qheader': (header, [], _('hg qheader [PATCH]')),
    "^qimport":
        (qimport,
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '', _('name of patch file')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [], _('place existing revisions under mq control')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
    "^qinit":
        (init,
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]')),
    "qnew":
        (new,
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '', _('add "From: <given user>" to patch')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '', _('add "Date: <given date>" to patch'))
         ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
    "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
    "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
    "^qpop":
        (pop,
         [('a', 'all', None, _('pop all patches')),
          ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
          ('f', 'force', None, _('forget any local changes to patched files'))],
         _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
    "^qpush":
        (push,
         [('f', 'force', None, _('apply if the patch has rejects')),
          ('l', 'list', None, _('list patch name in commit text')),
          ('a', 'all', None, _('apply all patches')),
          ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
          ('n', 'name', '', _('merge queue name (DEPRECATED)'))],
         _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
    "^qrefresh":
        (refresh,
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'))
         ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
    'qrename|qmv':
        (rename, [], _('hg qrename PATCH1 [PATCH2]')),
    "qrestore":
        (restore,
         [('d', 'delete', None, _('delete save entry')),
          ('u', 'update', None, _('update queue working directory'))],
         _('hg qrestore [-d] [-u] REV')),
    "qsave":
        (save,
         [('c', 'copy', None, _('copy patch directory')),
          ('n', 'name', '', _('copy directory name')),
          ('e', 'empty', None, _('clear queue status file')),
          ('f', 'force', None, _('force copy'))] + commands.commitopts,
         _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
    "qselect":
        (select,
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...')),
    "qseries":
        (series,
         [('m', 'missing', None, _('print patches not in series')),
         ] + seriesopts,
         _('hg qseries [-ms]')),
    "^strip":
        (strip,
         [('f', 'force', None, _('force removal with local changes')),
          ('b', 'backup', None, _('bundle unrelated changesets')),
          ('n', 'nobackup', None, _('no backups'))],
         _('hg strip [-f] [-b] [-n] REV')),
    "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
    "qunapplied":
        (unapplied,
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]')),
    "qfinish":
        (finish,
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...')),
}
2811
2811
# effects applied by the color extension to qguard/qseries output labels
colortable = {'qguard.negative': 'red',
              'qguard.positive': 'yellow',
              'qguard.unguarded': 'green',
              'qseries.applied': 'blue bold underline',
              'qseries.guarded': 'black bold',
              'qseries.missing': 'red bold',
              'qseries.unapplied': 'black bold'}
@@ -1,2204 +1,2206 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = matchmod.exact(self.root, '', ['.hgtags'])
210 m = matchmod.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
271 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
272 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
272 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        #
        # Bring the branch-heads dict 'partial' (valid up to revision
        # 'lrev') up to date with the current changelog tip, and persist
        # the refreshed cache to disk when anything had to be added.
        tiprev = len(self) - 1
        if lrev != tiprev:
            # fold in every changeset committed after the cached revision
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
328
328
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # in-memory cache is already current for this tip
            return self._branchcache

        # remember which tip the cache was valid for, then mark the cache
        # as being rebuilt against the current tip
        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (first call, or the old tip was
            # stripped): reload from the on-disk branchheads cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update starting from the in-memory cache
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache
348
348
349 def branchtags(self):
349 def branchtags(self):
350 '''return a dict where branch names map to the tipmost head of
350 '''return a dict where branch names map to the tipmost head of
351 the branch, open heads come before closed'''
351 the branch, open heads come before closed'''
352 bt = {}
352 bt = {}
353 for bn, heads in self.branchmap().iteritems():
353 for bn, heads in self.branchmap().iteritems():
354 tip = heads[-1]
354 tip = heads[-1]
355 for h in reversed(heads):
355 for h in reversed(heads):
356 if 'close' not in self.changelog.read(h)[5]:
356 if 'close' not in self.changelog.read(h)[5]:
357 tip = h
357 tip = h
358 break
358 break
359 bt[bn] = tip
359 bt[bn] = tip
360 return bt
360 return bt
361
361
362
362
    def _readbranchcache(self):
        # Load the on-disk branch-heads cache.  Returns (partial, last,
        # lrev) where partial maps branch name -> list of head nodes and
        # last/lrev identify the changeset the cache was valid for.  On
        # any problem an empty cache (valid for nullrev) is returned.
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file, or it is unreadable: start from scratch
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<head node hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt or stale cache is not fatal; fall back to empty
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
390
390
391 def _writebranchcache(self, branches, tip, tiprev):
391 def _writebranchcache(self, branches, tip, tiprev):
392 try:
392 try:
393 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f = self.opener("branchheads.cache", "w", atomictemp=True)
394 f.write("%s %s\n" % (hex(tip), tiprev))
394 f.write("%s %s\n" % (hex(tip), tiprev))
395 for label, nodes in branches.iteritems():
395 for label, nodes in branches.iteritems():
396 for node in nodes:
396 for node in nodes:
397 f.write("%s %s\n" % (hex(node), label))
397 f.write("%s %s\n" % (hex(node), label))
398 f.rename()
398 f.rename()
399 except (IOError, OSError):
399 except (IOError, OSError):
400 pass
400 pass
401
401
    def _updatebranchcache(self, partial, ctxgen):
        # Fold the changectxs yielded by ctxgen into the branch-heads
        # dict 'partial' (branch name -> list of head nodes), pruning
        # entries that stop being heads because a new node descends
        # from them.
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                # a single head can never need pruning
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    # already pruned by a later node in an earlier pass
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                # drop heads that are ancestors of (reachable from) latest
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads
427
427
    def lookup(self, key):
        # Resolve key to a changelog node.  Resolution order: integer
        # revision, the special names '.', 'null' and 'tip', exact
        # node/rev match, tag name, branch name, unambiguous node-hex
        # prefix.  Raises RepoLookupError when nothing matches.
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            # first parent of the working directory
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # render a binary node as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            # key may not support len()/hex(); report it as-is
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def wjoin(self, f):
465 def wjoin(self, f):
466 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
467
467
468 def rjoin(self, f):
468 def rjoin(self, f):
469 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
470
470
471 def file(self, f):
471 def file(self, f):
472 if f[0] == '/':
472 if f[0] == '/':
473 f = f[1:]
473 f = f[1:]
474 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
475
475
476 def changectx(self, changeid):
476 def changectx(self, changeid):
477 return self[changeid]
477 return self[changeid]
478
478
479 def parents(self, changeid=None):
479 def parents(self, changeid=None):
480 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
481 return self[changeid].parents()
481 return self[changeid].parents()
482
482
483 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
487
487
488 def getcwd(self):
488 def getcwd(self):
489 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
490
490
491 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
493
493
494 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
495 return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
    def _filter(self, filter, filename, data):
        # Run data through the configured filters for 'filter', a ui
        # config section name (e.g. "encode" or "decode") whose entries
        # map file patterns to filter commands.  The compiled list of
        # (matcher, function, params) triples is cached per section in
        # self.filterpats; the first pattern matching filename wins.
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered in-process data filter; the rest of
                        # the command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external shell filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
530
530
531 def adddatafilter(self, name, filter):
531 def adddatafilter(self, name, filter):
532 self._datafilters[name] = filter
532 self._datafilters[name] = filter
533
533
534 def wread(self, filename):
534 def wread(self, filename):
535 if self._link(filename):
535 if self._link(filename):
536 data = os.readlink(self.wjoin(filename))
536 data = os.readlink(self.wjoin(filename))
537 else:
537 else:
538 data = self.wopener(filename, 'r').read()
538 data = self.wopener(filename, 'r').read()
539 return self._filter("encode", filename, data)
539 return self._filter("encode", filename, data)
540
540
541 def wwrite(self, filename, data, flags):
541 def wwrite(self, filename, data, flags):
542 data = self._filter("decode", filename, data)
542 data = self._filter("decode", filename, data)
543 try:
543 try:
544 os.unlink(self.wjoin(filename))
544 os.unlink(self.wjoin(filename))
545 except OSError:
545 except OSError:
546 pass
546 pass
547 if 'l' in flags:
547 if 'l' in flags:
548 self.wopener.symlink(data, filename)
548 self.wopener.symlink(data, filename)
549 else:
549 else:
550 self.wopener(filename, 'w').write(data)
550 self.wopener(filename, 'w').write(data)
551 if 'x' in flags:
551 if 'x' in flags:
552 util.set_flags(self.wjoin(filename), False, True)
552 util.set_flags(self.wjoin(filename), False, True)
553
553
554 def wwritedata(self, filename, data):
554 def wwritedata(self, filename, data):
555 return self._filter("decode", filename, data)
555 return self._filter("decode", filename, data)
556
556
    def transaction(self, desc):
        # Open (or nest into) a store transaction.  'desc' is a short
        # description of the operation; it is saved in journal.desc as
        # "<len(repo)>,<desc>" so that rollback can report what will be
        # undone.
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # join the transaction that is already in progress
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d,%s" % (len(self), desc))

        # on successful close, the journal.* files are renamed to the
        # undo.* files used by rollback
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
584
586
585 def recover(self):
587 def recover(self):
586 lock = self.lock()
588 lock = self.lock()
587 try:
589 try:
588 if os.path.exists(self.sjoin("journal")):
590 if os.path.exists(self.sjoin("journal")):
589 self.ui.status(_("rolling back interrupted transaction\n"))
591 self.ui.status(_("rolling back interrupted transaction\n"))
590 transaction.rollback(self.sopener, self.sjoin("journal"),
592 transaction.rollback(self.sopener, self.sjoin("journal"),
591 self.ui.warn)
593 self.ui.warn)
592 self.invalidate()
594 self.invalidate()
593 return True
595 return True
594 else:
596 else:
595 self.ui.warn(_("no interrupted transaction available\n"))
597 self.ui.warn(_("no interrupted transaction available\n"))
596 return False
598 return False
597 finally:
599 finally:
598 lock.release()
600 lock.release()
599
601
    def rollback(self):
        # Undo the last completed transaction, restoring the store,
        # the dirstate and the branch name from the undo.* files.
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # missing undo.branch is non-fatal; warn and keep the
                    # current branch name
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop all in-memory state derived from the old history
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
624
626
625 def invalidatecaches(self):
627 def invalidatecaches(self):
626 self._tags = None
628 self._tags = None
627 self._tagtypes = None
629 self._tagtypes = None
628 self.nodetagscache = None
630 self.nodetagscache = None
629 self._branchcache = None # in UTF-8
631 self._branchcache = None # in UTF-8
630 self._branchcachetip = None
632 self._branchcachetip = None
631
633
632 def invalidate(self):
634 def invalidate(self):
633 for a in "changelog manifest".split():
635 for a in "changelog manifest".split():
634 if a in self.__dict__:
636 if a in self.__dict__:
635 delattr(self, a)
637 delattr(self, a)
636 self.invalidatecaches()
638 self.invalidatecaches()
637
639
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Acquire the lock file 'lockname'.  First tries a non-blocking
        # acquire; if the lock is held by someone else and wait is true,
        # warns and retries with the configured ui.timeout (600 seconds
        # by default).  releasefn is passed through to lock.lock;
        # acquirefn, if given, is invoked once the lock is held.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
652
654
653 def lock(self, wait=True):
655 def lock(self, wait=True):
654 '''Lock the repository store (.hg/store) and return a weak reference
656 '''Lock the repository store (.hg/store) and return a weak reference
655 to the lock. Use this before modifying the store (e.g. committing or
657 to the lock. Use this before modifying the store (e.g. committing or
656 stripping). If you are opening a transaction, get a lock as well.)'''
658 stripping). If you are opening a transaction, get a lock as well.)'''
657 l = self._lockref and self._lockref()
659 l = self._lockref and self._lockref()
658 if l is not None and l.held:
660 if l is not None and l.held:
659 l.lock()
661 l.lock()
660 return l
662 return l
661
663
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
664 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 _('repository %s') % self.origroot)
665 _('repository %s') % self.origroot)
664 self._lockref = weakref.ref(l)
666 self._lockref = weakref.ref(l)
665 return l
667 return l
666
668
667 def wlock(self, wait=True):
669 def wlock(self, wait=True):
668 '''Lock the non-store parts of the repository (everything under
670 '''Lock the non-store parts of the repository (everything under
669 .hg except .hg/store) and return a weak reference to the lock.
671 .hg except .hg/store) and return a weak reference to the lock.
670 Use this before modifying files in .hg.'''
672 Use this before modifying files in .hg.'''
671 l = self._wlockref and self._wlockref()
673 l = self._wlockref and self._wlockref()
672 if l is not None and l.held:
674 if l is not None and l.held:
673 l.lock()
675 l.lock()
674 return l
676 return l
675
677
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
678 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
679 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
680 self.origroot)
679 self._wlockref = weakref.ref(l)
681 self._wlockref = weakref.ref(l)
680 return l
682 return l
681
683
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Writes the file revision from fctx into its filelog (including
        rename/copy metadata when applicable), appends the file name to
        changelist when a new filelog revision is created, and returns
        the filelog node to record in the manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
756
758
757 def commit(self, text="", user=None, date=None, match=None, force=False,
759 def commit(self, text="", user=None, date=None, match=None, force=False,
758 editor=False, extra={}):
760 editor=False, extra={}):
759 """Add a new revision to current repository.
761 """Add a new revision to current repository.
760
762
761 Revision information is gathered from the working directory,
763 Revision information is gathered from the working directory,
762 match can be used to filter the committed files. If editor is
764 match can be used to filter the committed files. If editor is
763 supplied, it is called to get a commit message.
765 supplied, it is called to get a commit message.
764 """
766 """
765
767
766 def fail(f, msg):
768 def fail(f, msg):
767 raise util.Abort('%s: %s' % (f, msg))
769 raise util.Abort('%s: %s' % (f, msg))
768
770
769 if not match:
771 if not match:
770 match = matchmod.always(self.root, '')
772 match = matchmod.always(self.root, '')
771
773
772 if not force:
774 if not force:
773 vdirs = []
775 vdirs = []
774 match.dir = vdirs.append
776 match.dir = vdirs.append
775 match.bad = fail
777 match.bad = fail
776
778
777 wlock = self.wlock()
779 wlock = self.wlock()
778 try:
780 try:
779 p1, p2 = self.dirstate.parents()
781 p1, p2 = self.dirstate.parents()
780 wctx = self[None]
782 wctx = self[None]
781
783
782 if (not force and p2 != nullid and match and
784 if (not force and p2 != nullid and match and
783 (match.files() or match.anypats())):
785 (match.files() or match.anypats())):
784 raise util.Abort(_('cannot partially commit a merge '
786 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
787 '(do not specify files or patterns)'))
786
788
787 changes = self.status(match=match, clean=force)
789 changes = self.status(match=match, clean=force)
788 if force:
790 if force:
789 changes[0].extend(changes[6]) # mq may commit unchanged files
791 changes[0].extend(changes[6]) # mq may commit unchanged files
790
792
791 # check subrepos
793 # check subrepos
792 subs = []
794 subs = []
793 removedsubs = set()
795 removedsubs = set()
794 for p in wctx.parents():
796 for p in wctx.parents():
795 removedsubs.update(s for s in p.substate if match(s))
797 removedsubs.update(s for s in p.substate if match(s))
796 for s in wctx.substate:
798 for s in wctx.substate:
797 removedsubs.discard(s)
799 removedsubs.discard(s)
798 if match(s) and wctx.sub(s).dirty():
800 if match(s) and wctx.sub(s).dirty():
799 subs.append(s)
801 subs.append(s)
800 if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
802 if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
801 changes[0].insert(0, '.hgsubstate')
803 changes[0].insert(0, '.hgsubstate')
802
804
803 # make sure all explicit patterns are matched
805 # make sure all explicit patterns are matched
804 if not force and match.files():
806 if not force and match.files():
805 matched = set(changes[0] + changes[1] + changes[2])
807 matched = set(changes[0] + changes[1] + changes[2])
806
808
807 for f in match.files():
809 for f in match.files():
808 if f == '.' or f in matched or f in wctx.substate:
810 if f == '.' or f in matched or f in wctx.substate:
809 continue
811 continue
810 if f in changes[3]: # missing
812 if f in changes[3]: # missing
811 fail(f, _('file not found!'))
813 fail(f, _('file not found!'))
812 if f in vdirs: # visited directory
814 if f in vdirs: # visited directory
813 d = f + '/'
815 d = f + '/'
814 for mf in matched:
816 for mf in matched:
815 if mf.startswith(d):
817 if mf.startswith(d):
816 break
818 break
817 else:
819 else:
818 fail(f, _("no match under directory!"))
820 fail(f, _("no match under directory!"))
819 elif f not in self.dirstate:
821 elif f not in self.dirstate:
820 fail(f, _("file not tracked!"))
822 fail(f, _("file not tracked!"))
821
823
822 if (not force and not extra.get("close") and p2 == nullid
824 if (not force and not extra.get("close") and p2 == nullid
823 and not (changes[0] or changes[1] or changes[2])
825 and not (changes[0] or changes[1] or changes[2])
824 and self[None].branch() == self['.'].branch()):
826 and self[None].branch() == self['.'].branch()):
825 return None
827 return None
826
828
827 ms = mergemod.mergestate(self)
829 ms = mergemod.mergestate(self)
828 for f in changes[0]:
830 for f in changes[0]:
829 if f in ms and ms[f] == 'u':
831 if f in ms and ms[f] == 'u':
830 raise util.Abort(_("unresolved merge conflicts "
832 raise util.Abort(_("unresolved merge conflicts "
831 "(see hg resolve)"))
833 "(see hg resolve)"))
832
834
833 cctx = context.workingctx(self, (p1, p2), text, user, date,
835 cctx = context.workingctx(self, (p1, p2), text, user, date,
834 extra, changes)
836 extra, changes)
835 if editor:
837 if editor:
836 cctx._text = editor(self, cctx, subs)
838 cctx._text = editor(self, cctx, subs)
837 edited = (text != cctx._text)
839 edited = (text != cctx._text)
838
840
839 # commit subs
841 # commit subs
840 if subs or removedsubs:
842 if subs or removedsubs:
841 state = wctx.substate.copy()
843 state = wctx.substate.copy()
842 for s in subs:
844 for s in subs:
843 self.ui.status(_('committing subrepository %s\n') % s)
845 self.ui.status(_('committing subrepository %s\n') % s)
844 sr = wctx.sub(s).commit(cctx._text, user, date)
846 sr = wctx.sub(s).commit(cctx._text, user, date)
845 state[s] = (state[s][0], sr)
847 state[s] = (state[s][0], sr)
846 subrepo.writestate(self, state)
848 subrepo.writestate(self, state)
847
849
848 # Save commit message in case this transaction gets rolled back
850 # Save commit message in case this transaction gets rolled back
849 # (e.g. by a pretxncommit hook). Leave the content alone on
851 # (e.g. by a pretxncommit hook). Leave the content alone on
850 # the assumption that the user will use the same editor again.
852 # the assumption that the user will use the same editor again.
851 msgfile = self.opener('last-message.txt', 'wb')
853 msgfile = self.opener('last-message.txt', 'wb')
852 msgfile.write(cctx._text)
854 msgfile.write(cctx._text)
853 msgfile.close()
855 msgfile.close()
854
856
855 try:
857 try:
856 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
858 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
857 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
859 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
858 ret = self.commitctx(cctx, True)
860 ret = self.commitctx(cctx, True)
859 except:
861 except:
860 if edited:
862 if edited:
861 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
863 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
862 self.ui.write(
864 self.ui.write(
863 _('note: commit message saved in %s\n') % msgfn)
865 _('note: commit message saved in %s\n') % msgfn)
864 raise
866 raise
865
867
866 # update dirstate and mergestate
868 # update dirstate and mergestate
867 for f in changes[0] + changes[1]:
869 for f in changes[0] + changes[1]:
868 self.dirstate.normal(f)
870 self.dirstate.normal(f)
869 for f in changes[2]:
871 for f in changes[2]:
870 self.dirstate.forget(f)
872 self.dirstate.forget(f)
871 self.dirstate.setparents(ret)
873 self.dirstate.setparents(ret)
872 ms.reset()
874 ms.reset()
873 finally:
875 finally:
874 wlock.release()
876 wlock.release()
875
877
876 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
878 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
877 return ret
879 return ret
878
880
879 def commitctx(self, ctx, error=False):
881 def commitctx(self, ctx, error=False):
880 """Add a new revision to current repository.
882 """Add a new revision to current repository.
881 Revision information is passed via the context argument.
883 Revision information is passed via the context argument.
882 """
884 """
883
885
884 tr = lock = None
886 tr = lock = None
885 removed = ctx.removed()
887 removed = ctx.removed()
886 p1, p2 = ctx.p1(), ctx.p2()
888 p1, p2 = ctx.p1(), ctx.p2()
887 m1 = p1.manifest().copy()
889 m1 = p1.manifest().copy()
888 m2 = p2.manifest()
890 m2 = p2.manifest()
889 user = ctx.user()
891 user = ctx.user()
890
892
891 lock = self.lock()
893 lock = self.lock()
892 try:
894 try:
893 tr = self.transaction()
895 tr = self.transaction("commit")
894 trp = weakref.proxy(tr)
896 trp = weakref.proxy(tr)
895
897
896 # check in files
898 # check in files
897 new = {}
899 new = {}
898 changed = []
900 changed = []
899 linkrev = len(self)
901 linkrev = len(self)
900 for f in sorted(ctx.modified() + ctx.added()):
902 for f in sorted(ctx.modified() + ctx.added()):
901 self.ui.note(f + "\n")
903 self.ui.note(f + "\n")
902 try:
904 try:
903 fctx = ctx[f]
905 fctx = ctx[f]
904 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
906 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
905 changed)
907 changed)
906 m1.set(f, fctx.flags())
908 m1.set(f, fctx.flags())
907 except OSError, inst:
909 except OSError, inst:
908 self.ui.warn(_("trouble committing %s!\n") % f)
910 self.ui.warn(_("trouble committing %s!\n") % f)
909 raise
911 raise
910 except IOError, inst:
912 except IOError, inst:
911 errcode = getattr(inst, 'errno', errno.ENOENT)
913 errcode = getattr(inst, 'errno', errno.ENOENT)
912 if error or errcode and errcode != errno.ENOENT:
914 if error or errcode and errcode != errno.ENOENT:
913 self.ui.warn(_("trouble committing %s!\n") % f)
915 self.ui.warn(_("trouble committing %s!\n") % f)
914 raise
916 raise
915 else:
917 else:
916 removed.append(f)
918 removed.append(f)
917
919
918 # update manifest
920 # update manifest
919 m1.update(new)
921 m1.update(new)
920 removed = [f for f in sorted(removed) if f in m1 or f in m2]
922 removed = [f for f in sorted(removed) if f in m1 or f in m2]
921 drop = [f for f in removed if f in m1]
923 drop = [f for f in removed if f in m1]
922 for f in drop:
924 for f in drop:
923 del m1[f]
925 del m1[f]
924 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
926 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
925 p2.manifestnode(), (new, drop))
927 p2.manifestnode(), (new, drop))
926
928
927 # update changelog
929 # update changelog
928 self.changelog.delayupdate()
930 self.changelog.delayupdate()
929 n = self.changelog.add(mn, changed + removed, ctx.description(),
931 n = self.changelog.add(mn, changed + removed, ctx.description(),
930 trp, p1.node(), p2.node(),
932 trp, p1.node(), p2.node(),
931 user, ctx.date(), ctx.extra().copy())
933 user, ctx.date(), ctx.extra().copy())
932 p = lambda: self.changelog.writepending() and self.root or ""
934 p = lambda: self.changelog.writepending() and self.root or ""
933 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
935 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
934 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
936 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
935 parent2=xp2, pending=p)
937 parent2=xp2, pending=p)
936 self.changelog.finalize(trp)
938 self.changelog.finalize(trp)
937 tr.close()
939 tr.close()
938
940
939 if self._branchcache:
941 if self._branchcache:
940 self.branchtags()
942 self.branchtags()
941 return n
943 return n
942 finally:
944 finally:
943 del tr
945 del tr
944 lock.release()
946 lock.release()
945
947
946 def destroyed(self):
948 def destroyed(self):
947 '''Inform the repository that nodes have been destroyed.
949 '''Inform the repository that nodes have been destroyed.
948 Intended for use by strip and rollback, so there's a common
950 Intended for use by strip and rollback, so there's a common
949 place for anything that has to be done after destroying history.'''
951 place for anything that has to be done after destroying history.'''
950 # XXX it might be nice if we could take the list of destroyed
952 # XXX it might be nice if we could take the list of destroyed
951 # nodes, but I don't see an easy way for rollback() to do that
953 # nodes, but I don't see an easy way for rollback() to do that
952
954
953 # Ensure the persistent tag cache is updated. Doing it now
955 # Ensure the persistent tag cache is updated. Doing it now
954 # means that the tag cache only has to worry about destroyed
956 # means that the tag cache only has to worry about destroyed
955 # heads immediately after a strip/rollback. That in turn
957 # heads immediately after a strip/rollback. That in turn
956 # guarantees that "cachetip == currenttip" (comparing both rev
958 # guarantees that "cachetip == currenttip" (comparing both rev
957 # and node) always means no nodes have been added or destroyed.
959 # and node) always means no nodes have been added or destroyed.
958
960
959 # XXX this is suboptimal when qrefresh'ing: we strip the current
961 # XXX this is suboptimal when qrefresh'ing: we strip the current
960 # head, refresh the tag cache, then immediately add a new head.
962 # head, refresh the tag cache, then immediately add a new head.
961 # But I think doing it this way is necessary for the "instant
963 # But I think doing it this way is necessary for the "instant
962 # tag cache retrieval" case to work.
964 # tag cache retrieval" case to work.
963 self.invalidatecaches()
965 self.invalidatecaches()
964
966
965 def walk(self, match, node=None):
967 def walk(self, match, node=None):
966 '''
968 '''
967 walk recursively through the directory tree or a given
969 walk recursively through the directory tree or a given
968 changeset, finding all files matched by the match
970 changeset, finding all files matched by the match
969 function
971 function
970 '''
972 '''
971 return self[node].walk(match)
973 return self[node].walk(match)
972
974
973 def status(self, node1='.', node2=None, match=None,
975 def status(self, node1='.', node2=None, match=None,
974 ignored=False, clean=False, unknown=False):
976 ignored=False, clean=False, unknown=False):
975 """return status of files between two nodes or node and working directory
977 """return status of files between two nodes or node and working directory
976
978
977 If node1 is None, use the first dirstate parent instead.
979 If node1 is None, use the first dirstate parent instead.
978 If node2 is None, compare node1 with working directory.
980 If node2 is None, compare node1 with working directory.
979 """
981 """
980
982
981 def mfmatches(ctx):
983 def mfmatches(ctx):
982 mf = ctx.manifest().copy()
984 mf = ctx.manifest().copy()
983 for fn in mf.keys():
985 for fn in mf.keys():
984 if not match(fn):
986 if not match(fn):
985 del mf[fn]
987 del mf[fn]
986 return mf
988 return mf
987
989
988 if isinstance(node1, context.changectx):
990 if isinstance(node1, context.changectx):
989 ctx1 = node1
991 ctx1 = node1
990 else:
992 else:
991 ctx1 = self[node1]
993 ctx1 = self[node1]
992 if isinstance(node2, context.changectx):
994 if isinstance(node2, context.changectx):
993 ctx2 = node2
995 ctx2 = node2
994 else:
996 else:
995 ctx2 = self[node2]
997 ctx2 = self[node2]
996
998
997 working = ctx2.rev() is None
999 working = ctx2.rev() is None
998 parentworking = working and ctx1 == self['.']
1000 parentworking = working and ctx1 == self['.']
999 match = match or matchmod.always(self.root, self.getcwd())
1001 match = match or matchmod.always(self.root, self.getcwd())
1000 listignored, listclean, listunknown = ignored, clean, unknown
1002 listignored, listclean, listunknown = ignored, clean, unknown
1001
1003
1002 # load earliest manifest first for caching reasons
1004 # load earliest manifest first for caching reasons
1003 if not working and ctx2.rev() < ctx1.rev():
1005 if not working and ctx2.rev() < ctx1.rev():
1004 ctx2.manifest()
1006 ctx2.manifest()
1005
1007
1006 if not parentworking:
1008 if not parentworking:
1007 def bad(f, msg):
1009 def bad(f, msg):
1008 if f not in ctx1:
1010 if f not in ctx1:
1009 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1011 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1010 match.bad = bad
1012 match.bad = bad
1011
1013
1012 if working: # we need to scan the working dir
1014 if working: # we need to scan the working dir
1013 subrepos = ctx1.substate.keys()
1015 subrepos = ctx1.substate.keys()
1014 s = self.dirstate.status(match, subrepos, listignored,
1016 s = self.dirstate.status(match, subrepos, listignored,
1015 listclean, listunknown)
1017 listclean, listunknown)
1016 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1018 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1017
1019
1018 # check for any possibly clean files
1020 # check for any possibly clean files
1019 if parentworking and cmp:
1021 if parentworking and cmp:
1020 fixup = []
1022 fixup = []
1021 # do a full compare of any files that might have changed
1023 # do a full compare of any files that might have changed
1022 for f in sorted(cmp):
1024 for f in sorted(cmp):
1023 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1025 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1024 or ctx1[f].cmp(ctx2[f].data())):
1026 or ctx1[f].cmp(ctx2[f].data())):
1025 modified.append(f)
1027 modified.append(f)
1026 else:
1028 else:
1027 fixup.append(f)
1029 fixup.append(f)
1028
1030
1029 if listclean:
1031 if listclean:
1030 clean += fixup
1032 clean += fixup
1031
1033
1032 # update dirstate for files that are actually clean
1034 # update dirstate for files that are actually clean
1033 if fixup:
1035 if fixup:
1034 try:
1036 try:
1035 # updating the dirstate is optional
1037 # updating the dirstate is optional
1036 # so we don't wait on the lock
1038 # so we don't wait on the lock
1037 wlock = self.wlock(False)
1039 wlock = self.wlock(False)
1038 try:
1040 try:
1039 for f in fixup:
1041 for f in fixup:
1040 self.dirstate.normal(f)
1042 self.dirstate.normal(f)
1041 finally:
1043 finally:
1042 wlock.release()
1044 wlock.release()
1043 except error.LockError:
1045 except error.LockError:
1044 pass
1046 pass
1045
1047
1046 if not parentworking:
1048 if not parentworking:
1047 mf1 = mfmatches(ctx1)
1049 mf1 = mfmatches(ctx1)
1048 if working:
1050 if working:
1049 # we are comparing working dir against non-parent
1051 # we are comparing working dir against non-parent
1050 # generate a pseudo-manifest for the working dir
1052 # generate a pseudo-manifest for the working dir
1051 mf2 = mfmatches(self['.'])
1053 mf2 = mfmatches(self['.'])
1052 for f in cmp + modified + added:
1054 for f in cmp + modified + added:
1053 mf2[f] = None
1055 mf2[f] = None
1054 mf2.set(f, ctx2.flags(f))
1056 mf2.set(f, ctx2.flags(f))
1055 for f in removed:
1057 for f in removed:
1056 if f in mf2:
1058 if f in mf2:
1057 del mf2[f]
1059 del mf2[f]
1058 else:
1060 else:
1059 # we are comparing two revisions
1061 # we are comparing two revisions
1060 deleted, unknown, ignored = [], [], []
1062 deleted, unknown, ignored = [], [], []
1061 mf2 = mfmatches(ctx2)
1063 mf2 = mfmatches(ctx2)
1062
1064
1063 modified, added, clean = [], [], []
1065 modified, added, clean = [], [], []
1064 for fn in mf2:
1066 for fn in mf2:
1065 if fn in mf1:
1067 if fn in mf1:
1066 if (mf1.flags(fn) != mf2.flags(fn) or
1068 if (mf1.flags(fn) != mf2.flags(fn) or
1067 (mf1[fn] != mf2[fn] and
1069 (mf1[fn] != mf2[fn] and
1068 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1070 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1069 modified.append(fn)
1071 modified.append(fn)
1070 elif listclean:
1072 elif listclean:
1071 clean.append(fn)
1073 clean.append(fn)
1072 del mf1[fn]
1074 del mf1[fn]
1073 else:
1075 else:
1074 added.append(fn)
1076 added.append(fn)
1075 removed = mf1.keys()
1077 removed = mf1.keys()
1076
1078
1077 r = modified, added, removed, deleted, unknown, ignored, clean
1079 r = modified, added, removed, deleted, unknown, ignored, clean
1078 [l.sort() for l in r]
1080 [l.sort() for l in r]
1079 return r
1081 return r
1080
1082
1081 def add(self, list):
1083 def add(self, list):
1082 wlock = self.wlock()
1084 wlock = self.wlock()
1083 try:
1085 try:
1084 rejected = []
1086 rejected = []
1085 for f in list:
1087 for f in list:
1086 p = self.wjoin(f)
1088 p = self.wjoin(f)
1087 try:
1089 try:
1088 st = os.lstat(p)
1090 st = os.lstat(p)
1089 except:
1091 except:
1090 self.ui.warn(_("%s does not exist!\n") % f)
1092 self.ui.warn(_("%s does not exist!\n") % f)
1091 rejected.append(f)
1093 rejected.append(f)
1092 continue
1094 continue
1093 if st.st_size > 10000000:
1095 if st.st_size > 10000000:
1094 self.ui.warn(_("%s: up to %d MB of RAM may be required "
1096 self.ui.warn(_("%s: up to %d MB of RAM may be required "
1095 "to manage this file\n"
1097 "to manage this file\n"
1096 "(use 'hg revert %s' to cancel the "
1098 "(use 'hg revert %s' to cancel the "
1097 "pending addition)\n")
1099 "pending addition)\n")
1098 % (f, 3 * st.st_size // 1000000, f))
1100 % (f, 3 * st.st_size // 1000000, f))
1099 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1101 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1100 self.ui.warn(_("%s not added: only files and symlinks "
1102 self.ui.warn(_("%s not added: only files and symlinks "
1101 "supported currently\n") % f)
1103 "supported currently\n") % f)
1102 rejected.append(p)
1104 rejected.append(p)
1103 elif self.dirstate[f] in 'amn':
1105 elif self.dirstate[f] in 'amn':
1104 self.ui.warn(_("%s already tracked!\n") % f)
1106 self.ui.warn(_("%s already tracked!\n") % f)
1105 elif self.dirstate[f] == 'r':
1107 elif self.dirstate[f] == 'r':
1106 self.dirstate.normallookup(f)
1108 self.dirstate.normallookup(f)
1107 else:
1109 else:
1108 self.dirstate.add(f)
1110 self.dirstate.add(f)
1109 return rejected
1111 return rejected
1110 finally:
1112 finally:
1111 wlock.release()
1113 wlock.release()
1112
1114
1113 def forget(self, list):
1115 def forget(self, list):
1114 wlock = self.wlock()
1116 wlock = self.wlock()
1115 try:
1117 try:
1116 for f in list:
1118 for f in list:
1117 if self.dirstate[f] != 'a':
1119 if self.dirstate[f] != 'a':
1118 self.ui.warn(_("%s not added!\n") % f)
1120 self.ui.warn(_("%s not added!\n") % f)
1119 else:
1121 else:
1120 self.dirstate.forget(f)
1122 self.dirstate.forget(f)
1121 finally:
1123 finally:
1122 wlock.release()
1124 wlock.release()
1123
1125
1124 def remove(self, list, unlink=False):
1126 def remove(self, list, unlink=False):
1125 if unlink:
1127 if unlink:
1126 for f in list:
1128 for f in list:
1127 try:
1129 try:
1128 util.unlink(self.wjoin(f))
1130 util.unlink(self.wjoin(f))
1129 except OSError, inst:
1131 except OSError, inst:
1130 if inst.errno != errno.ENOENT:
1132 if inst.errno != errno.ENOENT:
1131 raise
1133 raise
1132 wlock = self.wlock()
1134 wlock = self.wlock()
1133 try:
1135 try:
1134 for f in list:
1136 for f in list:
1135 if unlink and os.path.exists(self.wjoin(f)):
1137 if unlink and os.path.exists(self.wjoin(f)):
1136 self.ui.warn(_("%s still exists!\n") % f)
1138 self.ui.warn(_("%s still exists!\n") % f)
1137 elif self.dirstate[f] == 'a':
1139 elif self.dirstate[f] == 'a':
1138 self.dirstate.forget(f)
1140 self.dirstate.forget(f)
1139 elif f not in self.dirstate:
1141 elif f not in self.dirstate:
1140 self.ui.warn(_("%s not tracked!\n") % f)
1142 self.ui.warn(_("%s not tracked!\n") % f)
1141 else:
1143 else:
1142 self.dirstate.remove(f)
1144 self.dirstate.remove(f)
1143 finally:
1145 finally:
1144 wlock.release()
1146 wlock.release()
1145
1147
1146 def undelete(self, list):
1148 def undelete(self, list):
1147 manifests = [self.manifest.read(self.changelog.read(p)[0])
1149 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 for p in self.dirstate.parents() if p != nullid]
1150 for p in self.dirstate.parents() if p != nullid]
1149 wlock = self.wlock()
1151 wlock = self.wlock()
1150 try:
1152 try:
1151 for f in list:
1153 for f in list:
1152 if self.dirstate[f] != 'r':
1154 if self.dirstate[f] != 'r':
1153 self.ui.warn(_("%s not removed!\n") % f)
1155 self.ui.warn(_("%s not removed!\n") % f)
1154 else:
1156 else:
1155 m = f in manifests[0] and manifests[0] or manifests[1]
1157 m = f in manifests[0] and manifests[0] or manifests[1]
1156 t = self.file(f).read(m[f])
1158 t = self.file(f).read(m[f])
1157 self.wwrite(f, t, m.flags(f))
1159 self.wwrite(f, t, m.flags(f))
1158 self.dirstate.normal(f)
1160 self.dirstate.normal(f)
1159 finally:
1161 finally:
1160 wlock.release()
1162 wlock.release()
1161
1163
1162 def copy(self, source, dest):
1164 def copy(self, source, dest):
1163 p = self.wjoin(dest)
1165 p = self.wjoin(dest)
1164 if not (os.path.exists(p) or os.path.islink(p)):
1166 if not (os.path.exists(p) or os.path.islink(p)):
1165 self.ui.warn(_("%s does not exist!\n") % dest)
1167 self.ui.warn(_("%s does not exist!\n") % dest)
1166 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 elif not (os.path.isfile(p) or os.path.islink(p)):
1167 self.ui.warn(_("copy failed: %s is not a file or a "
1169 self.ui.warn(_("copy failed: %s is not a file or a "
1168 "symbolic link\n") % dest)
1170 "symbolic link\n") % dest)
1169 else:
1171 else:
1170 wlock = self.wlock()
1172 wlock = self.wlock()
1171 try:
1173 try:
1172 if self.dirstate[dest] in '?r':
1174 if self.dirstate[dest] in '?r':
1173 self.dirstate.add(dest)
1175 self.dirstate.add(dest)
1174 self.dirstate.copy(source, dest)
1176 self.dirstate.copy(source, dest)
1175 finally:
1177 finally:
1176 wlock.release()
1178 wlock.release()
1177
1179
1178 def heads(self, start=None):
1180 def heads(self, start=None):
1179 heads = self.changelog.heads(start)
1181 heads = self.changelog.heads(start)
1180 # sort the output in rev descending order
1182 # sort the output in rev descending order
1181 heads = [(-self.changelog.rev(h), h) for h in heads]
1183 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 return [n for (r, n) in sorted(heads)]
1184 return [n for (r, n) in sorted(heads)]
1183
1185
1184 def branchheads(self, branch=None, start=None, closed=False):
1186 def branchheads(self, branch=None, start=None, closed=False):
1185 '''return a (possibly filtered) list of heads for the given branch
1187 '''return a (possibly filtered) list of heads for the given branch
1186
1188
1187 Heads are returned in topological order, from newest to oldest.
1189 Heads are returned in topological order, from newest to oldest.
1188 If branch is None, use the dirstate branch.
1190 If branch is None, use the dirstate branch.
1189 If start is not None, return only heads reachable from start.
1191 If start is not None, return only heads reachable from start.
1190 If closed is True, return heads that are marked as closed as well.
1192 If closed is True, return heads that are marked as closed as well.
1191 '''
1193 '''
1192 if branch is None:
1194 if branch is None:
1193 branch = self[None].branch()
1195 branch = self[None].branch()
1194 branches = self.branchmap()
1196 branches = self.branchmap()
1195 if branch not in branches:
1197 if branch not in branches:
1196 return []
1198 return []
1197 # the cache returns heads ordered lowest to highest
1199 # the cache returns heads ordered lowest to highest
1198 bheads = list(reversed(branches[branch]))
1200 bheads = list(reversed(branches[branch]))
1199 if start is not None:
1201 if start is not None:
1200 # filter out the heads that cannot be reached from startrev
1202 # filter out the heads that cannot be reached from startrev
1201 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1203 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1202 bheads = [h for h in bheads if h in fbheads]
1204 bheads = [h for h in bheads if h in fbheads]
1203 if not closed:
1205 if not closed:
1204 bheads = [h for h in bheads if
1206 bheads = [h for h in bheads if
1205 ('close' not in self.changelog.read(h)[5])]
1207 ('close' not in self.changelog.read(h)[5])]
1206 return bheads
1208 return bheads
1207
1209
1208 def branches(self, nodes):
1210 def branches(self, nodes):
1209 if not nodes:
1211 if not nodes:
1210 nodes = [self.changelog.tip()]
1212 nodes = [self.changelog.tip()]
1211 b = []
1213 b = []
1212 for n in nodes:
1214 for n in nodes:
1213 t = n
1215 t = n
1214 while 1:
1216 while 1:
1215 p = self.changelog.parents(n)
1217 p = self.changelog.parents(n)
1216 if p[1] != nullid or p[0] == nullid:
1218 if p[1] != nullid or p[0] == nullid:
1217 b.append((t, n, p[0], p[1]))
1219 b.append((t, n, p[0], p[1]))
1218 break
1220 break
1219 n = p[0]
1221 n = p[0]
1220 return b
1222 return b
1221
1223
1222 def between(self, pairs):
1224 def between(self, pairs):
1223 r = []
1225 r = []
1224
1226
1225 for top, bottom in pairs:
1227 for top, bottom in pairs:
1226 n, l, i = top, [], 0
1228 n, l, i = top, [], 0
1227 f = 1
1229 f = 1
1228
1230
1229 while n != bottom and n != nullid:
1231 while n != bottom and n != nullid:
1230 p = self.changelog.parents(n)[0]
1232 p = self.changelog.parents(n)[0]
1231 if i == f:
1233 if i == f:
1232 l.append(n)
1234 l.append(n)
1233 f = f * 2
1235 f = f * 2
1234 n = p
1236 n = p
1235 i += 1
1237 i += 1
1236
1238
1237 r.append(l)
1239 r.append(l)
1238
1240
1239 return r
1241 return r
1240
1242
1241 def findincoming(self, remote, base=None, heads=None, force=False):
1243 def findincoming(self, remote, base=None, heads=None, force=False):
1242 """Return list of roots of the subsets of missing nodes from remote
1244 """Return list of roots of the subsets of missing nodes from remote
1243
1245
1244 If base dict is specified, assume that these nodes and their parents
1246 If base dict is specified, assume that these nodes and their parents
1245 exist on the remote side and that no child of a node of base exists
1247 exist on the remote side and that no child of a node of base exists
1246 in both remote and self.
1248 in both remote and self.
1247 Furthermore base will be updated to include the nodes that exists
1249 Furthermore base will be updated to include the nodes that exists
1248 in self and remote but no children exists in self and remote.
1250 in self and remote but no children exists in self and remote.
1249 If a list of heads is specified, return only nodes which are heads
1251 If a list of heads is specified, return only nodes which are heads
1250 or ancestors of these heads.
1252 or ancestors of these heads.
1251
1253
1252 All the ancestors of base are in self and in remote.
1254 All the ancestors of base are in self and in remote.
1253 All the descendants of the list returned are missing in self.
1255 All the descendants of the list returned are missing in self.
1254 (and so we know that the rest of the nodes are missing in remote, see
1256 (and so we know that the rest of the nodes are missing in remote, see
1255 outgoing)
1257 outgoing)
1256 """
1258 """
1257 return self.findcommonincoming(remote, base, heads, force)[1]
1259 return self.findcommonincoming(remote, base, heads, force)[1]
1258
1260
1259 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1261 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1260 """Return a tuple (common, missing roots, heads) used to identify
1262 """Return a tuple (common, missing roots, heads) used to identify
1261 missing nodes from remote.
1263 missing nodes from remote.
1262
1264
1263 If base dict is specified, assume that these nodes and their parents
1265 If base dict is specified, assume that these nodes and their parents
1264 exist on the remote side and that no child of a node of base exists
1266 exist on the remote side and that no child of a node of base exists
1265 in both remote and self.
1267 in both remote and self.
1266 Furthermore base will be updated to include the nodes that exists
1268 Furthermore base will be updated to include the nodes that exists
1267 in self and remote but no children exists in self and remote.
1269 in self and remote but no children exists in self and remote.
1268 If a list of heads is specified, return only nodes which are heads
1270 If a list of heads is specified, return only nodes which are heads
1269 or ancestors of these heads.
1271 or ancestors of these heads.
1270
1272
1271 All the ancestors of base are in self and in remote.
1273 All the ancestors of base are in self and in remote.
1272 """
1274 """
1273 m = self.changelog.nodemap
1275 m = self.changelog.nodemap
1274 search = []
1276 search = []
1275 fetch = set()
1277 fetch = set()
1276 seen = set()
1278 seen = set()
1277 seenbranch = set()
1279 seenbranch = set()
1278 if base is None:
1280 if base is None:
1279 base = {}
1281 base = {}
1280
1282
1281 if not heads:
1283 if not heads:
1282 heads = remote.heads()
1284 heads = remote.heads()
1283
1285
1284 if self.changelog.tip() == nullid:
1286 if self.changelog.tip() == nullid:
1285 base[nullid] = 1
1287 base[nullid] = 1
1286 if heads != [nullid]:
1288 if heads != [nullid]:
1287 return [nullid], [nullid], list(heads)
1289 return [nullid], [nullid], list(heads)
1288 return [nullid], [], []
1290 return [nullid], [], []
1289
1291
1290 # assume we're closer to the tip than the root
1292 # assume we're closer to the tip than the root
1291 # and start by examining the heads
1293 # and start by examining the heads
1292 self.ui.status(_("searching for changes\n"))
1294 self.ui.status(_("searching for changes\n"))
1293
1295
1294 unknown = []
1296 unknown = []
1295 for h in heads:
1297 for h in heads:
1296 if h not in m:
1298 if h not in m:
1297 unknown.append(h)
1299 unknown.append(h)
1298 else:
1300 else:
1299 base[h] = 1
1301 base[h] = 1
1300
1302
1301 heads = unknown
1303 heads = unknown
1302 if not unknown:
1304 if not unknown:
1303 return base.keys(), [], []
1305 return base.keys(), [], []
1304
1306
1305 req = set(unknown)
1307 req = set(unknown)
1306 reqcnt = 0
1308 reqcnt = 0
1307
1309
1308 # search through remote branches
1310 # search through remote branches
1309 # a 'branch' here is a linear segment of history, with four parts:
1311 # a 'branch' here is a linear segment of history, with four parts:
1310 # head, root, first parent, second parent
1312 # head, root, first parent, second parent
1311 # (a branch always has two parents (or none) by definition)
1313 # (a branch always has two parents (or none) by definition)
1312 unknown = remote.branches(unknown)
1314 unknown = remote.branches(unknown)
1313 while unknown:
1315 while unknown:
1314 r = []
1316 r = []
1315 while unknown:
1317 while unknown:
1316 n = unknown.pop(0)
1318 n = unknown.pop(0)
1317 if n[0] in seen:
1319 if n[0] in seen:
1318 continue
1320 continue
1319
1321
1320 self.ui.debug("examining %s:%s\n"
1322 self.ui.debug("examining %s:%s\n"
1321 % (short(n[0]), short(n[1])))
1323 % (short(n[0]), short(n[1])))
1322 if n[0] == nullid: # found the end of the branch
1324 if n[0] == nullid: # found the end of the branch
1323 pass
1325 pass
1324 elif n in seenbranch:
1326 elif n in seenbranch:
1325 self.ui.debug("branch already found\n")
1327 self.ui.debug("branch already found\n")
1326 continue
1328 continue
1327 elif n[1] and n[1] in m: # do we know the base?
1329 elif n[1] and n[1] in m: # do we know the base?
1328 self.ui.debug("found incomplete branch %s:%s\n"
1330 self.ui.debug("found incomplete branch %s:%s\n"
1329 % (short(n[0]), short(n[1])))
1331 % (short(n[0]), short(n[1])))
1330 search.append(n[0:2]) # schedule branch range for scanning
1332 search.append(n[0:2]) # schedule branch range for scanning
1331 seenbranch.add(n)
1333 seenbranch.add(n)
1332 else:
1334 else:
1333 if n[1] not in seen and n[1] not in fetch:
1335 if n[1] not in seen and n[1] not in fetch:
1334 if n[2] in m and n[3] in m:
1336 if n[2] in m and n[3] in m:
1335 self.ui.debug("found new changeset %s\n" %
1337 self.ui.debug("found new changeset %s\n" %
1336 short(n[1]))
1338 short(n[1]))
1337 fetch.add(n[1]) # earliest unknown
1339 fetch.add(n[1]) # earliest unknown
1338 for p in n[2:4]:
1340 for p in n[2:4]:
1339 if p in m:
1341 if p in m:
1340 base[p] = 1 # latest known
1342 base[p] = 1 # latest known
1341
1343
1342 for p in n[2:4]:
1344 for p in n[2:4]:
1343 if p not in req and p not in m:
1345 if p not in req and p not in m:
1344 r.append(p)
1346 r.append(p)
1345 req.add(p)
1347 req.add(p)
1346 seen.add(n[0])
1348 seen.add(n[0])
1347
1349
1348 if r:
1350 if r:
1349 reqcnt += 1
1351 reqcnt += 1
1350 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1352 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1351 self.ui.debug("request %d: %s\n" %
1353 self.ui.debug("request %d: %s\n" %
1352 (reqcnt, " ".join(map(short, r))))
1354 (reqcnt, " ".join(map(short, r))))
1353 for p in xrange(0, len(r), 10):
1355 for p in xrange(0, len(r), 10):
1354 for b in remote.branches(r[p:p + 10]):
1356 for b in remote.branches(r[p:p + 10]):
1355 self.ui.debug("received %s:%s\n" %
1357 self.ui.debug("received %s:%s\n" %
1356 (short(b[0]), short(b[1])))
1358 (short(b[0]), short(b[1])))
1357 unknown.append(b)
1359 unknown.append(b)
1358
1360
1359 # do binary search on the branches we found
1361 # do binary search on the branches we found
1360 while search:
1362 while search:
1361 newsearch = []
1363 newsearch = []
1362 reqcnt += 1
1364 reqcnt += 1
1363 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1365 self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
1364 for n, l in zip(search, remote.between(search)):
1366 for n, l in zip(search, remote.between(search)):
1365 l.append(n[1])
1367 l.append(n[1])
1366 p = n[0]
1368 p = n[0]
1367 f = 1
1369 f = 1
1368 for i in l:
1370 for i in l:
1369 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1371 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1370 if i in m:
1372 if i in m:
1371 if f <= 2:
1373 if f <= 2:
1372 self.ui.debug("found new branch changeset %s\n" %
1374 self.ui.debug("found new branch changeset %s\n" %
1373 short(p))
1375 short(p))
1374 fetch.add(p)
1376 fetch.add(p)
1375 base[i] = 1
1377 base[i] = 1
1376 else:
1378 else:
1377 self.ui.debug("narrowed branch search to %s:%s\n"
1379 self.ui.debug("narrowed branch search to %s:%s\n"
1378 % (short(p), short(i)))
1380 % (short(p), short(i)))
1379 newsearch.append((p, i))
1381 newsearch.append((p, i))
1380 break
1382 break
1381 p, f = i, f * 2
1383 p, f = i, f * 2
1382 search = newsearch
1384 search = newsearch
1383
1385
1384 # sanity check our fetch list
1386 # sanity check our fetch list
1385 for f in fetch:
1387 for f in fetch:
1386 if f in m:
1388 if f in m:
1387 raise error.RepoError(_("already have changeset ")
1389 raise error.RepoError(_("already have changeset ")
1388 + short(f[:4]))
1390 + short(f[:4]))
1389
1391
1390 if base.keys() == [nullid]:
1392 if base.keys() == [nullid]:
1391 if force:
1393 if force:
1392 self.ui.warn(_("warning: repository is unrelated\n"))
1394 self.ui.warn(_("warning: repository is unrelated\n"))
1393 else:
1395 else:
1394 raise util.Abort(_("repository is unrelated"))
1396 raise util.Abort(_("repository is unrelated"))
1395
1397
1396 self.ui.debug("found new changesets starting at " +
1398 self.ui.debug("found new changesets starting at " +
1397 " ".join([short(f) for f in fetch]) + "\n")
1399 " ".join([short(f) for f in fetch]) + "\n")
1398
1400
1399 self.ui.progress(_('searching'), None)
1401 self.ui.progress(_('searching'), None)
1400 self.ui.debug("%d total queries\n" % reqcnt)
1402 self.ui.debug("%d total queries\n" % reqcnt)
1401
1403
1402 return base.keys(), list(fetch), heads
1404 return base.keys(), list(fetch), heads
1403
1405
1404 def findoutgoing(self, remote, base=None, heads=None, force=False):
1406 def findoutgoing(self, remote, base=None, heads=None, force=False):
1405 """Return list of nodes that are roots of subsets not in remote
1407 """Return list of nodes that are roots of subsets not in remote
1406
1408
1407 If base dict is specified, assume that these nodes and their parents
1409 If base dict is specified, assume that these nodes and their parents
1408 exist on the remote side.
1410 exist on the remote side.
1409 If a list of heads is specified, return only nodes which are heads
1411 If a list of heads is specified, return only nodes which are heads
1410 or ancestors of these heads, and return a second element which
1412 or ancestors of these heads, and return a second element which
1411 contains all remote heads which get new children.
1413 contains all remote heads which get new children.
1412 """
1414 """
1413 if base is None:
1415 if base is None:
1414 base = {}
1416 base = {}
1415 self.findincoming(remote, base, heads, force=force)
1417 self.findincoming(remote, base, heads, force=force)
1416
1418
1417 self.ui.debug("common changesets up to "
1419 self.ui.debug("common changesets up to "
1418 + " ".join(map(short, base.keys())) + "\n")
1420 + " ".join(map(short, base.keys())) + "\n")
1419
1421
1420 remain = set(self.changelog.nodemap)
1422 remain = set(self.changelog.nodemap)
1421
1423
1422 # prune everything remote has from the tree
1424 # prune everything remote has from the tree
1423 remain.remove(nullid)
1425 remain.remove(nullid)
1424 remove = base.keys()
1426 remove = base.keys()
1425 while remove:
1427 while remove:
1426 n = remove.pop(0)
1428 n = remove.pop(0)
1427 if n in remain:
1429 if n in remain:
1428 remain.remove(n)
1430 remain.remove(n)
1429 for p in self.changelog.parents(n):
1431 for p in self.changelog.parents(n):
1430 remove.append(p)
1432 remove.append(p)
1431
1433
1432 # find every node whose parents have been pruned
1434 # find every node whose parents have been pruned
1433 subset = []
1435 subset = []
1434 # find every remote head that will get new children
1436 # find every remote head that will get new children
1435 updated_heads = set()
1437 updated_heads = set()
1436 for n in remain:
1438 for n in remain:
1437 p1, p2 = self.changelog.parents(n)
1439 p1, p2 = self.changelog.parents(n)
1438 if p1 not in remain and p2 not in remain:
1440 if p1 not in remain and p2 not in remain:
1439 subset.append(n)
1441 subset.append(n)
1440 if heads:
1442 if heads:
1441 if p1 in heads:
1443 if p1 in heads:
1442 updated_heads.add(p1)
1444 updated_heads.add(p1)
1443 if p2 in heads:
1445 if p2 in heads:
1444 updated_heads.add(p2)
1446 updated_heads.add(p2)
1445
1447
1446 # this is the set of all roots we have to push
1448 # this is the set of all roots we have to push
1447 if heads:
1449 if heads:
1448 return subset, list(updated_heads)
1450 return subset, list(updated_heads)
1449 else:
1451 else:
1450 return subset
1452 return subset
1451
1453
1452 def pull(self, remote, heads=None, force=False):
1454 def pull(self, remote, heads=None, force=False):
1453 lock = self.lock()
1455 lock = self.lock()
1454 try:
1456 try:
1455 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1457 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1456 force=force)
1458 force=force)
1457 if not fetch:
1459 if not fetch:
1458 self.ui.status(_("no changes found\n"))
1460 self.ui.status(_("no changes found\n"))
1459 return 0
1461 return 0
1460
1462
1461 if fetch == [nullid]:
1463 if fetch == [nullid]:
1462 self.ui.status(_("requesting all changes\n"))
1464 self.ui.status(_("requesting all changes\n"))
1463 elif heads is None and remote.capable('changegroupsubset'):
1465 elif heads is None and remote.capable('changegroupsubset'):
1464 # issue1320, avoid a race if remote changed after discovery
1466 # issue1320, avoid a race if remote changed after discovery
1465 heads = rheads
1467 heads = rheads
1466
1468
1467 if heads is None:
1469 if heads is None:
1468 cg = remote.changegroup(fetch, 'pull')
1470 cg = remote.changegroup(fetch, 'pull')
1469 else:
1471 else:
1470 if not remote.capable('changegroupsubset'):
1472 if not remote.capable('changegroupsubset'):
1471 raise util.Abort(_("Partial pull cannot be done because "
1473 raise util.Abort(_("Partial pull cannot be done because "
1472 "other repository doesn't support "
1474 "other repository doesn't support "
1473 "changegroupsubset."))
1475 "changegroupsubset."))
1474 cg = remote.changegroupsubset(fetch, heads, 'pull')
1476 cg = remote.changegroupsubset(fetch, heads, 'pull')
1475 return self.addchangegroup(cg, 'pull', remote.url())
1477 return self.addchangegroup(cg, 'pull', remote.url())
1476 finally:
1478 finally:
1477 lock.release()
1479 lock.release()
1478
1480
1479 def push(self, remote, force=False, revs=None):
1481 def push(self, remote, force=False, revs=None):
1480 # there are two ways to push to remote repo:
1482 # there are two ways to push to remote repo:
1481 #
1483 #
1482 # addchangegroup assumes local user can lock remote
1484 # addchangegroup assumes local user can lock remote
1483 # repo (local filesystem, old ssh servers).
1485 # repo (local filesystem, old ssh servers).
1484 #
1486 #
1485 # unbundle assumes local user cannot lock remote repo (new ssh
1487 # unbundle assumes local user cannot lock remote repo (new ssh
1486 # servers, http servers).
1488 # servers, http servers).
1487
1489
1488 if remote.capable('unbundle'):
1490 if remote.capable('unbundle'):
1489 return self.push_unbundle(remote, force, revs)
1491 return self.push_unbundle(remote, force, revs)
1490 return self.push_addchangegroup(remote, force, revs)
1492 return self.push_addchangegroup(remote, force, revs)
1491
1493
1492 def prepush(self, remote, force, revs):
1494 def prepush(self, remote, force, revs):
1493 '''Analyze the local and remote repositories and determine which
1495 '''Analyze the local and remote repositories and determine which
1494 changesets need to be pushed to the remote. Return a tuple
1496 changesets need to be pushed to the remote. Return a tuple
1495 (changegroup, remoteheads). changegroup is a readable file-like
1497 (changegroup, remoteheads). changegroup is a readable file-like
1496 object whose read() returns successive changegroup chunks ready to
1498 object whose read() returns successive changegroup chunks ready to
1497 be sent over the wire. remoteheads is the list of remote heads.
1499 be sent over the wire. remoteheads is the list of remote heads.
1498 '''
1500 '''
1499 common = {}
1501 common = {}
1500 remote_heads = remote.heads()
1502 remote_heads = remote.heads()
1501 inc = self.findincoming(remote, common, remote_heads, force=force)
1503 inc = self.findincoming(remote, common, remote_heads, force=force)
1502
1504
1503 cl = self.changelog
1505 cl = self.changelog
1504 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1506 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1505 msng_cl, bases, heads = cl.nodesbetween(update, revs)
1507 msng_cl, bases, heads = cl.nodesbetween(update, revs)
1506
1508
1507 outgoingnodeset = set(msng_cl)
1509 outgoingnodeset = set(msng_cl)
1508 # compute set of nodes which, if they were a head before, no longer are
1510 # compute set of nodes which, if they were a head before, no longer are
1509 nolongeraheadnodeset = set(p for n in msng_cl for p in cl.parents(n))
1511 nolongeraheadnodeset = set(p for n in msng_cl for p in cl.parents(n))
1510
1512
1511 def checkbranch(lheads, rheads, branchname=None):
1513 def checkbranch(lheads, rheads, branchname=None):
1512 '''
1514 '''
1513 check whether there are more local heads than remote heads on
1515 check whether there are more local heads than remote heads on
1514 a specific branch.
1516 a specific branch.
1515
1517
1516 lheads: local branch heads
1518 lheads: local branch heads
1517 rheads: remote branch heads
1519 rheads: remote branch heads
1518 '''
1520 '''
1519 newlheads = [n for n in lheads if n in outgoingnodeset]
1521 newlheads = [n for n in lheads if n in outgoingnodeset]
1520 formerrheads = [n for n in rheads if n in nolongeraheadnodeset]
1522 formerrheads = [n for n in rheads if n in nolongeraheadnodeset]
1521 if len(newlheads) > len(formerrheads):
1523 if len(newlheads) > len(formerrheads):
1522 # we add more new heads than we demote former heads to non-head
1524 # we add more new heads than we demote former heads to non-head
1523 if branchname is not None:
1525 if branchname is not None:
1524 msg = _("abort: push creates new remote heads"
1526 msg = _("abort: push creates new remote heads"
1525 " on branch '%s'!\n") % branchname
1527 " on branch '%s'!\n") % branchname
1526 else:
1528 else:
1527 msg = _("abort: push creates new remote heads!\n")
1529 msg = _("abort: push creates new remote heads!\n")
1528 self.ui.warn(msg)
1530 self.ui.warn(msg)
1529 if len(lheads) > len(rheads):
1531 if len(lheads) > len(rheads):
1530 self.ui.status(_("(did you forget to merge?"
1532 self.ui.status(_("(did you forget to merge?"
1531 " use push -f to force)\n"))
1533 " use push -f to force)\n"))
1532 else:
1534 else:
1533 self.ui.status(_("(you should pull and merge or"
1535 self.ui.status(_("(you should pull and merge or"
1534 " use push -f to force)\n"))
1536 " use push -f to force)\n"))
1535 return False
1537 return False
1536 return True
1538 return True
1537
1539
1538 if not bases:
1540 if not bases:
1539 self.ui.status(_("no changes found\n"))
1541 self.ui.status(_("no changes found\n"))
1540 return None, 1
1542 return None, 1
1541 elif not force:
1543 elif not force:
1542 # Check for each named branch if we're creating new remote heads.
1544 # Check for each named branch if we're creating new remote heads.
1543 # To be a remote head after push, node must be either:
1545 # To be a remote head after push, node must be either:
1544 # - unknown locally
1546 # - unknown locally
1545 # - a local outgoing head descended from update
1547 # - a local outgoing head descended from update
1546 # - a remote head that's known locally and not
1548 # - a remote head that's known locally and not
1547 # ancestral to an outgoing head
1549 # ancestral to an outgoing head
1548 #
1550 #
1549 # New named branches cannot be created without --force.
1551 # New named branches cannot be created without --force.
1550
1552
1551 if remote_heads != [nullid]:
1553 if remote_heads != [nullid]:
1552 if remote.capable('branchmap'):
1554 if remote.capable('branchmap'):
1553 remotebrheads = remote.branchmap()
1555 remotebrheads = remote.branchmap()
1554
1556
1555 if not revs:
1557 if not revs:
1556 localbrheads = self.branchmap()
1558 localbrheads = self.branchmap()
1557 else:
1559 else:
1558 localbrheads = {}
1560 localbrheads = {}
1559 ctxgen = (self[n] for n in msng_cl)
1561 ctxgen = (self[n] for n in msng_cl)
1560 self._updatebranchcache(localbrheads, ctxgen)
1562 self._updatebranchcache(localbrheads, ctxgen)
1561
1563
1562 newbranches = list(set(localbrheads) - set(remotebrheads))
1564 newbranches = list(set(localbrheads) - set(remotebrheads))
1563 if newbranches: # new branch requires --force
1565 if newbranches: # new branch requires --force
1564 branchnames = ', '.join("%s" % b for b in newbranches)
1566 branchnames = ', '.join("%s" % b for b in newbranches)
1565 self.ui.warn(_("abort: push creates "
1567 self.ui.warn(_("abort: push creates "
1566 "new remote branches: %s!\n")
1568 "new remote branches: %s!\n")
1567 % branchnames)
1569 % branchnames)
1568 # propose 'push -b .' in the msg too?
1570 # propose 'push -b .' in the msg too?
1569 self.ui.status(_("(use 'hg push -f' to force)\n"))
1571 self.ui.status(_("(use 'hg push -f' to force)\n"))
1570 return None, 0
1572 return None, 0
1571 for branch, lheads in localbrheads.iteritems():
1573 for branch, lheads in localbrheads.iteritems():
1572 if branch in remotebrheads:
1574 if branch in remotebrheads:
1573 rheads = remotebrheads[branch]
1575 rheads = remotebrheads[branch]
1574 if not checkbranch(lheads, rheads, branch):
1576 if not checkbranch(lheads, rheads, branch):
1575 return None, 0
1577 return None, 0
1576 else:
1578 else:
1577 if not checkbranch(heads, remote_heads):
1579 if not checkbranch(heads, remote_heads):
1578 return None, 0
1580 return None, 0
1579
1581
1580 if inc:
1582 if inc:
1581 self.ui.warn(_("note: unsynced remote changes!\n"))
1583 self.ui.warn(_("note: unsynced remote changes!\n"))
1582
1584
1583
1585
1584 if revs is None:
1586 if revs is None:
1585 # use the fast path, no race possible on push
1587 # use the fast path, no race possible on push
1586 nodes = cl.findmissing(common.keys())
1588 nodes = cl.findmissing(common.keys())
1587 cg = self._changegroup(nodes, 'push')
1589 cg = self._changegroup(nodes, 'push')
1588 else:
1590 else:
1589 cg = self.changegroupsubset(update, revs, 'push')
1591 cg = self.changegroupsubset(update, revs, 'push')
1590 return cg, remote_heads
1592 return cg, remote_heads
1591
1593
1592 def push_addchangegroup(self, remote, force, revs):
1594 def push_addchangegroup(self, remote, force, revs):
1593 lock = remote.lock()
1595 lock = remote.lock()
1594 try:
1596 try:
1595 ret = self.prepush(remote, force, revs)
1597 ret = self.prepush(remote, force, revs)
1596 if ret[0] is not None:
1598 if ret[0] is not None:
1597 cg, remote_heads = ret
1599 cg, remote_heads = ret
1598 return remote.addchangegroup(cg, 'push', self.url())
1600 return remote.addchangegroup(cg, 'push', self.url())
1599 return ret[1]
1601 return ret[1]
1600 finally:
1602 finally:
1601 lock.release()
1603 lock.release()
1602
1604
1603 def push_unbundle(self, remote, force, revs):
1605 def push_unbundle(self, remote, force, revs):
1604 # local repo finds heads on server, finds out what revs it
1606 # local repo finds heads on server, finds out what revs it
1605 # must push. once revs transferred, if server finds it has
1607 # must push. once revs transferred, if server finds it has
1606 # different heads (someone else won commit/push race), server
1608 # different heads (someone else won commit/push race), server
1607 # aborts.
1609 # aborts.
1608
1610
1609 ret = self.prepush(remote, force, revs)
1611 ret = self.prepush(remote, force, revs)
1610 if ret[0] is not None:
1612 if ret[0] is not None:
1611 cg, remote_heads = ret
1613 cg, remote_heads = ret
1612 if force:
1614 if force:
1613 remote_heads = ['force']
1615 remote_heads = ['force']
1614 return remote.unbundle(cg, remote_heads, 'push')
1616 return remote.unbundle(cg, remote_heads, 'push')
1615 return ret[1]
1617 return ret[1]
1616
1618
1617 def changegroupinfo(self, nodes, source):
1619 def changegroupinfo(self, nodes, source):
1618 if self.ui.verbose or source == 'bundle':
1620 if self.ui.verbose or source == 'bundle':
1619 self.ui.status(_("%d changesets found\n") % len(nodes))
1621 self.ui.status(_("%d changesets found\n") % len(nodes))
1620 if self.ui.debugflag:
1622 if self.ui.debugflag:
1621 self.ui.debug("list of changesets:\n")
1623 self.ui.debug("list of changesets:\n")
1622 for node in nodes:
1624 for node in nodes:
1623 self.ui.debug("%s\n" % hex(node))
1625 self.ui.debug("%s\n" % hex(node))
1624
1626
1625 def changegroupsubset(self, bases, heads, source, extranodes=None):
1627 def changegroupsubset(self, bases, heads, source, extranodes=None):
1626 """Compute a changegroup consisting of all the nodes that are
1628 """Compute a changegroup consisting of all the nodes that are
1627 descendents of any of the bases and ancestors of any of the heads.
1629 descendents of any of the bases and ancestors of any of the heads.
1628 Return a chunkbuffer object whose read() method will return
1630 Return a chunkbuffer object whose read() method will return
1629 successive changegroup chunks.
1631 successive changegroup chunks.
1630
1632
1631 It is fairly complex as determining which filenodes and which
1633 It is fairly complex as determining which filenodes and which
1632 manifest nodes need to be included for the changeset to be complete
1634 manifest nodes need to be included for the changeset to be complete
1633 is non-trivial.
1635 is non-trivial.
1634
1636
1635 Another wrinkle is doing the reverse, figuring out which changeset in
1637 Another wrinkle is doing the reverse, figuring out which changeset in
1636 the changegroup a particular filenode or manifestnode belongs to.
1638 the changegroup a particular filenode or manifestnode belongs to.
1637
1639
1638 The caller can specify some nodes that must be included in the
1640 The caller can specify some nodes that must be included in the
1639 changegroup using the extranodes argument. It should be a dict
1641 changegroup using the extranodes argument. It should be a dict
1640 where the keys are the filenames (or 1 for the manifest), and the
1642 where the keys are the filenames (or 1 for the manifest), and the
1641 values are lists of (node, linknode) tuples, where node is a wanted
1643 values are lists of (node, linknode) tuples, where node is a wanted
1642 node and linknode is the changelog node that should be transmitted as
1644 node and linknode is the changelog node that should be transmitted as
1643 the linkrev.
1645 the linkrev.
1644 """
1646 """
1645
1647
1646 # Set up some initial variables
1648 # Set up some initial variables
1647 # Make it easy to refer to self.changelog
1649 # Make it easy to refer to self.changelog
1648 cl = self.changelog
1650 cl = self.changelog
1649 # msng is short for missing - compute the list of changesets in this
1651 # msng is short for missing - compute the list of changesets in this
1650 # changegroup.
1652 # changegroup.
1651 if not bases:
1653 if not bases:
1652 bases = [nullid]
1654 bases = [nullid]
1653 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1655 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1654
1656
1655 if extranodes is None:
1657 if extranodes is None:
1656 # can we go through the fast path ?
1658 # can we go through the fast path ?
1657 heads.sort()
1659 heads.sort()
1658 allheads = self.heads()
1660 allheads = self.heads()
1659 allheads.sort()
1661 allheads.sort()
1660 if heads == allheads:
1662 if heads == allheads:
1661 return self._changegroup(msng_cl_lst, source)
1663 return self._changegroup(msng_cl_lst, source)
1662
1664
1663 # slow path
1665 # slow path
1664 self.hook('preoutgoing', throw=True, source=source)
1666 self.hook('preoutgoing', throw=True, source=source)
1665
1667
1666 self.changegroupinfo(msng_cl_lst, source)
1668 self.changegroupinfo(msng_cl_lst, source)
1667 # Some bases may turn out to be superfluous, and some heads may be
1669 # Some bases may turn out to be superfluous, and some heads may be
1668 # too. nodesbetween will return the minimal set of bases and heads
1670 # too. nodesbetween will return the minimal set of bases and heads
1669 # necessary to re-create the changegroup.
1671 # necessary to re-create the changegroup.
1670
1672
1671 # Known heads are the list of heads that it is assumed the recipient
1673 # Known heads are the list of heads that it is assumed the recipient
1672 # of this changegroup will know about.
1674 # of this changegroup will know about.
1673 knownheads = set()
1675 knownheads = set()
1674 # We assume that all parents of bases are known heads.
1676 # We assume that all parents of bases are known heads.
1675 for n in bases:
1677 for n in bases:
1676 knownheads.update(cl.parents(n))
1678 knownheads.update(cl.parents(n))
1677 knownheads.discard(nullid)
1679 knownheads.discard(nullid)
1678 knownheads = list(knownheads)
1680 knownheads = list(knownheads)
1679 if knownheads:
1681 if knownheads:
1680 # Now that we know what heads are known, we can compute which
1682 # Now that we know what heads are known, we can compute which
1681 # changesets are known. The recipient must know about all
1683 # changesets are known. The recipient must know about all
1682 # changesets required to reach the known heads from the null
1684 # changesets required to reach the known heads from the null
1683 # changeset.
1685 # changeset.
1684 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1686 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1685 junk = None
1687 junk = None
1686 # Transform the list into a set.
1688 # Transform the list into a set.
1687 has_cl_set = set(has_cl_set)
1689 has_cl_set = set(has_cl_set)
1688 else:
1690 else:
1689 # If there were no known heads, the recipient cannot be assumed to
1691 # If there were no known heads, the recipient cannot be assumed to
1690 # know about any changesets.
1692 # know about any changesets.
1691 has_cl_set = set()
1693 has_cl_set = set()
1692
1694
1693 # Make it easy to refer to self.manifest
1695 # Make it easy to refer to self.manifest
1694 mnfst = self.manifest
1696 mnfst = self.manifest
1695 # We don't know which manifests are missing yet
1697 # We don't know which manifests are missing yet
1696 msng_mnfst_set = {}
1698 msng_mnfst_set = {}
1697 # Nor do we know which filenodes are missing.
1699 # Nor do we know which filenodes are missing.
1698 msng_filenode_set = {}
1700 msng_filenode_set = {}
1699
1701
1700 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1702 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1701 junk = None
1703 junk = None
1702
1704
1703 # A changeset always belongs to itself, so the changenode lookup
1705 # A changeset always belongs to itself, so the changenode lookup
1704 # function for a changenode is identity.
1706 # function for a changenode is identity.
1705 def identity(x):
1707 def identity(x):
1706 return x
1708 return x
1707
1709
1708 # If we determine that a particular file or manifest node must be a
1710 # If we determine that a particular file or manifest node must be a
1709 # node that the recipient of the changegroup will already have, we can
1711 # node that the recipient of the changegroup will already have, we can
1710 # also assume the recipient will have all the parents. This function
1712 # also assume the recipient will have all the parents. This function
1711 # prunes them from the set of missing nodes.
1713 # prunes them from the set of missing nodes.
1712 def prune_parents(revlog, hasset, msngset):
1714 def prune_parents(revlog, hasset, msngset):
1713 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1715 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1714 msngset.pop(revlog.node(r), None)
1716 msngset.pop(revlog.node(r), None)
1715
1717
1716 # Use the information collected in collect_manifests_and_files to say
1718 # Use the information collected in collect_manifests_and_files to say
1717 # which changenode any manifestnode belongs to.
1719 # which changenode any manifestnode belongs to.
1718 def lookup_manifest_link(mnfstnode):
1720 def lookup_manifest_link(mnfstnode):
1719 return msng_mnfst_set[mnfstnode]
1721 return msng_mnfst_set[mnfstnode]
1720
1722
1721 # A function generating function that sets up the initial environment
1723 # A function generating function that sets up the initial environment
1722 # the inner function.
1724 # the inner function.
1723 def filenode_collector(changedfiles):
1725 def filenode_collector(changedfiles):
1724 # This gathers information from each manifestnode included in the
1726 # This gathers information from each manifestnode included in the
1725 # changegroup about which filenodes the manifest node references
1727 # changegroup about which filenodes the manifest node references
1726 # so we can include those in the changegroup too.
1728 # so we can include those in the changegroup too.
1727 #
1729 #
1728 # It also remembers which changenode each filenode belongs to. It
1730 # It also remembers which changenode each filenode belongs to. It
1729 # does this by assuming the a filenode belongs to the changenode
1731 # does this by assuming the a filenode belongs to the changenode
1730 # the first manifest that references it belongs to.
1732 # the first manifest that references it belongs to.
1731 def collect_msng_filenodes(mnfstnode):
1733 def collect_msng_filenodes(mnfstnode):
1732 r = mnfst.rev(mnfstnode)
1734 r = mnfst.rev(mnfstnode)
1733 if r - 1 in mnfst.parentrevs(r):
1735 if r - 1 in mnfst.parentrevs(r):
1734 # If the previous rev is one of the parents,
1736 # If the previous rev is one of the parents,
1735 # we only need to see a diff.
1737 # we only need to see a diff.
1736 deltamf = mnfst.readdelta(mnfstnode)
1738 deltamf = mnfst.readdelta(mnfstnode)
1737 # For each line in the delta
1739 # For each line in the delta
1738 for f, fnode in deltamf.iteritems():
1740 for f, fnode in deltamf.iteritems():
1739 f = changedfiles.get(f, None)
1741 f = changedfiles.get(f, None)
1740 # And if the file is in the list of files we care
1742 # And if the file is in the list of files we care
1741 # about.
1743 # about.
1742 if f is not None:
1744 if f is not None:
1743 # Get the changenode this manifest belongs to
1745 # Get the changenode this manifest belongs to
1744 clnode = msng_mnfst_set[mnfstnode]
1746 clnode = msng_mnfst_set[mnfstnode]
1745 # Create the set of filenodes for the file if
1747 # Create the set of filenodes for the file if
1746 # there isn't one already.
1748 # there isn't one already.
1747 ndset = msng_filenode_set.setdefault(f, {})
1749 ndset = msng_filenode_set.setdefault(f, {})
1748 # And set the filenode's changelog node to the
1750 # And set the filenode's changelog node to the
1749 # manifest's if it hasn't been set already.
1751 # manifest's if it hasn't been set already.
1750 ndset.setdefault(fnode, clnode)
1752 ndset.setdefault(fnode, clnode)
1751 else:
1753 else:
1752 # Otherwise we need a full manifest.
1754 # Otherwise we need a full manifest.
1753 m = mnfst.read(mnfstnode)
1755 m = mnfst.read(mnfstnode)
1754 # For every file in we care about.
1756 # For every file in we care about.
1755 for f in changedfiles:
1757 for f in changedfiles:
1756 fnode = m.get(f, None)
1758 fnode = m.get(f, None)
1757 # If it's in the manifest
1759 # If it's in the manifest
1758 if fnode is not None:
1760 if fnode is not None:
1759 # See comments above.
1761 # See comments above.
1760 clnode = msng_mnfst_set[mnfstnode]
1762 clnode = msng_mnfst_set[mnfstnode]
1761 ndset = msng_filenode_set.setdefault(f, {})
1763 ndset = msng_filenode_set.setdefault(f, {})
1762 ndset.setdefault(fnode, clnode)
1764 ndset.setdefault(fnode, clnode)
1763 return collect_msng_filenodes
1765 return collect_msng_filenodes
1764
1766
1765 # We have a list of filenodes we think we need for a file, lets remove
1767 # We have a list of filenodes we think we need for a file, lets remove
1766 # all those we know the recipient must have.
1768 # all those we know the recipient must have.
1767 def prune_filenodes(f, filerevlog):
1769 def prune_filenodes(f, filerevlog):
1768 msngset = msng_filenode_set[f]
1770 msngset = msng_filenode_set[f]
1769 hasset = set()
1771 hasset = set()
1770 # If a 'missing' filenode thinks it belongs to a changenode we
1772 # If a 'missing' filenode thinks it belongs to a changenode we
1771 # assume the recipient must have, then the recipient must have
1773 # assume the recipient must have, then the recipient must have
1772 # that filenode.
1774 # that filenode.
1773 for n in msngset:
1775 for n in msngset:
1774 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1776 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1775 if clnode in has_cl_set:
1777 if clnode in has_cl_set:
1776 hasset.add(n)
1778 hasset.add(n)
1777 prune_parents(filerevlog, hasset, msngset)
1779 prune_parents(filerevlog, hasset, msngset)
1778
1780
1779 # A function generator function that sets up the a context for the
1781 # A function generator function that sets up the a context for the
1780 # inner function.
1782 # inner function.
1781 def lookup_filenode_link_func(fname):
1783 def lookup_filenode_link_func(fname):
1782 msngset = msng_filenode_set[fname]
1784 msngset = msng_filenode_set[fname]
1783 # Lookup the changenode the filenode belongs to.
1785 # Lookup the changenode the filenode belongs to.
1784 def lookup_filenode_link(fnode):
1786 def lookup_filenode_link(fnode):
1785 return msngset[fnode]
1787 return msngset[fnode]
1786 return lookup_filenode_link
1788 return lookup_filenode_link
1787
1789
1788 # Add the nodes that were explicitly requested.
1790 # Add the nodes that were explicitly requested.
1789 def add_extra_nodes(name, nodes):
1791 def add_extra_nodes(name, nodes):
1790 if not extranodes or name not in extranodes:
1792 if not extranodes or name not in extranodes:
1791 return
1793 return
1792
1794
1793 for node, linknode in extranodes[name]:
1795 for node, linknode in extranodes[name]:
1794 if node not in nodes:
1796 if node not in nodes:
1795 nodes[node] = linknode
1797 nodes[node] = linknode
1796
1798
1797 # Now that we have all theses utility functions to help out and
1799 # Now that we have all theses utility functions to help out and
1798 # logically divide up the task, generate the group.
1800 # logically divide up the task, generate the group.
1799 def gengroup():
1801 def gengroup():
1800 # The set of changed files starts empty.
1802 # The set of changed files starts empty.
1801 changedfiles = {}
1803 changedfiles = {}
1802 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1804 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1803
1805
1804 # Create a changenode group generator that will call our functions
1806 # Create a changenode group generator that will call our functions
1805 # back to lookup the owning changenode and collect information.
1807 # back to lookup the owning changenode and collect information.
1806 group = cl.group(msng_cl_lst, identity, collect)
1808 group = cl.group(msng_cl_lst, identity, collect)
1807 cnt = 0
1809 cnt = 0
1808 for chnk in group:
1810 for chnk in group:
1809 yield chnk
1811 yield chnk
1810 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1812 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1811 cnt += 1
1813 cnt += 1
1812 self.ui.progress(_('bundling changes'), None)
1814 self.ui.progress(_('bundling changes'), None)
1813
1815
1814
1816
1815 # Figure out which manifest nodes (of the ones we think might be
1817 # Figure out which manifest nodes (of the ones we think might be
1816 # part of the changegroup) the recipient must know about and
1818 # part of the changegroup) the recipient must know about and
1817 # remove them from the changegroup.
1819 # remove them from the changegroup.
1818 has_mnfst_set = set()
1820 has_mnfst_set = set()
1819 for n in msng_mnfst_set:
1821 for n in msng_mnfst_set:
1820 # If a 'missing' manifest thinks it belongs to a changenode
1822 # If a 'missing' manifest thinks it belongs to a changenode
1821 # the recipient is assumed to have, obviously the recipient
1823 # the recipient is assumed to have, obviously the recipient
1822 # must have that manifest.
1824 # must have that manifest.
1823 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1825 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1824 if linknode in has_cl_set:
1826 if linknode in has_cl_set:
1825 has_mnfst_set.add(n)
1827 has_mnfst_set.add(n)
1826 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1828 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1827 add_extra_nodes(1, msng_mnfst_set)
1829 add_extra_nodes(1, msng_mnfst_set)
1828 msng_mnfst_lst = msng_mnfst_set.keys()
1830 msng_mnfst_lst = msng_mnfst_set.keys()
1829 # Sort the manifestnodes by revision number.
1831 # Sort the manifestnodes by revision number.
1830 msng_mnfst_lst.sort(key=mnfst.rev)
1832 msng_mnfst_lst.sort(key=mnfst.rev)
1831 # Create a generator for the manifestnodes that calls our lookup
1833 # Create a generator for the manifestnodes that calls our lookup
1832 # and data collection functions back.
1834 # and data collection functions back.
1833 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1835 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1834 filenode_collector(changedfiles))
1836 filenode_collector(changedfiles))
1835 cnt = 0
1837 cnt = 0
1836 for chnk in group:
1838 for chnk in group:
1837 yield chnk
1839 yield chnk
1838 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1840 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1839 cnt += 1
1841 cnt += 1
1840 self.ui.progress(_('bundling manifests'), None)
1842 self.ui.progress(_('bundling manifests'), None)
1841
1843
1842 # These are no longer needed, dereference and toss the memory for
1844 # These are no longer needed, dereference and toss the memory for
1843 # them.
1845 # them.
1844 msng_mnfst_lst = None
1846 msng_mnfst_lst = None
1845 msng_mnfst_set.clear()
1847 msng_mnfst_set.clear()
1846
1848
1847 if extranodes:
1849 if extranodes:
1848 for fname in extranodes:
1850 for fname in extranodes:
1849 if isinstance(fname, int):
1851 if isinstance(fname, int):
1850 continue
1852 continue
1851 msng_filenode_set.setdefault(fname, {})
1853 msng_filenode_set.setdefault(fname, {})
1852 changedfiles[fname] = 1
1854 changedfiles[fname] = 1
1853 # Go through all our files in order sorted by name.
1855 # Go through all our files in order sorted by name.
1854 cnt = 0
1856 cnt = 0
1855 for fname in sorted(changedfiles):
1857 for fname in sorted(changedfiles):
1856 filerevlog = self.file(fname)
1858 filerevlog = self.file(fname)
1857 if not len(filerevlog):
1859 if not len(filerevlog):
1858 raise util.Abort(_("empty or missing revlog for %s") % fname)
1860 raise util.Abort(_("empty or missing revlog for %s") % fname)
1859 # Toss out the filenodes that the recipient isn't really
1861 # Toss out the filenodes that the recipient isn't really
1860 # missing.
1862 # missing.
1861 if fname in msng_filenode_set:
1863 if fname in msng_filenode_set:
1862 prune_filenodes(fname, filerevlog)
1864 prune_filenodes(fname, filerevlog)
1863 add_extra_nodes(fname, msng_filenode_set[fname])
1865 add_extra_nodes(fname, msng_filenode_set[fname])
1864 msng_filenode_lst = msng_filenode_set[fname].keys()
1866 msng_filenode_lst = msng_filenode_set[fname].keys()
1865 else:
1867 else:
1866 msng_filenode_lst = []
1868 msng_filenode_lst = []
1867 # If any filenodes are left, generate the group for them,
1869 # If any filenodes are left, generate the group for them,
1868 # otherwise don't bother.
1870 # otherwise don't bother.
1869 if len(msng_filenode_lst) > 0:
1871 if len(msng_filenode_lst) > 0:
1870 yield changegroup.chunkheader(len(fname))
1872 yield changegroup.chunkheader(len(fname))
1871 yield fname
1873 yield fname
1872 # Sort the filenodes by their revision #
1874 # Sort the filenodes by their revision #
1873 msng_filenode_lst.sort(key=filerevlog.rev)
1875 msng_filenode_lst.sort(key=filerevlog.rev)
1874 # Create a group generator and only pass in a changenode
1876 # Create a group generator and only pass in a changenode
1875 # lookup function as we need to collect no information
1877 # lookup function as we need to collect no information
1876 # from filenodes.
1878 # from filenodes.
1877 group = filerevlog.group(msng_filenode_lst,
1879 group = filerevlog.group(msng_filenode_lst,
1878 lookup_filenode_link_func(fname))
1880 lookup_filenode_link_func(fname))
1879 for chnk in group:
1881 for chnk in group:
1880 self.ui.progress(
1882 self.ui.progress(
1881 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1883 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1882 cnt += 1
1884 cnt += 1
1883 yield chnk
1885 yield chnk
1884 if fname in msng_filenode_set:
1886 if fname in msng_filenode_set:
1885 # Don't need this anymore, toss it to free memory.
1887 # Don't need this anymore, toss it to free memory.
1886 del msng_filenode_set[fname]
1888 del msng_filenode_set[fname]
1887 # Signal that no more groups are left.
1889 # Signal that no more groups are left.
1888 yield changegroup.closechunk()
1890 yield changegroup.closechunk()
1889 self.ui.progress(_('bundling files'), None)
1891 self.ui.progress(_('bundling files'), None)
1890
1892
1891 if msng_cl_lst:
1893 if msng_cl_lst:
1892 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1894 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1893
1895
1894 return util.chunkbuffer(gengroup())
1896 return util.chunkbuffer(gengroup())
1895
1897
1896 def changegroup(self, basenodes, source):
1898 def changegroup(self, basenodes, source):
1897 # to avoid a race we use changegroupsubset() (issue1320)
1899 # to avoid a race we use changegroupsubset() (issue1320)
1898 return self.changegroupsubset(basenodes, self.heads(), source)
1900 return self.changegroupsubset(basenodes, self.heads(), source)
1899
1901
1900 def _changegroup(self, nodes, source):
1902 def _changegroup(self, nodes, source):
1901 """Compute the changegroup of all nodes that we have that a recipient
1903 """Compute the changegroup of all nodes that we have that a recipient
1902 doesn't. Return a chunkbuffer object whose read() method will return
1904 doesn't. Return a chunkbuffer object whose read() method will return
1903 successive changegroup chunks.
1905 successive changegroup chunks.
1904
1906
1905 This is much easier than the previous function as we can assume that
1907 This is much easier than the previous function as we can assume that
1906 the recipient has any changenode we aren't sending them.
1908 the recipient has any changenode we aren't sending them.
1907
1909
1908 nodes is the set of nodes to send"""
1910 nodes is the set of nodes to send"""
1909
1911
1910 self.hook('preoutgoing', throw=True, source=source)
1912 self.hook('preoutgoing', throw=True, source=source)
1911
1913
1912 cl = self.changelog
1914 cl = self.changelog
1913 revset = set([cl.rev(n) for n in nodes])
1915 revset = set([cl.rev(n) for n in nodes])
1914 self.changegroupinfo(nodes, source)
1916 self.changegroupinfo(nodes, source)
1915
1917
1916 def identity(x):
1918 def identity(x):
1917 return x
1919 return x
1918
1920
1919 def gennodelst(log):
1921 def gennodelst(log):
1920 for r in log:
1922 for r in log:
1921 if log.linkrev(r) in revset:
1923 if log.linkrev(r) in revset:
1922 yield log.node(r)
1924 yield log.node(r)
1923
1925
1924 def lookuprevlink_func(revlog):
1926 def lookuprevlink_func(revlog):
1925 def lookuprevlink(n):
1927 def lookuprevlink(n):
1926 return cl.node(revlog.linkrev(revlog.rev(n)))
1928 return cl.node(revlog.linkrev(revlog.rev(n)))
1927 return lookuprevlink
1929 return lookuprevlink
1928
1930
1929 def gengroup():
1931 def gengroup():
1930 '''yield a sequence of changegroup chunks (strings)'''
1932 '''yield a sequence of changegroup chunks (strings)'''
1931 # construct a list of all changed files
1933 # construct a list of all changed files
1932 changedfiles = {}
1934 changedfiles = {}
1933 mmfs = {}
1935 mmfs = {}
1934 collect = changegroup.collector(cl, mmfs, changedfiles)
1936 collect = changegroup.collector(cl, mmfs, changedfiles)
1935
1937
1936 cnt = 0
1938 cnt = 0
1937 for chnk in cl.group(nodes, identity, collect):
1939 for chnk in cl.group(nodes, identity, collect):
1938 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1940 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1939 cnt += 1
1941 cnt += 1
1940 yield chnk
1942 yield chnk
1941 self.ui.progress(_('bundling changes'), None)
1943 self.ui.progress(_('bundling changes'), None)
1942
1944
1943 mnfst = self.manifest
1945 mnfst = self.manifest
1944 nodeiter = gennodelst(mnfst)
1946 nodeiter = gennodelst(mnfst)
1945 cnt = 0
1947 cnt = 0
1946 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1948 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1947 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1949 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1948 cnt += 1
1950 cnt += 1
1949 yield chnk
1951 yield chnk
1950 self.ui.progress(_('bundling manifests'), None)
1952 self.ui.progress(_('bundling manifests'), None)
1951
1953
1952 cnt = 0
1954 cnt = 0
1953 for fname in sorted(changedfiles):
1955 for fname in sorted(changedfiles):
1954 filerevlog = self.file(fname)
1956 filerevlog = self.file(fname)
1955 if not len(filerevlog):
1957 if not len(filerevlog):
1956 raise util.Abort(_("empty or missing revlog for %s") % fname)
1958 raise util.Abort(_("empty or missing revlog for %s") % fname)
1957 nodeiter = gennodelst(filerevlog)
1959 nodeiter = gennodelst(filerevlog)
1958 nodeiter = list(nodeiter)
1960 nodeiter = list(nodeiter)
1959 if nodeiter:
1961 if nodeiter:
1960 yield changegroup.chunkheader(len(fname))
1962 yield changegroup.chunkheader(len(fname))
1961 yield fname
1963 yield fname
1962 lookup = lookuprevlink_func(filerevlog)
1964 lookup = lookuprevlink_func(filerevlog)
1963 for chnk in filerevlog.group(nodeiter, lookup):
1965 for chnk in filerevlog.group(nodeiter, lookup):
1964 self.ui.progress(
1966 self.ui.progress(
1965 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1967 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1966 cnt += 1
1968 cnt += 1
1967 yield chnk
1969 yield chnk
1968 self.ui.progress(_('bundling files'), None)
1970 self.ui.progress(_('bundling files'), None)
1969
1971
1970 yield changegroup.closechunk()
1972 yield changegroup.closechunk()
1971
1973
1972 if nodes:
1974 if nodes:
1973 self.hook('outgoing', node=hex(nodes[0]), source=source)
1975 self.hook('outgoing', node=hex(nodes[0]), source=source)
1974
1976
1975 return util.chunkbuffer(gengroup())
1977 return util.chunkbuffer(gengroup())
1976
1978
1977 def addchangegroup(self, source, srctype, url, emptyok=False):
1979 def addchangegroup(self, source, srctype, url, emptyok=False):
1978 """add changegroup to repo.
1980 """add changegroup to repo.
1979
1981
1980 return values:
1982 return values:
1981 - nothing changed or no source: 0
1983 - nothing changed or no source: 0
1982 - more heads than before: 1+added heads (2..n)
1984 - more heads than before: 1+added heads (2..n)
1983 - less heads than before: -1-removed heads (-2..-n)
1985 - less heads than before: -1-removed heads (-2..-n)
1984 - number of heads stays the same: 1
1986 - number of heads stays the same: 1
1985 """
1987 """
1986 def csmap(x):
1988 def csmap(x):
1987 self.ui.debug("add changeset %s\n" % short(x))
1989 self.ui.debug("add changeset %s\n" % short(x))
1988 return len(cl)
1990 return len(cl)
1989
1991
1990 def revmap(x):
1992 def revmap(x):
1991 return cl.rev(x)
1993 return cl.rev(x)
1992
1994
1993 if not source:
1995 if not source:
1994 return 0
1996 return 0
1995
1997
1996 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1998 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1997
1999
1998 changesets = files = revisions = 0
2000 changesets = files = revisions = 0
1999
2001
2000 # write changelog data to temp files so concurrent readers will not see
2002 # write changelog data to temp files so concurrent readers will not see
2001 # inconsistent view
2003 # inconsistent view
2002 cl = self.changelog
2004 cl = self.changelog
2003 cl.delayupdate()
2005 cl.delayupdate()
2004 oldheads = len(cl.heads())
2006 oldheads = len(cl.heads())
2005
2007
2006 tr = self.transaction()
2008 tr = self.transaction(",".join([srctype, url]))
2007 try:
2009 try:
2008 trp = weakref.proxy(tr)
2010 trp = weakref.proxy(tr)
2009 # pull off the changeset group
2011 # pull off the changeset group
2010 self.ui.status(_("adding changesets\n"))
2012 self.ui.status(_("adding changesets\n"))
2011 clstart = len(cl)
2013 clstart = len(cl)
2012 class prog(object):
2014 class prog(object):
2013 step = _('changesets')
2015 step = _('changesets')
2014 count = 1
2016 count = 1
2015 ui = self.ui
2017 ui = self.ui
2016 def __call__(self):
2018 def __call__(self):
2017 self.ui.progress(self.step, self.count, unit=_('chunks'))
2019 self.ui.progress(self.step, self.count, unit=_('chunks'))
2018 self.count += 1
2020 self.count += 1
2019 pr = prog()
2021 pr = prog()
2020 chunkiter = changegroup.chunkiter(source, progress=pr)
2022 chunkiter = changegroup.chunkiter(source, progress=pr)
2021 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2023 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2022 raise util.Abort(_("received changelog group is empty"))
2024 raise util.Abort(_("received changelog group is empty"))
2023 clend = len(cl)
2025 clend = len(cl)
2024 changesets = clend - clstart
2026 changesets = clend - clstart
2025 self.ui.progress(_('changesets'), None)
2027 self.ui.progress(_('changesets'), None)
2026
2028
2027 # pull off the manifest group
2029 # pull off the manifest group
2028 self.ui.status(_("adding manifests\n"))
2030 self.ui.status(_("adding manifests\n"))
2029 pr.step = _('manifests')
2031 pr.step = _('manifests')
2030 pr.count = 1
2032 pr.count = 1
2031 chunkiter = changegroup.chunkiter(source, progress=pr)
2033 chunkiter = changegroup.chunkiter(source, progress=pr)
2032 # no need to check for empty manifest group here:
2034 # no need to check for empty manifest group here:
2033 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2035 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2034 # no new manifest will be created and the manifest group will
2036 # no new manifest will be created and the manifest group will
2035 # be empty during the pull
2037 # be empty during the pull
2036 self.manifest.addgroup(chunkiter, revmap, trp)
2038 self.manifest.addgroup(chunkiter, revmap, trp)
2037 self.ui.progress(_('manifests'), None)
2039 self.ui.progress(_('manifests'), None)
2038
2040
2039 needfiles = {}
2041 needfiles = {}
2040 if self.ui.configbool('server', 'validate', default=False):
2042 if self.ui.configbool('server', 'validate', default=False):
2041 # validate incoming csets have their manifests
2043 # validate incoming csets have their manifests
2042 for cset in xrange(clstart, clend):
2044 for cset in xrange(clstart, clend):
2043 mfest = self.changelog.read(self.changelog.node(cset))[0]
2045 mfest = self.changelog.read(self.changelog.node(cset))[0]
2044 mfest = self.manifest.readdelta(mfest)
2046 mfest = self.manifest.readdelta(mfest)
2045 # store file nodes we must see
2047 # store file nodes we must see
2046 for f, n in mfest.iteritems():
2048 for f, n in mfest.iteritems():
2047 needfiles.setdefault(f, set()).add(n)
2049 needfiles.setdefault(f, set()).add(n)
2048
2050
2049 # process the files
2051 # process the files
2050 self.ui.status(_("adding file changes\n"))
2052 self.ui.status(_("adding file changes\n"))
2051 pr.step = 'files'
2053 pr.step = 'files'
2052 pr.count = 1
2054 pr.count = 1
2053 while 1:
2055 while 1:
2054 f = changegroup.getchunk(source)
2056 f = changegroup.getchunk(source)
2055 if not f:
2057 if not f:
2056 break
2058 break
2057 self.ui.debug("adding %s revisions\n" % f)
2059 self.ui.debug("adding %s revisions\n" % f)
2058 fl = self.file(f)
2060 fl = self.file(f)
2059 o = len(fl)
2061 o = len(fl)
2060 chunkiter = changegroup.chunkiter(source, progress=pr)
2062 chunkiter = changegroup.chunkiter(source, progress=pr)
2061 if fl.addgroup(chunkiter, revmap, trp) is None:
2063 if fl.addgroup(chunkiter, revmap, trp) is None:
2062 raise util.Abort(_("received file revlog group is empty"))
2064 raise util.Abort(_("received file revlog group is empty"))
2063 revisions += len(fl) - o
2065 revisions += len(fl) - o
2064 files += 1
2066 files += 1
2065 if f in needfiles:
2067 if f in needfiles:
2066 needs = needfiles[f]
2068 needs = needfiles[f]
2067 for new in xrange(o, len(fl)):
2069 for new in xrange(o, len(fl)):
2068 n = fl.node(new)
2070 n = fl.node(new)
2069 if n in needs:
2071 if n in needs:
2070 needs.remove(n)
2072 needs.remove(n)
2071 if not needs:
2073 if not needs:
2072 del needfiles[f]
2074 del needfiles[f]
2073 self.ui.progress(_('files'), None)
2075 self.ui.progress(_('files'), None)
2074
2076
2075 for f, needs in needfiles.iteritems():
2077 for f, needs in needfiles.iteritems():
2076 fl = self.file(f)
2078 fl = self.file(f)
2077 for n in needs:
2079 for n in needs:
2078 try:
2080 try:
2079 fl.rev(n)
2081 fl.rev(n)
2080 except error.LookupError:
2082 except error.LookupError:
2081 raise util.Abort(
2083 raise util.Abort(
2082 _('missing file data for %s:%s - run hg verify') %
2084 _('missing file data for %s:%s - run hg verify') %
2083 (f, hex(n)))
2085 (f, hex(n)))
2084
2086
2085 newheads = len(cl.heads())
2087 newheads = len(cl.heads())
2086 heads = ""
2088 heads = ""
2087 if oldheads and newheads != oldheads:
2089 if oldheads and newheads != oldheads:
2088 heads = _(" (%+d heads)") % (newheads - oldheads)
2090 heads = _(" (%+d heads)") % (newheads - oldheads)
2089
2091
2090 self.ui.status(_("added %d changesets"
2092 self.ui.status(_("added %d changesets"
2091 " with %d changes to %d files%s\n")
2093 " with %d changes to %d files%s\n")
2092 % (changesets, revisions, files, heads))
2094 % (changesets, revisions, files, heads))
2093
2095
2094 if changesets > 0:
2096 if changesets > 0:
2095 p = lambda: cl.writepending() and self.root or ""
2097 p = lambda: cl.writepending() and self.root or ""
2096 self.hook('pretxnchangegroup', throw=True,
2098 self.hook('pretxnchangegroup', throw=True,
2097 node=hex(cl.node(clstart)), source=srctype,
2099 node=hex(cl.node(clstart)), source=srctype,
2098 url=url, pending=p)
2100 url=url, pending=p)
2099
2101
2100 # make changelog see real files again
2102 # make changelog see real files again
2101 cl.finalize(trp)
2103 cl.finalize(trp)
2102
2104
2103 tr.close()
2105 tr.close()
2104 finally:
2106 finally:
2105 del tr
2107 del tr
2106
2108
2107 if changesets > 0:
2109 if changesets > 0:
2108 # forcefully update the on-disk branch cache
2110 # forcefully update the on-disk branch cache
2109 self.ui.debug("updating the branch cache\n")
2111 self.ui.debug("updating the branch cache\n")
2110 self.branchtags()
2112 self.branchtags()
2111 self.hook("changegroup", node=hex(cl.node(clstart)),
2113 self.hook("changegroup", node=hex(cl.node(clstart)),
2112 source=srctype, url=url)
2114 source=srctype, url=url)
2113
2115
2114 for i in xrange(clstart, clend):
2116 for i in xrange(clstart, clend):
2115 self.hook("incoming", node=hex(cl.node(i)),
2117 self.hook("incoming", node=hex(cl.node(i)),
2116 source=srctype, url=url)
2118 source=srctype, url=url)
2117
2119
2118 # never return 0 here:
2120 # never return 0 here:
2119 if newheads < oldheads:
2121 if newheads < oldheads:
2120 return newheads - oldheads - 1
2122 return newheads - oldheads - 1
2121 else:
2123 else:
2122 return newheads - oldheads + 1
2124 return newheads - oldheads + 1
2123
2125
2124
2126
2125 def stream_in(self, remote):
2127 def stream_in(self, remote):
2126 fp = remote.stream_out()
2128 fp = remote.stream_out()
2127 l = fp.readline()
2129 l = fp.readline()
2128 try:
2130 try:
2129 resp = int(l)
2131 resp = int(l)
2130 except ValueError:
2132 except ValueError:
2131 raise error.ResponseError(
2133 raise error.ResponseError(
2132 _('Unexpected response from remote server:'), l)
2134 _('Unexpected response from remote server:'), l)
2133 if resp == 1:
2135 if resp == 1:
2134 raise util.Abort(_('operation forbidden by server'))
2136 raise util.Abort(_('operation forbidden by server'))
2135 elif resp == 2:
2137 elif resp == 2:
2136 raise util.Abort(_('locking the remote repository failed'))
2138 raise util.Abort(_('locking the remote repository failed'))
2137 elif resp != 0:
2139 elif resp != 0:
2138 raise util.Abort(_('the server sent an unknown error code'))
2140 raise util.Abort(_('the server sent an unknown error code'))
2139 self.ui.status(_('streaming all changes\n'))
2141 self.ui.status(_('streaming all changes\n'))
2140 l = fp.readline()
2142 l = fp.readline()
2141 try:
2143 try:
2142 total_files, total_bytes = map(int, l.split(' ', 1))
2144 total_files, total_bytes = map(int, l.split(' ', 1))
2143 except (ValueError, TypeError):
2145 except (ValueError, TypeError):
2144 raise error.ResponseError(
2146 raise error.ResponseError(
2145 _('Unexpected response from remote server:'), l)
2147 _('Unexpected response from remote server:'), l)
2146 self.ui.status(_('%d files to transfer, %s of data\n') %
2148 self.ui.status(_('%d files to transfer, %s of data\n') %
2147 (total_files, util.bytecount(total_bytes)))
2149 (total_files, util.bytecount(total_bytes)))
2148 start = time.time()
2150 start = time.time()
2149 for i in xrange(total_files):
2151 for i in xrange(total_files):
2150 # XXX doesn't support '\n' or '\r' in filenames
2152 # XXX doesn't support '\n' or '\r' in filenames
2151 l = fp.readline()
2153 l = fp.readline()
2152 try:
2154 try:
2153 name, size = l.split('\0', 1)
2155 name, size = l.split('\0', 1)
2154 size = int(size)
2156 size = int(size)
2155 except (ValueError, TypeError):
2157 except (ValueError, TypeError):
2156 raise error.ResponseError(
2158 raise error.ResponseError(
2157 _('Unexpected response from remote server:'), l)
2159 _('Unexpected response from remote server:'), l)
2158 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2160 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2159 # for backwards compat, name was partially encoded
2161 # for backwards compat, name was partially encoded
2160 ofp = self.sopener(store.decodedir(name), 'w')
2162 ofp = self.sopener(store.decodedir(name), 'w')
2161 for chunk in util.filechunkiter(fp, limit=size):
2163 for chunk in util.filechunkiter(fp, limit=size):
2162 ofp.write(chunk)
2164 ofp.write(chunk)
2163 ofp.close()
2165 ofp.close()
2164 elapsed = time.time() - start
2166 elapsed = time.time() - start
2165 if elapsed <= 0:
2167 if elapsed <= 0:
2166 elapsed = 0.001
2168 elapsed = 0.001
2167 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2169 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2168 (util.bytecount(total_bytes), elapsed,
2170 (util.bytecount(total_bytes), elapsed,
2169 util.bytecount(total_bytes / elapsed)))
2171 util.bytecount(total_bytes / elapsed)))
2170 self.invalidate()
2172 self.invalidate()
2171 return len(self.heads()) + 1
2173 return len(self.heads()) + 1
2172
2174
2173 def clone(self, remote, heads=[], stream=False):
2175 def clone(self, remote, heads=[], stream=False):
2174 '''clone remote repository.
2176 '''clone remote repository.
2175
2177
2176 keyword arguments:
2178 keyword arguments:
2177 heads: list of revs to clone (forces use of pull)
2179 heads: list of revs to clone (forces use of pull)
2178 stream: use streaming clone if possible'''
2180 stream: use streaming clone if possible'''
2179
2181
2180 # now, all clients that can request uncompressed clones can
2182 # now, all clients that can request uncompressed clones can
2181 # read repo formats supported by all servers that can serve
2183 # read repo formats supported by all servers that can serve
2182 # them.
2184 # them.
2183
2185
2184 # if revlog format changes, client will have to check version
2186 # if revlog format changes, client will have to check version
2185 # and format flags on "stream" capability, and use
2187 # and format flags on "stream" capability, and use
2186 # uncompressed only if compatible.
2188 # uncompressed only if compatible.
2187
2189
2188 if stream and not heads and remote.capable('stream'):
2190 if stream and not heads and remote.capable('stream'):
2189 return self.stream_in(remote)
2191 return self.stream_in(remote)
2190 return self.pull(remote, heads)
2192 return self.pull(remote, heads)
2191
2193
2192 # used to avoid circular references so destructors work
2194 # used to avoid circular references so destructors work
2193 def aftertrans(files):
2195 def aftertrans(files):
2194 renamefiles = [tuple(t) for t in files]
2196 renamefiles = [tuple(t) for t in files]
2195 def a():
2197 def a():
2196 for src, dest in renamefiles:
2198 for src, dest in renamefiles:
2197 util.rename(src, dest)
2199 util.rename(src, dest)
2198 return a
2200 return a
2199
2201
2200 def instance(ui, path, create):
2202 def instance(ui, path, create):
2201 return localrepository(ui, util.drop_scheme('file', path), create)
2203 return localrepository(ui, util.drop_scheme('file', path), create)
2202
2204
2203 def islocal(path):
2205 def islocal(path):
2204 return True
2206 return True
@@ -1,145 +1,145 b''
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import changegroup
9 import changegroup
10 from node import nullrev, short
10 from node import nullrev, short
11 from i18n import _
11 from i18n import _
12 import os
12 import os
13
13
14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
14 def _bundle(repo, bases, heads, node, suffix, extranodes=None):
15 """create a bundle with the specified revisions as a backup"""
15 """create a bundle with the specified revisions as a backup"""
16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
16 cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
17 backupdir = repo.join("strip-backup")
17 backupdir = repo.join("strip-backup")
18 if not os.path.isdir(backupdir):
18 if not os.path.isdir(backupdir):
19 os.mkdir(backupdir)
19 os.mkdir(backupdir)
20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
20 name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
21 repo.ui.warn(_("saving bundle to %s\n") % name)
21 repo.ui.warn(_("saving bundle to %s\n") % name)
22 return changegroup.writebundle(cg, name, "HG10BZ")
22 return changegroup.writebundle(cg, name, "HG10BZ")
23
23
24 def _collectfiles(repo, striprev):
24 def _collectfiles(repo, striprev):
25 """find out the filelogs affected by the strip"""
25 """find out the filelogs affected by the strip"""
26 files = set()
26 files = set()
27
27
28 for x in xrange(striprev, len(repo)):
28 for x in xrange(striprev, len(repo)):
29 files.update(repo[x].files())
29 files.update(repo[x].files())
30
30
31 return sorted(files)
31 return sorted(files)
32
32
33 def _collectextranodes(repo, files, link):
33 def _collectextranodes(repo, files, link):
34 """return the nodes that have to be saved before the strip"""
34 """return the nodes that have to be saved before the strip"""
35 def collectone(revlog):
35 def collectone(revlog):
36 extra = []
36 extra = []
37 startrev = count = len(revlog)
37 startrev = count = len(revlog)
38 # find the truncation point of the revlog
38 # find the truncation point of the revlog
39 for i in xrange(count):
39 for i in xrange(count):
40 lrev = revlog.linkrev(i)
40 lrev = revlog.linkrev(i)
41 if lrev >= link:
41 if lrev >= link:
42 startrev = i + 1
42 startrev = i + 1
43 break
43 break
44
44
45 # see if any revision after that point has a linkrev less than link
45 # see if any revision after that point has a linkrev less than link
46 # (we have to manually save these guys)
46 # (we have to manually save these guys)
47 for i in xrange(startrev, count):
47 for i in xrange(startrev, count):
48 node = revlog.node(i)
48 node = revlog.node(i)
49 lrev = revlog.linkrev(i)
49 lrev = revlog.linkrev(i)
50 if lrev < link:
50 if lrev < link:
51 extra.append((node, cl.node(lrev)))
51 extra.append((node, cl.node(lrev)))
52
52
53 return extra
53 return extra
54
54
55 extranodes = {}
55 extranodes = {}
56 cl = repo.changelog
56 cl = repo.changelog
57 extra = collectone(repo.manifest)
57 extra = collectone(repo.manifest)
58 if extra:
58 if extra:
59 extranodes[1] = extra
59 extranodes[1] = extra
60 for fname in files:
60 for fname in files:
61 f = repo.file(fname)
61 f = repo.file(fname)
62 extra = collectone(f)
62 extra = collectone(f)
63 if extra:
63 if extra:
64 extranodes[fname] = extra
64 extranodes[fname] = extra
65
65
66 return extranodes
66 return extranodes
67
67
68 def strip(ui, repo, node, backup="all"):
68 def strip(ui, repo, node, backup="all"):
69 cl = repo.changelog
69 cl = repo.changelog
70 # TODO delete the undo files, and handle undo of merge sets
70 # TODO delete the undo files, and handle undo of merge sets
71 striprev = cl.rev(node)
71 striprev = cl.rev(node)
72
72
73 # Some revisions with rev > striprev may not be descendants of striprev.
73 # Some revisions with rev > striprev may not be descendants of striprev.
74 # We have to find these revisions and put them in a bundle, so that
74 # We have to find these revisions and put them in a bundle, so that
75 # we can restore them after the truncations.
75 # we can restore them after the truncations.
76 # To create the bundle we use repo.changegroupsubset which requires
76 # To create the bundle we use repo.changegroupsubset which requires
77 # the list of heads and bases of the set of interesting revisions.
77 # the list of heads and bases of the set of interesting revisions.
78 # (head = revision in the set that has no descendant in the set;
78 # (head = revision in the set that has no descendant in the set;
79 # base = revision in the set that has no ancestor in the set)
79 # base = revision in the set that has no ancestor in the set)
80 tostrip = set((striprev,))
80 tostrip = set((striprev,))
81 saveheads = set()
81 saveheads = set()
82 savebases = []
82 savebases = []
83 for r in xrange(striprev + 1, len(cl)):
83 for r in xrange(striprev + 1, len(cl)):
84 parents = cl.parentrevs(r)
84 parents = cl.parentrevs(r)
85 if parents[0] in tostrip or parents[1] in tostrip:
85 if parents[0] in tostrip or parents[1] in tostrip:
86 # r is a descendant of striprev
86 # r is a descendant of striprev
87 tostrip.add(r)
87 tostrip.add(r)
88 # if this is a merge and one of the parents does not descend
88 # if this is a merge and one of the parents does not descend
89 # from striprev, mark that parent as a savehead.
89 # from striprev, mark that parent as a savehead.
90 if parents[1] != nullrev:
90 if parents[1] != nullrev:
91 for p in parents:
91 for p in parents:
92 if p not in tostrip and p > striprev:
92 if p not in tostrip and p > striprev:
93 saveheads.add(p)
93 saveheads.add(p)
94 else:
94 else:
95 # if no parents of this revision will be stripped, mark it as
95 # if no parents of this revision will be stripped, mark it as
96 # a savebase
96 # a savebase
97 if parents[0] < striprev and parents[1] < striprev:
97 if parents[0] < striprev and parents[1] < striprev:
98 savebases.append(cl.node(r))
98 savebases.append(cl.node(r))
99
99
100 saveheads.difference_update(parents)
100 saveheads.difference_update(parents)
101 saveheads.add(r)
101 saveheads.add(r)
102
102
103 saveheads = [cl.node(r) for r in saveheads]
103 saveheads = [cl.node(r) for r in saveheads]
104 files = _collectfiles(repo, striprev)
104 files = _collectfiles(repo, striprev)
105
105
106 extranodes = _collectextranodes(repo, files, striprev)
106 extranodes = _collectextranodes(repo, files, striprev)
107
107
108 # create a changegroup for all the branches we need to keep
108 # create a changegroup for all the branches we need to keep
109 if backup == "all":
109 if backup == "all":
110 _bundle(repo, [node], cl.heads(), node, 'backup')
110 _bundle(repo, [node], cl.heads(), node, 'backup')
111 if saveheads or extranodes:
111 if saveheads or extranodes:
112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
112 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
113 extranodes)
113 extranodes)
114
114
115 mfst = repo.manifest
115 mfst = repo.manifest
116
116
117 tr = repo.transaction()
117 tr = repo.transaction("strip")
118 offset = len(tr.entries)
118 offset = len(tr.entries)
119
119
120 tr.startgroup()
120 tr.startgroup()
121 cl.strip(striprev, tr)
121 cl.strip(striprev, tr)
122 mfst.strip(striprev, tr)
122 mfst.strip(striprev, tr)
123 for fn in files:
123 for fn in files:
124 repo.file(fn).strip(striprev, tr)
124 repo.file(fn).strip(striprev, tr)
125 tr.endgroup()
125 tr.endgroup()
126
126
127 try:
127 try:
128 for i in xrange(offset, len(tr.entries)):
128 for i in xrange(offset, len(tr.entries)):
129 file, troffset, ignore = tr.entries[i]
129 file, troffset, ignore = tr.entries[i]
130 repo.sopener(file, 'a').truncate(troffset)
130 repo.sopener(file, 'a').truncate(troffset)
131 tr.close()
131 tr.close()
132 except:
132 except:
133 tr.abort()
133 tr.abort()
134 raise
134 raise
135
135
136 if saveheads or extranodes:
136 if saveheads or extranodes:
137 ui.status(_("adding branch\n"))
137 ui.status(_("adding branch\n"))
138 f = open(chgrpfile, "rb")
138 f = open(chgrpfile, "rb")
139 gen = changegroup.readbundle(f, chgrpfile)
139 gen = changegroup.readbundle(f, chgrpfile)
140 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
140 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
141 f.close()
141 f.close()
142 if backup != "strip":
142 if backup != "strip":
143 os.unlink(chgrpfile)
143 os.unlink(chgrpfile)
144
144
145 repo.destroyed()
145 repo.destroyed()
@@ -1,73 +1,75 b''
1 % init repo1
1 % init repo1
2
2
3 % add a; ci
3 % add a; ci
4 adding a
4 adding a
5
5
6 % cat .hg/store/fncache
6 % cat .hg/store/fncache
7 data/a.i
7 data/a.i
8
8
9 % add a.i/b; ci
9 % add a.i/b; ci
10 adding a.i/b
10 adding a.i/b
11
11
12 % cat .hg/store/fncache
12 % cat .hg/store/fncache
13 data/a.i
13 data/a.i
14 data/a.i.hg/b.i
14 data/a.i.hg/b.i
15
15
16 % add a.i.hg/c; ci
16 % add a.i.hg/c; ci
17 adding a.i.hg/c
17 adding a.i.hg/c
18
18
19 % cat .hg/store/fncache
19 % cat .hg/store/fncache
20 data/a.i
20 data/a.i
21 data/a.i.hg/b.i
21 data/a.i.hg/b.i
22 data/a.i.hg.hg/c.i
22 data/a.i.hg.hg/c.i
23
23
24 % hg verify
24 % hg verify
25 checking changesets
25 checking changesets
26 checking manifests
26 checking manifests
27 crosschecking files in changesets and manifests
27 crosschecking files in changesets and manifests
28 checking files
28 checking files
29 3 files, 3 changesets, 3 total revisions
29 3 files, 3 changesets, 3 total revisions
30
30
31 % rm .hg/store/fncache
31 % rm .hg/store/fncache
32
32
33 % hg verify
33 % hg verify
34 checking changesets
34 checking changesets
35 checking manifests
35 checking manifests
36 crosschecking files in changesets and manifests
36 crosschecking files in changesets and manifests
37 checking files
37 checking files
38 data/a.i@0: missing revlog!
38 data/a.i@0: missing revlog!
39 data/a.i.hg/c.i@2: missing revlog!
39 data/a.i.hg/c.i@2: missing revlog!
40 data/a.i/b.i@1: missing revlog!
40 data/a.i/b.i@1: missing revlog!
41 3 files, 3 changesets, 3 total revisions
41 3 files, 3 changesets, 3 total revisions
42 3 integrity errors encountered!
42 3 integrity errors encountered!
43 (first damaged changeset appears to be 0)
43 (first damaged changeset appears to be 0)
44 % non store repo
44 % non store repo
45 adding tst.d/foo
45 adding tst.d/foo
46 .hg
46 .hg
47 .hg/00changelog.i
47 .hg/00changelog.i
48 .hg/00manifest.i
48 .hg/00manifest.i
49 .hg/data
49 .hg/data
50 .hg/data/tst.d.hg
50 .hg/data/tst.d.hg
51 .hg/data/tst.d.hg/foo.i
51 .hg/data/tst.d.hg/foo.i
52 .hg/dirstate
52 .hg/dirstate
53 .hg/last-message.txt
53 .hg/last-message.txt
54 .hg/requires
54 .hg/requires
55 .hg/undo
55 .hg/undo
56 .hg/undo.branch
56 .hg/undo.branch
57 .hg/undo.desc
57 .hg/undo.dirstate
58 .hg/undo.dirstate
58 % non fncache repo
59 % non fncache repo
59 adding tst.d/Foo
60 adding tst.d/Foo
60 .hg
61 .hg
61 .hg/00changelog.i
62 .hg/00changelog.i
62 .hg/dirstate
63 .hg/dirstate
63 .hg/last-message.txt
64 .hg/last-message.txt
64 .hg/requires
65 .hg/requires
65 .hg/store
66 .hg/store
66 .hg/store/00changelog.i
67 .hg/store/00changelog.i
67 .hg/store/00manifest.i
68 .hg/store/00manifest.i
68 .hg/store/data
69 .hg/store/data
69 .hg/store/data/tst.d.hg
70 .hg/store/data/tst.d.hg
70 .hg/store/data/tst.d.hg/_foo.i
71 .hg/store/data/tst.d.hg/_foo.i
71 .hg/store/undo
72 .hg/store/undo
72 .hg/undo.branch
73 .hg/undo.branch
74 .hg/undo.desc
73 .hg/undo.dirstate
75 .hg/undo.dirstate
@@ -1,7 +1,7 b''
1 0
1 0
2 0
2 0
3 adding changesets
3 adding changesets
4 transaction abort!
4 transaction abort!
5 rollback completed
5 rollback completed
6 killed!
6 killed!
7 .hg/00changelog.i .hg/journal.branch .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
7 .hg/00changelog.i .hg/journal.branch .hg/journal.desc .hg/journal.dirstate .hg/requires .hg/store .hg/store/00changelog.i .hg/store/00changelog.i.a
@@ -1,57 +1,59 b''
1 % before commit
1 % before commit
2 % store can be written by the group, other files cannot
2 % store can be written by the group, other files cannot
3 % store is setgid
3 % store is setgid
4 00700 ./.hg/
4 00700 ./.hg/
5 00600 ./.hg/00changelog.i
5 00600 ./.hg/00changelog.i
6 00600 ./.hg/requires
6 00600 ./.hg/requires
7 00770 ./.hg/store/
7 00770 ./.hg/store/
8
8
9 % after commit
9 % after commit
10 % working dir files can only be written by the owner
10 % working dir files can only be written by the owner
11 % files created in .hg can be written by the group
11 % files created in .hg can be written by the group
12 % (in particular, store/**, dirstate, branch cache file, undo files)
12 % (in particular, store/**, dirstate, branch cache file, undo files)
13 % new directories are setgid
13 % new directories are setgid
14 00700 ./.hg/
14 00700 ./.hg/
15 00600 ./.hg/00changelog.i
15 00600 ./.hg/00changelog.i
16 00660 ./.hg/dirstate
16 00660 ./.hg/dirstate
17 00660 ./.hg/last-message.txt
17 00660 ./.hg/last-message.txt
18 00600 ./.hg/requires
18 00600 ./.hg/requires
19 00770 ./.hg/store/
19 00770 ./.hg/store/
20 00660 ./.hg/store/00changelog.i
20 00660 ./.hg/store/00changelog.i
21 00660 ./.hg/store/00manifest.i
21 00660 ./.hg/store/00manifest.i
22 00770 ./.hg/store/data/
22 00770 ./.hg/store/data/
23 00770 ./.hg/store/data/dir/
23 00770 ./.hg/store/data/dir/
24 00660 ./.hg/store/data/dir/bar.i
24 00660 ./.hg/store/data/dir/bar.i
25 00660 ./.hg/store/data/foo.i
25 00660 ./.hg/store/data/foo.i
26 00660 ./.hg/store/fncache
26 00660 ./.hg/store/fncache
27 00660 ./.hg/store/undo
27 00660 ./.hg/store/undo
28 00660 ./.hg/undo.branch
28 00660 ./.hg/undo.branch
29 00660 ./.hg/undo.desc
29 00660 ./.hg/undo.dirstate
30 00660 ./.hg/undo.dirstate
30 00700 ./dir/
31 00700 ./dir/
31 00600 ./dir/bar
32 00600 ./dir/bar
32 00600 ./foo
33 00600 ./foo
33
34
34 % before push
35 % before push
35 % group can write everything
36 % group can write everything
36 00770 ../push/.hg/
37 00770 ../push/.hg/
37 00660 ../push/.hg/00changelog.i
38 00660 ../push/.hg/00changelog.i
38 00660 ../push/.hg/requires
39 00660 ../push/.hg/requires
39 00770 ../push/.hg/store/
40 00770 ../push/.hg/store/
40
41
41 % after push
42 % after push
42 % group can still write everything
43 % group can still write everything
43 00770 ../push/.hg/
44 00770 ../push/.hg/
44 00660 ../push/.hg/00changelog.i
45 00660 ../push/.hg/00changelog.i
45 00660 ../push/.hg/branchheads.cache
46 00660 ../push/.hg/branchheads.cache
46 00660 ../push/.hg/requires
47 00660 ../push/.hg/requires
47 00770 ../push/.hg/store/
48 00770 ../push/.hg/store/
48 00660 ../push/.hg/store/00changelog.i
49 00660 ../push/.hg/store/00changelog.i
49 00660 ../push/.hg/store/00manifest.i
50 00660 ../push/.hg/store/00manifest.i
50 00770 ../push/.hg/store/data/
51 00770 ../push/.hg/store/data/
51 00770 ../push/.hg/store/data/dir/
52 00770 ../push/.hg/store/data/dir/
52 00660 ../push/.hg/store/data/dir/bar.i
53 00660 ../push/.hg/store/data/dir/bar.i
53 00660 ../push/.hg/store/data/foo.i
54 00660 ../push/.hg/store/data/foo.i
54 00660 ../push/.hg/store/fncache
55 00660 ../push/.hg/store/fncache
55 00660 ../push/.hg/store/undo
56 00660 ../push/.hg/store/undo
56 00660 ../push/.hg/undo.branch
57 00660 ../push/.hg/undo.branch
58 00660 ../push/.hg/undo.desc
57 00660 ../push/.hg/undo.dirstate
59 00660 ../push/.hg/undo.dirstate
General Comments 0
You need to be logged in to leave comments. Login now