##// END OF EJS Templates
localrepo: change _updatebranchcache to use a context generator
Sune Foldager -
r10770:fe39f016 stable
parent child Browse files
Show More
@@ -1,2819 +1,2821 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details)::
17 Common tasks (use "hg help command" for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40 '''
40 '''
41
41
42 from mercurial.i18n import _
42 from mercurial.i18n import _
43 from mercurial.node import bin, hex, short, nullid, nullrev
43 from mercurial.node import bin, hex, short, nullid, nullrev
44 from mercurial.lock import release
44 from mercurial.lock import release
45 from mercurial import commands, cmdutil, hg, patch, util
45 from mercurial import commands, cmdutil, hg, patch, util
46 from mercurial import repair, extensions, url, error
46 from mercurial import repair, extensions, url, error
47 import os, sys, re, errno
47 import os, sys, re, errno
48
48
49 commands.norepo += " qclone"
49 commands.norepo += " qclone"
50
50
51 # Patch names looks like unix-file names.
51 # Patch names looks like unix-file names.
52 # They must be joinable with queue directory and result in the patch path.
52 # They must be joinable with queue directory and result in the patch path.
53 normname = util.normpath
53 normname = util.normpath
54
54
55 class statusentry(object):
55 class statusentry(object):
56 def __init__(self, rev, name=None):
56 def __init__(self, rev, name=None):
57 if not name:
57 if not name:
58 fields = rev.split(':', 1)
58 fields = rev.split(':', 1)
59 if len(fields) == 2:
59 if len(fields) == 2:
60 self.rev, self.name = fields
60 self.rev, self.name = fields
61 else:
61 else:
62 self.rev, self.name = None, None
62 self.rev, self.name = None, None
63 else:
63 else:
64 self.rev, self.name = rev, name
64 self.rev, self.name = rev, name
65
65
66 def __str__(self):
66 def __str__(self):
67 return self.rev + ':' + self.name
67 return self.rev + ':' + self.name
68
68
69 class patchheader(object):
69 class patchheader(object):
70 def __init__(self, pf, plainmode=False):
70 def __init__(self, pf, plainmode=False):
71 def eatdiff(lines):
71 def eatdiff(lines):
72 while lines:
72 while lines:
73 l = lines[-1]
73 l = lines[-1]
74 if (l.startswith("diff -") or
74 if (l.startswith("diff -") or
75 l.startswith("Index:") or
75 l.startswith("Index:") or
76 l.startswith("===========")):
76 l.startswith("===========")):
77 del lines[-1]
77 del lines[-1]
78 else:
78 else:
79 break
79 break
80 def eatempty(lines):
80 def eatempty(lines):
81 while lines:
81 while lines:
82 l = lines[-1]
82 l = lines[-1]
83 if re.match('\s*$', l):
83 if re.match('\s*$', l):
84 del lines[-1]
84 del lines[-1]
85 else:
85 else:
86 break
86 break
87
87
88 message = []
88 message = []
89 comments = []
89 comments = []
90 user = None
90 user = None
91 date = None
91 date = None
92 parent = None
92 parent = None
93 format = None
93 format = None
94 subject = None
94 subject = None
95 diffstart = 0
95 diffstart = 0
96
96
97 for line in file(pf):
97 for line in file(pf):
98 line = line.rstrip()
98 line = line.rstrip()
99 if (line.startswith('diff --git')
99 if (line.startswith('diff --git')
100 or (diffstart and line.startswith('+++ '))):
100 or (diffstart and line.startswith('+++ '))):
101 diffstart = 2
101 diffstart = 2
102 break
102 break
103 diffstart = 0 # reset
103 diffstart = 0 # reset
104 if line.startswith("--- "):
104 if line.startswith("--- "):
105 diffstart = 1
105 diffstart = 1
106 continue
106 continue
107 elif format == "hgpatch":
107 elif format == "hgpatch":
108 # parse values when importing the result of an hg export
108 # parse values when importing the result of an hg export
109 if line.startswith("# User "):
109 if line.startswith("# User "):
110 user = line[7:]
110 user = line[7:]
111 elif line.startswith("# Date "):
111 elif line.startswith("# Date "):
112 date = line[7:]
112 date = line[7:]
113 elif line.startswith("# Parent "):
113 elif line.startswith("# Parent "):
114 parent = line[9:]
114 parent = line[9:]
115 elif not line.startswith("# ") and line:
115 elif not line.startswith("# ") and line:
116 message.append(line)
116 message.append(line)
117 format = None
117 format = None
118 elif line == '# HG changeset patch':
118 elif line == '# HG changeset patch':
119 message = []
119 message = []
120 format = "hgpatch"
120 format = "hgpatch"
121 elif (format != "tagdone" and (line.startswith("Subject: ") or
121 elif (format != "tagdone" and (line.startswith("Subject: ") or
122 line.startswith("subject: "))):
122 line.startswith("subject: "))):
123 subject = line[9:]
123 subject = line[9:]
124 format = "tag"
124 format = "tag"
125 elif (format != "tagdone" and (line.startswith("From: ") or
125 elif (format != "tagdone" and (line.startswith("From: ") or
126 line.startswith("from: "))):
126 line.startswith("from: "))):
127 user = line[6:]
127 user = line[6:]
128 format = "tag"
128 format = "tag"
129 elif (format != "tagdone" and (line.startswith("Date: ") or
129 elif (format != "tagdone" and (line.startswith("Date: ") or
130 line.startswith("date: "))):
130 line.startswith("date: "))):
131 date = line[6:]
131 date = line[6:]
132 format = "tag"
132 format = "tag"
133 elif format == "tag" and line == "":
133 elif format == "tag" and line == "":
134 # when looking for tags (subject: from: etc) they
134 # when looking for tags (subject: from: etc) they
135 # end once you find a blank line in the source
135 # end once you find a blank line in the source
136 format = "tagdone"
136 format = "tagdone"
137 elif message or line:
137 elif message or line:
138 message.append(line)
138 message.append(line)
139 comments.append(line)
139 comments.append(line)
140
140
141 eatdiff(message)
141 eatdiff(message)
142 eatdiff(comments)
142 eatdiff(comments)
143 eatempty(message)
143 eatempty(message)
144 eatempty(comments)
144 eatempty(comments)
145
145
146 # make sure message isn't empty
146 # make sure message isn't empty
147 if format and format.startswith("tag") and subject:
147 if format and format.startswith("tag") and subject:
148 message.insert(0, "")
148 message.insert(0, "")
149 message.insert(0, subject)
149 message.insert(0, subject)
150
150
151 self.message = message
151 self.message = message
152 self.comments = comments
152 self.comments = comments
153 self.user = user
153 self.user = user
154 self.date = date
154 self.date = date
155 self.parent = parent
155 self.parent = parent
156 self.haspatch = diffstart > 1
156 self.haspatch = diffstart > 1
157 self.plainmode = plainmode
157 self.plainmode = plainmode
158
158
159 def setuser(self, user):
159 def setuser(self, user):
160 if not self.updateheader(['From: ', '# User '], user):
160 if not self.updateheader(['From: ', '# User '], user):
161 try:
161 try:
162 patchheaderat = self.comments.index('# HG changeset patch')
162 patchheaderat = self.comments.index('# HG changeset patch')
163 self.comments.insert(patchheaderat + 1, '# User ' + user)
163 self.comments.insert(patchheaderat + 1, '# User ' + user)
164 except ValueError:
164 except ValueError:
165 if self.plainmode or self._hasheader(['Date: ']):
165 if self.plainmode or self._hasheader(['Date: ']):
166 self.comments = ['From: ' + user] + self.comments
166 self.comments = ['From: ' + user] + self.comments
167 else:
167 else:
168 tmp = ['# HG changeset patch', '# User ' + user, '']
168 tmp = ['# HG changeset patch', '# User ' + user, '']
169 self.comments = tmp + self.comments
169 self.comments = tmp + self.comments
170 self.user = user
170 self.user = user
171
171
172 def setdate(self, date):
172 def setdate(self, date):
173 if not self.updateheader(['Date: ', '# Date '], date):
173 if not self.updateheader(['Date: ', '# Date '], date):
174 try:
174 try:
175 patchheaderat = self.comments.index('# HG changeset patch')
175 patchheaderat = self.comments.index('# HG changeset patch')
176 self.comments.insert(patchheaderat + 1, '# Date ' + date)
176 self.comments.insert(patchheaderat + 1, '# Date ' + date)
177 except ValueError:
177 except ValueError:
178 if self.plainmode or self._hasheader(['From: ']):
178 if self.plainmode or self._hasheader(['From: ']):
179 self.comments = ['Date: ' + date] + self.comments
179 self.comments = ['Date: ' + date] + self.comments
180 else:
180 else:
181 tmp = ['# HG changeset patch', '# Date ' + date, '']
181 tmp = ['# HG changeset patch', '# Date ' + date, '']
182 self.comments = tmp + self.comments
182 self.comments = tmp + self.comments
183 self.date = date
183 self.date = date
184
184
185 def setparent(self, parent):
185 def setparent(self, parent):
186 if not self.updateheader(['# Parent '], parent):
186 if not self.updateheader(['# Parent '], parent):
187 try:
187 try:
188 patchheaderat = self.comments.index('# HG changeset patch')
188 patchheaderat = self.comments.index('# HG changeset patch')
189 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
189 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
190 except ValueError:
190 except ValueError:
191 pass
191 pass
192 self.parent = parent
192 self.parent = parent
193
193
194 def setmessage(self, message):
194 def setmessage(self, message):
195 if self.comments:
195 if self.comments:
196 self._delmsg()
196 self._delmsg()
197 self.message = [message]
197 self.message = [message]
198 self.comments += self.message
198 self.comments += self.message
199
199
200 def updateheader(self, prefixes, new):
200 def updateheader(self, prefixes, new):
201 '''Update all references to a field in the patch header.
201 '''Update all references to a field in the patch header.
202 Return whether the field is present.'''
202 Return whether the field is present.'''
203 res = False
203 res = False
204 for prefix in prefixes:
204 for prefix in prefixes:
205 for i in xrange(len(self.comments)):
205 for i in xrange(len(self.comments)):
206 if self.comments[i].startswith(prefix):
206 if self.comments[i].startswith(prefix):
207 self.comments[i] = prefix + new
207 self.comments[i] = prefix + new
208 res = True
208 res = True
209 break
209 break
210 return res
210 return res
211
211
212 def _hasheader(self, prefixes):
212 def _hasheader(self, prefixes):
213 '''Check if a header starts with any of the given prefixes.'''
213 '''Check if a header starts with any of the given prefixes.'''
214 for prefix in prefixes:
214 for prefix in prefixes:
215 for comment in self.comments:
215 for comment in self.comments:
216 if comment.startswith(prefix):
216 if comment.startswith(prefix):
217 return True
217 return True
218 return False
218 return False
219
219
220 def __str__(self):
220 def __str__(self):
221 if not self.comments:
221 if not self.comments:
222 return ''
222 return ''
223 return '\n'.join(self.comments) + '\n\n'
223 return '\n'.join(self.comments) + '\n\n'
224
224
225 def _delmsg(self):
225 def _delmsg(self):
226 '''Remove existing message, keeping the rest of the comments fields.
226 '''Remove existing message, keeping the rest of the comments fields.
227 If comments contains 'subject: ', message will prepend
227 If comments contains 'subject: ', message will prepend
228 the field and a blank line.'''
228 the field and a blank line.'''
229 if self.message:
229 if self.message:
230 subj = 'subject: ' + self.message[0].lower()
230 subj = 'subject: ' + self.message[0].lower()
231 for i in xrange(len(self.comments)):
231 for i in xrange(len(self.comments)):
232 if subj == self.comments[i].lower():
232 if subj == self.comments[i].lower():
233 del self.comments[i]
233 del self.comments[i]
234 self.message = self.message[2:]
234 self.message = self.message[2:]
235 break
235 break
236 ci = 0
236 ci = 0
237 for mi in self.message:
237 for mi in self.message:
238 while mi != self.comments[ci]:
238 while mi != self.comments[ci]:
239 ci += 1
239 ci += 1
240 del self.comments[ci]
240 del self.comments[ci]
241
241
242 class queue(object):
242 class queue(object):
243 def __init__(self, ui, path, patchdir=None):
243 def __init__(self, ui, path, patchdir=None):
244 self.basepath = path
244 self.basepath = path
245 self.path = patchdir or os.path.join(path, "patches")
245 self.path = patchdir or os.path.join(path, "patches")
246 self.opener = util.opener(self.path)
246 self.opener = util.opener(self.path)
247 self.ui = ui
247 self.ui = ui
248 self.applied_dirty = 0
248 self.applied_dirty = 0
249 self.series_dirty = 0
249 self.series_dirty = 0
250 self.series_path = "series"
250 self.series_path = "series"
251 self.status_path = "status"
251 self.status_path = "status"
252 self.guards_path = "guards"
252 self.guards_path = "guards"
253 self.active_guards = None
253 self.active_guards = None
254 self.guards_dirty = False
254 self.guards_dirty = False
255 # Handle mq.git as a bool with extended values
255 # Handle mq.git as a bool with extended values
256 try:
256 try:
257 gitmode = ui.configbool('mq', 'git', None)
257 gitmode = ui.configbool('mq', 'git', None)
258 if gitmode is None:
258 if gitmode is None:
259 raise error.ConfigError()
259 raise error.ConfigError()
260 self.gitmode = gitmode and 'yes' or 'no'
260 self.gitmode = gitmode and 'yes' or 'no'
261 except error.ConfigError:
261 except error.ConfigError:
262 self.gitmode = ui.config('mq', 'git', 'auto').lower()
262 self.gitmode = ui.config('mq', 'git', 'auto').lower()
263 self.plainmode = ui.configbool('mq', 'plain', False)
263 self.plainmode = ui.configbool('mq', 'plain', False)
264
264
265 @util.propertycache
265 @util.propertycache
266 def applied(self):
266 def applied(self):
267 if os.path.exists(self.join(self.status_path)):
267 if os.path.exists(self.join(self.status_path)):
268 lines = self.opener(self.status_path).read().splitlines()
268 lines = self.opener(self.status_path).read().splitlines()
269 return [statusentry(l) for l in lines]
269 return [statusentry(l) for l in lines]
270 return []
270 return []
271
271
272 @util.propertycache
272 @util.propertycache
273 def full_series(self):
273 def full_series(self):
274 if os.path.exists(self.join(self.series_path)):
274 if os.path.exists(self.join(self.series_path)):
275 return self.opener(self.series_path).read().splitlines()
275 return self.opener(self.series_path).read().splitlines()
276 return []
276 return []
277
277
278 @util.propertycache
278 @util.propertycache
279 def series(self):
279 def series(self):
280 self.parse_series()
280 self.parse_series()
281 return self.series
281 return self.series
282
282
283 @util.propertycache
283 @util.propertycache
284 def series_guards(self):
284 def series_guards(self):
285 self.parse_series()
285 self.parse_series()
286 return self.series_guards
286 return self.series_guards
287
287
288 def invalidate(self):
288 def invalidate(self):
289 for a in 'applied full_series series series_guards'.split():
289 for a in 'applied full_series series series_guards'.split():
290 if a in self.__dict__:
290 if a in self.__dict__:
291 delattr(self, a)
291 delattr(self, a)
292 self.applied_dirty = 0
292 self.applied_dirty = 0
293 self.series_dirty = 0
293 self.series_dirty = 0
294 self.guards_dirty = False
294 self.guards_dirty = False
295 self.active_guards = None
295 self.active_guards = None
296
296
297 def diffopts(self, opts={}, patchfn=None):
297 def diffopts(self, opts={}, patchfn=None):
298 diffopts = patch.diffopts(self.ui, opts)
298 diffopts = patch.diffopts(self.ui, opts)
299 if self.gitmode == 'auto':
299 if self.gitmode == 'auto':
300 diffopts.upgrade = True
300 diffopts.upgrade = True
301 elif self.gitmode == 'keep':
301 elif self.gitmode == 'keep':
302 pass
302 pass
303 elif self.gitmode in ('yes', 'no'):
303 elif self.gitmode in ('yes', 'no'):
304 diffopts.git = self.gitmode == 'yes'
304 diffopts.git = self.gitmode == 'yes'
305 else:
305 else:
306 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
306 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
307 ' got %s') % self.gitmode)
307 ' got %s') % self.gitmode)
308 if patchfn:
308 if patchfn:
309 diffopts = self.patchopts(diffopts, patchfn)
309 diffopts = self.patchopts(diffopts, patchfn)
310 return diffopts
310 return diffopts
311
311
312 def patchopts(self, diffopts, *patches):
312 def patchopts(self, diffopts, *patches):
313 """Return a copy of input diff options with git set to true if
313 """Return a copy of input diff options with git set to true if
314 referenced patch is a git patch and should be preserved as such.
314 referenced patch is a git patch and should be preserved as such.
315 """
315 """
316 diffopts = diffopts.copy()
316 diffopts = diffopts.copy()
317 if not diffopts.git and self.gitmode == 'keep':
317 if not diffopts.git and self.gitmode == 'keep':
318 for patchfn in patches:
318 for patchfn in patches:
319 patchf = self.opener(patchfn, 'r')
319 patchf = self.opener(patchfn, 'r')
320 # if the patch was a git patch, refresh it as a git patch
320 # if the patch was a git patch, refresh it as a git patch
321 for line in patchf:
321 for line in patchf:
322 if line.startswith('diff --git'):
322 if line.startswith('diff --git'):
323 diffopts.git = True
323 diffopts.git = True
324 break
324 break
325 patchf.close()
325 patchf.close()
326 return diffopts
326 return diffopts
327
327
328 def join(self, *p):
328 def join(self, *p):
329 return os.path.join(self.path, *p)
329 return os.path.join(self.path, *p)
330
330
331 def find_series(self, patch):
331 def find_series(self, patch):
332 pre = re.compile("(\s*)([^#]+)")
332 pre = re.compile("(\s*)([^#]+)")
333 index = 0
333 index = 0
334 for l in self.full_series:
334 for l in self.full_series:
335 m = pre.match(l)
335 m = pre.match(l)
336 if m:
336 if m:
337 s = m.group(2)
337 s = m.group(2)
338 s = s.rstrip()
338 s = s.rstrip()
339 if s == patch:
339 if s == patch:
340 return index
340 return index
341 index += 1
341 index += 1
342 return None
342 return None
343
343
344 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
344 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
345
345
346 def parse_series(self):
346 def parse_series(self):
347 self.series = []
347 self.series = []
348 self.series_guards = []
348 self.series_guards = []
349 for l in self.full_series:
349 for l in self.full_series:
350 h = l.find('#')
350 h = l.find('#')
351 if h == -1:
351 if h == -1:
352 patch = l
352 patch = l
353 comment = ''
353 comment = ''
354 elif h == 0:
354 elif h == 0:
355 continue
355 continue
356 else:
356 else:
357 patch = l[:h]
357 patch = l[:h]
358 comment = l[h:]
358 comment = l[h:]
359 patch = patch.strip()
359 patch = patch.strip()
360 if patch:
360 if patch:
361 if patch in self.series:
361 if patch in self.series:
362 raise util.Abort(_('%s appears more than once in %s') %
362 raise util.Abort(_('%s appears more than once in %s') %
363 (patch, self.join(self.series_path)))
363 (patch, self.join(self.series_path)))
364 self.series.append(patch)
364 self.series.append(patch)
365 self.series_guards.append(self.guard_re.findall(comment))
365 self.series_guards.append(self.guard_re.findall(comment))
366
366
367 def check_guard(self, guard):
367 def check_guard(self, guard):
368 if not guard:
368 if not guard:
369 return _('guard cannot be an empty string')
369 return _('guard cannot be an empty string')
370 bad_chars = '# \t\r\n\f'
370 bad_chars = '# \t\r\n\f'
371 first = guard[0]
371 first = guard[0]
372 if first in '-+':
372 if first in '-+':
373 return (_('guard %r starts with invalid character: %r') %
373 return (_('guard %r starts with invalid character: %r') %
374 (guard, first))
374 (guard, first))
375 for c in bad_chars:
375 for c in bad_chars:
376 if c in guard:
376 if c in guard:
377 return _('invalid character in guard %r: %r') % (guard, c)
377 return _('invalid character in guard %r: %r') % (guard, c)
378
378
379 def set_active(self, guards):
379 def set_active(self, guards):
380 for guard in guards:
380 for guard in guards:
381 bad = self.check_guard(guard)
381 bad = self.check_guard(guard)
382 if bad:
382 if bad:
383 raise util.Abort(bad)
383 raise util.Abort(bad)
384 guards = sorted(set(guards))
384 guards = sorted(set(guards))
385 self.ui.debug('active guards: %s\n' % ' '.join(guards))
385 self.ui.debug('active guards: %s\n' % ' '.join(guards))
386 self.active_guards = guards
386 self.active_guards = guards
387 self.guards_dirty = True
387 self.guards_dirty = True
388
388
389 def active(self):
389 def active(self):
390 if self.active_guards is None:
390 if self.active_guards is None:
391 self.active_guards = []
391 self.active_guards = []
392 try:
392 try:
393 guards = self.opener(self.guards_path).read().split()
393 guards = self.opener(self.guards_path).read().split()
394 except IOError, err:
394 except IOError, err:
395 if err.errno != errno.ENOENT:
395 if err.errno != errno.ENOENT:
396 raise
396 raise
397 guards = []
397 guards = []
398 for i, guard in enumerate(guards):
398 for i, guard in enumerate(guards):
399 bad = self.check_guard(guard)
399 bad = self.check_guard(guard)
400 if bad:
400 if bad:
401 self.ui.warn('%s:%d: %s\n' %
401 self.ui.warn('%s:%d: %s\n' %
402 (self.join(self.guards_path), i + 1, bad))
402 (self.join(self.guards_path), i + 1, bad))
403 else:
403 else:
404 self.active_guards.append(guard)
404 self.active_guards.append(guard)
405 return self.active_guards
405 return self.active_guards
406
406
407 def set_guards(self, idx, guards):
407 def set_guards(self, idx, guards):
408 for g in guards:
408 for g in guards:
409 if len(g) < 2:
409 if len(g) < 2:
410 raise util.Abort(_('guard %r too short') % g)
410 raise util.Abort(_('guard %r too short') % g)
411 if g[0] not in '-+':
411 if g[0] not in '-+':
412 raise util.Abort(_('guard %r starts with invalid char') % g)
412 raise util.Abort(_('guard %r starts with invalid char') % g)
413 bad = self.check_guard(g[1:])
413 bad = self.check_guard(g[1:])
414 if bad:
414 if bad:
415 raise util.Abort(bad)
415 raise util.Abort(bad)
416 drop = self.guard_re.sub('', self.full_series[idx])
416 drop = self.guard_re.sub('', self.full_series[idx])
417 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
417 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
418 self.parse_series()
418 self.parse_series()
419 self.series_dirty = True
419 self.series_dirty = True
420
420
421 def pushable(self, idx):
421 def pushable(self, idx):
422 if isinstance(idx, str):
422 if isinstance(idx, str):
423 idx = self.series.index(idx)
423 idx = self.series.index(idx)
424 patchguards = self.series_guards[idx]
424 patchguards = self.series_guards[idx]
425 if not patchguards:
425 if not patchguards:
426 return True, None
426 return True, None
427 guards = self.active()
427 guards = self.active()
428 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
428 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
429 if exactneg:
429 if exactneg:
430 return False, exactneg[0]
430 return False, exactneg[0]
431 pos = [g for g in patchguards if g[0] == '+']
431 pos = [g for g in patchguards if g[0] == '+']
432 exactpos = [g for g in pos if g[1:] in guards]
432 exactpos = [g for g in pos if g[1:] in guards]
433 if pos:
433 if pos:
434 if exactpos:
434 if exactpos:
435 return True, exactpos[0]
435 return True, exactpos[0]
436 return False, pos
436 return False, pos
437 return True, ''
437 return True, ''
438
438
439 def explain_pushable(self, idx, all_patches=False):
439 def explain_pushable(self, idx, all_patches=False):
440 write = all_patches and self.ui.write or self.ui.warn
440 write = all_patches and self.ui.write or self.ui.warn
441 if all_patches or self.ui.verbose:
441 if all_patches or self.ui.verbose:
442 if isinstance(idx, str):
442 if isinstance(idx, str):
443 idx = self.series.index(idx)
443 idx = self.series.index(idx)
444 pushable, why = self.pushable(idx)
444 pushable, why = self.pushable(idx)
445 if all_patches and pushable:
445 if all_patches and pushable:
446 if why is None:
446 if why is None:
447 write(_('allowing %s - no guards in effect\n') %
447 write(_('allowing %s - no guards in effect\n') %
448 self.series[idx])
448 self.series[idx])
449 else:
449 else:
450 if not why:
450 if not why:
451 write(_('allowing %s - no matching negative guards\n') %
451 write(_('allowing %s - no matching negative guards\n') %
452 self.series[idx])
452 self.series[idx])
453 else:
453 else:
454 write(_('allowing %s - guarded by %r\n') %
454 write(_('allowing %s - guarded by %r\n') %
455 (self.series[idx], why))
455 (self.series[idx], why))
456 if not pushable:
456 if not pushable:
457 if why:
457 if why:
458 write(_('skipping %s - guarded by %r\n') %
458 write(_('skipping %s - guarded by %r\n') %
459 (self.series[idx], why))
459 (self.series[idx], why))
460 else:
460 else:
461 write(_('skipping %s - no matching guards\n') %
461 write(_('skipping %s - no matching guards\n') %
462 self.series[idx])
462 self.series[idx])
463
463
464 def save_dirty(self):
464 def save_dirty(self):
465 def write_list(items, path):
465 def write_list(items, path):
466 fp = self.opener(path, 'w')
466 fp = self.opener(path, 'w')
467 for i in items:
467 for i in items:
468 fp.write("%s\n" % i)
468 fp.write("%s\n" % i)
469 fp.close()
469 fp.close()
470 if self.applied_dirty:
470 if self.applied_dirty:
471 write_list(map(str, self.applied), self.status_path)
471 write_list(map(str, self.applied), self.status_path)
472 if self.series_dirty:
472 if self.series_dirty:
473 write_list(self.full_series, self.series_path)
473 write_list(self.full_series, self.series_path)
474 if self.guards_dirty:
474 if self.guards_dirty:
475 write_list(self.active_guards, self.guards_path)
475 write_list(self.active_guards, self.guards_path)
476
476
477 def removeundo(self, repo):
477 def removeundo(self, repo):
478 undo = repo.sjoin('undo')
478 undo = repo.sjoin('undo')
479 if not os.path.exists(undo):
479 if not os.path.exists(undo):
480 return
480 return
481 try:
481 try:
482 os.unlink(undo)
482 os.unlink(undo)
483 except OSError, inst:
483 except OSError, inst:
484 self.ui.warn(_('error removing undo: %s\n') % str(inst))
484 self.ui.warn(_('error removing undo: %s\n') % str(inst))
485
485
486 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
486 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
487 fp=None, changes=None, opts={}):
487 fp=None, changes=None, opts={}):
488 stat = opts.get('stat')
488 stat = opts.get('stat')
489 if stat:
489 if stat:
490 opts['unified'] = '0'
490 opts['unified'] = '0'
491
491
492 m = cmdutil.match(repo, files, opts)
492 m = cmdutil.match(repo, files, opts)
493 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
493 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
494 write = fp is None and repo.ui.write or fp.write
494 write = fp is None and repo.ui.write or fp.write
495 if stat:
495 if stat:
496 width = self.ui.interactive() and util.termwidth() or 80
496 width = self.ui.interactive() and util.termwidth() or 80
497 write(patch.diffstat(util.iterlines(chunks), width=width,
497 write(patch.diffstat(util.iterlines(chunks), width=width,
498 git=diffopts.git))
498 git=diffopts.git))
499 else:
499 else:
500 for chunk in chunks:
500 for chunk in chunks:
501 write(chunk)
501 write(chunk)
502
502
503 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
503 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
504 # first try just applying the patch
504 # first try just applying the patch
505 (err, n) = self.apply(repo, [patch], update_status=False,
505 (err, n) = self.apply(repo, [patch], update_status=False,
506 strict=True, merge=rev)
506 strict=True, merge=rev)
507
507
508 if err == 0:
508 if err == 0:
509 return (err, n)
509 return (err, n)
510
510
511 if n is None:
511 if n is None:
512 raise util.Abort(_("apply failed for patch %s") % patch)
512 raise util.Abort(_("apply failed for patch %s") % patch)
513
513
514 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
514 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
515
515
516 # apply failed, strip away that rev and merge.
516 # apply failed, strip away that rev and merge.
517 hg.clean(repo, head)
517 hg.clean(repo, head)
518 self.strip(repo, n, update=False, backup='strip')
518 self.strip(repo, n, update=False, backup='strip')
519
519
520 ctx = repo[rev]
520 ctx = repo[rev]
521 ret = hg.merge(repo, rev)
521 ret = hg.merge(repo, rev)
522 if ret:
522 if ret:
523 raise util.Abort(_("update returned %d") % ret)
523 raise util.Abort(_("update returned %d") % ret)
524 n = repo.commit(ctx.description(), ctx.user(), force=True)
524 n = repo.commit(ctx.description(), ctx.user(), force=True)
525 if n is None:
525 if n is None:
526 raise util.Abort(_("repo commit failed"))
526 raise util.Abort(_("repo commit failed"))
527 try:
527 try:
528 ph = patchheader(mergeq.join(patch), self.plainmode)
528 ph = patchheader(mergeq.join(patch), self.plainmode)
529 except:
529 except:
530 raise util.Abort(_("unable to read %s") % patch)
530 raise util.Abort(_("unable to read %s") % patch)
531
531
532 diffopts = self.patchopts(diffopts, patch)
532 diffopts = self.patchopts(diffopts, patch)
533 patchf = self.opener(patch, "w")
533 patchf = self.opener(patch, "w")
534 comments = str(ph)
534 comments = str(ph)
535 if comments:
535 if comments:
536 patchf.write(comments)
536 patchf.write(comments)
537 self.printdiff(repo, diffopts, head, n, fp=patchf)
537 self.printdiff(repo, diffopts, head, n, fp=patchf)
538 patchf.close()
538 patchf.close()
539 self.removeundo(repo)
539 self.removeundo(repo)
540 return (0, n)
540 return (0, n)
541
541
542 def qparents(self, repo, rev=None):
542 def qparents(self, repo, rev=None):
543 if rev is None:
543 if rev is None:
544 (p1, p2) = repo.dirstate.parents()
544 (p1, p2) = repo.dirstate.parents()
545 if p2 == nullid:
545 if p2 == nullid:
546 return p1
546 return p1
547 if len(self.applied) == 0:
547 if len(self.applied) == 0:
548 return None
548 return None
549 return bin(self.applied[-1].rev)
549 return bin(self.applied[-1].rev)
550 pp = repo.changelog.parents(rev)
550 pp = repo.changelog.parents(rev)
551 if pp[1] != nullid:
551 if pp[1] != nullid:
552 arevs = [x.rev for x in self.applied]
552 arevs = [x.rev for x in self.applied]
553 p0 = hex(pp[0])
553 p0 = hex(pp[0])
554 p1 = hex(pp[1])
554 p1 = hex(pp[1])
555 if p0 in arevs:
555 if p0 in arevs:
556 return pp[0]
556 return pp[0]
557 if p1 in arevs:
557 if p1 in arevs:
558 return pp[1]
558 return pp[1]
559 return pp[0]
559 return pp[0]
560
560
561 def mergepatch(self, repo, mergeq, series, diffopts):
561 def mergepatch(self, repo, mergeq, series, diffopts):
562 if len(self.applied) == 0:
562 if len(self.applied) == 0:
563 # each of the patches merged in will have two parents. This
563 # each of the patches merged in will have two parents. This
564 # can confuse the qrefresh, qdiff, and strip code because it
564 # can confuse the qrefresh, qdiff, and strip code because it
565 # needs to know which parent is actually in the patch queue.
565 # needs to know which parent is actually in the patch queue.
566 # so, we insert a merge marker with only one parent. This way
566 # so, we insert a merge marker with only one parent. This way
567 # the first patch in the queue is never a merge patch
567 # the first patch in the queue is never a merge patch
568 #
568 #
569 pname = ".hg.patches.merge.marker"
569 pname = ".hg.patches.merge.marker"
570 n = repo.commit('[mq]: merge marker', force=True)
570 n = repo.commit('[mq]: merge marker', force=True)
571 self.removeundo(repo)
571 self.removeundo(repo)
572 self.applied.append(statusentry(hex(n), pname))
572 self.applied.append(statusentry(hex(n), pname))
573 self.applied_dirty = 1
573 self.applied_dirty = 1
574
574
575 head = self.qparents(repo)
575 head = self.qparents(repo)
576
576
577 for patch in series:
577 for patch in series:
578 patch = mergeq.lookup(patch, strict=True)
578 patch = mergeq.lookup(patch, strict=True)
579 if not patch:
579 if not patch:
580 self.ui.warn(_("patch %s does not exist\n") % patch)
580 self.ui.warn(_("patch %s does not exist\n") % patch)
581 return (1, None)
581 return (1, None)
582 pushable, reason = self.pushable(patch)
582 pushable, reason = self.pushable(patch)
583 if not pushable:
583 if not pushable:
584 self.explain_pushable(patch, all_patches=True)
584 self.explain_pushable(patch, all_patches=True)
585 continue
585 continue
586 info = mergeq.isapplied(patch)
586 info = mergeq.isapplied(patch)
587 if not info:
587 if not info:
588 self.ui.warn(_("patch %s is not applied\n") % patch)
588 self.ui.warn(_("patch %s is not applied\n") % patch)
589 return (1, None)
589 return (1, None)
590 rev = bin(info[1])
590 rev = bin(info[1])
591 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
591 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
592 if head:
592 if head:
593 self.applied.append(statusentry(hex(head), patch))
593 self.applied.append(statusentry(hex(head), patch))
594 self.applied_dirty = 1
594 self.applied_dirty = 1
595 if err:
595 if err:
596 return (err, head)
596 return (err, head)
597 self.save_dirty()
597 self.save_dirty()
598 return (0, head)
598 return (0, head)
599
599
600 def patch(self, repo, patchfile):
600 def patch(self, repo, patchfile):
601 '''Apply patchfile to the working directory.
601 '''Apply patchfile to the working directory.
602 patchfile: name of patch file'''
602 patchfile: name of patch file'''
603 files = {}
603 files = {}
604 try:
604 try:
605 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
605 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
606 files=files, eolmode=None)
606 files=files, eolmode=None)
607 except Exception, inst:
607 except Exception, inst:
608 self.ui.note(str(inst) + '\n')
608 self.ui.note(str(inst) + '\n')
609 if not self.ui.verbose:
609 if not self.ui.verbose:
610 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
610 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
611 return (False, files, False)
611 return (False, files, False)
612
612
613 return (True, files, fuzz)
613 return (True, files, fuzz)
614
614
615 def apply(self, repo, series, list=False, update_status=True,
615 def apply(self, repo, series, list=False, update_status=True,
616 strict=False, patchdir=None, merge=None, all_files={}):
616 strict=False, patchdir=None, merge=None, all_files={}):
617 wlock = lock = tr = None
617 wlock = lock = tr = None
618 try:
618 try:
619 wlock = repo.wlock()
619 wlock = repo.wlock()
620 lock = repo.lock()
620 lock = repo.lock()
621 tr = repo.transaction()
621 tr = repo.transaction()
622 try:
622 try:
623 ret = self._apply(repo, series, list, update_status,
623 ret = self._apply(repo, series, list, update_status,
624 strict, patchdir, merge, all_files=all_files)
624 strict, patchdir, merge, all_files=all_files)
625 tr.close()
625 tr.close()
626 self.save_dirty()
626 self.save_dirty()
627 return ret
627 return ret
628 except:
628 except:
629 try:
629 try:
630 tr.abort()
630 tr.abort()
631 finally:
631 finally:
632 repo.invalidate()
632 repo.invalidate()
633 repo.dirstate.invalidate()
633 repo.dirstate.invalidate()
634 raise
634 raise
635 finally:
635 finally:
636 del tr
636 del tr
637 release(lock, wlock)
637 release(lock, wlock)
638 self.removeundo(repo)
638 self.removeundo(repo)
639
639
640 def _apply(self, repo, series, list=False, update_status=True,
640 def _apply(self, repo, series, list=False, update_status=True,
641 strict=False, patchdir=None, merge=None, all_files={}):
641 strict=False, patchdir=None, merge=None, all_files={}):
642 '''returns (error, hash)
642 '''returns (error, hash)
643 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
643 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
644 # TODO unify with commands.py
644 # TODO unify with commands.py
645 if not patchdir:
645 if not patchdir:
646 patchdir = self.path
646 patchdir = self.path
647 err = 0
647 err = 0
648 n = None
648 n = None
649 for patchname in series:
649 for patchname in series:
650 pushable, reason = self.pushable(patchname)
650 pushable, reason = self.pushable(patchname)
651 if not pushable:
651 if not pushable:
652 self.explain_pushable(patchname, all_patches=True)
652 self.explain_pushable(patchname, all_patches=True)
653 continue
653 continue
654 self.ui.status(_("applying %s\n") % patchname)
654 self.ui.status(_("applying %s\n") % patchname)
655 pf = os.path.join(patchdir, patchname)
655 pf = os.path.join(patchdir, patchname)
656
656
657 try:
657 try:
658 ph = patchheader(self.join(patchname), self.plainmode)
658 ph = patchheader(self.join(patchname), self.plainmode)
659 except:
659 except:
660 self.ui.warn(_("unable to read %s\n") % patchname)
660 self.ui.warn(_("unable to read %s\n") % patchname)
661 err = 1
661 err = 1
662 break
662 break
663
663
664 message = ph.message
664 message = ph.message
665 if not message:
665 if not message:
666 message = "imported patch %s\n" % patchname
666 message = "imported patch %s\n" % patchname
667 else:
667 else:
668 if list:
668 if list:
669 message.append("\nimported patch %s" % patchname)
669 message.append("\nimported patch %s" % patchname)
670 message = '\n'.join(message)
670 message = '\n'.join(message)
671
671
672 if ph.haspatch:
672 if ph.haspatch:
673 (patcherr, files, fuzz) = self.patch(repo, pf)
673 (patcherr, files, fuzz) = self.patch(repo, pf)
674 all_files.update(files)
674 all_files.update(files)
675 patcherr = not patcherr
675 patcherr = not patcherr
676 else:
676 else:
677 self.ui.warn(_("patch %s is empty\n") % patchname)
677 self.ui.warn(_("patch %s is empty\n") % patchname)
678 patcherr, files, fuzz = 0, [], 0
678 patcherr, files, fuzz = 0, [], 0
679
679
680 if merge and files:
680 if merge and files:
681 # Mark as removed/merged and update dirstate parent info
681 # Mark as removed/merged and update dirstate parent info
682 removed = []
682 removed = []
683 merged = []
683 merged = []
684 for f in files:
684 for f in files:
685 if os.path.exists(repo.wjoin(f)):
685 if os.path.exists(repo.wjoin(f)):
686 merged.append(f)
686 merged.append(f)
687 else:
687 else:
688 removed.append(f)
688 removed.append(f)
689 for f in removed:
689 for f in removed:
690 repo.dirstate.remove(f)
690 repo.dirstate.remove(f)
691 for f in merged:
691 for f in merged:
692 repo.dirstate.merge(f)
692 repo.dirstate.merge(f)
693 p1, p2 = repo.dirstate.parents()
693 p1, p2 = repo.dirstate.parents()
694 repo.dirstate.setparents(p1, merge)
694 repo.dirstate.setparents(p1, merge)
695
695
696 files = patch.updatedir(self.ui, repo, files)
696 files = patch.updatedir(self.ui, repo, files)
697 match = cmdutil.matchfiles(repo, files or [])
697 match = cmdutil.matchfiles(repo, files or [])
698 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
698 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
699
699
700 if n is None:
700 if n is None:
701 raise util.Abort(_("repo commit failed"))
701 raise util.Abort(_("repo commit failed"))
702
702
703 if update_status:
703 if update_status:
704 self.applied.append(statusentry(hex(n), patchname))
704 self.applied.append(statusentry(hex(n), patchname))
705
705
706 if patcherr:
706 if patcherr:
707 self.ui.warn(_("patch failed, rejects left in working dir\n"))
707 self.ui.warn(_("patch failed, rejects left in working dir\n"))
708 err = 2
708 err = 2
709 break
709 break
710
710
711 if fuzz and strict:
711 if fuzz and strict:
712 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
712 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
713 err = 3
713 err = 3
714 break
714 break
715 return (err, n)
715 return (err, n)
716
716
717 def _cleanup(self, patches, numrevs, keep=False):
717 def _cleanup(self, patches, numrevs, keep=False):
718 if not keep:
718 if not keep:
719 r = self.qrepo()
719 r = self.qrepo()
720 if r:
720 if r:
721 r.remove(patches, True)
721 r.remove(patches, True)
722 else:
722 else:
723 for p in patches:
723 for p in patches:
724 os.unlink(self.join(p))
724 os.unlink(self.join(p))
725
725
726 if numrevs:
726 if numrevs:
727 del self.applied[:numrevs]
727 del self.applied[:numrevs]
728 self.applied_dirty = 1
728 self.applied_dirty = 1
729
729
730 for i in sorted([self.find_series(p) for p in patches], reverse=True):
730 for i in sorted([self.find_series(p) for p in patches], reverse=True):
731 del self.full_series[i]
731 del self.full_series[i]
732 self.parse_series()
732 self.parse_series()
733 self.series_dirty = 1
733 self.series_dirty = 1
734
734
735 def _revpatches(self, repo, revs):
735 def _revpatches(self, repo, revs):
736 firstrev = repo[self.applied[0].rev].rev()
736 firstrev = repo[self.applied[0].rev].rev()
737 patches = []
737 patches = []
738 for i, rev in enumerate(revs):
738 for i, rev in enumerate(revs):
739
739
740 if rev < firstrev:
740 if rev < firstrev:
741 raise util.Abort(_('revision %d is not managed') % rev)
741 raise util.Abort(_('revision %d is not managed') % rev)
742
742
743 ctx = repo[rev]
743 ctx = repo[rev]
744 base = bin(self.applied[i].rev)
744 base = bin(self.applied[i].rev)
745 if ctx.node() != base:
745 if ctx.node() != base:
746 msg = _('cannot delete revision %d above applied patches')
746 msg = _('cannot delete revision %d above applied patches')
747 raise util.Abort(msg % rev)
747 raise util.Abort(msg % rev)
748
748
749 patch = self.applied[i].name
749 patch = self.applied[i].name
750 for fmt in ('[mq]: %s', 'imported patch %s'):
750 for fmt in ('[mq]: %s', 'imported patch %s'):
751 if ctx.description() == fmt % patch:
751 if ctx.description() == fmt % patch:
752 msg = _('patch %s finalized without changeset message\n')
752 msg = _('patch %s finalized without changeset message\n')
753 repo.ui.status(msg % patch)
753 repo.ui.status(msg % patch)
754 break
754 break
755
755
756 patches.append(patch)
756 patches.append(patch)
757 return patches
757 return patches
758
758
759 def finish(self, repo, revs):
759 def finish(self, repo, revs):
760 patches = self._revpatches(repo, sorted(revs))
760 patches = self._revpatches(repo, sorted(revs))
761 self._cleanup(patches, len(patches))
761 self._cleanup(patches, len(patches))
762
762
763 def delete(self, repo, patches, opts):
763 def delete(self, repo, patches, opts):
764 if not patches and not opts.get('rev'):
764 if not patches and not opts.get('rev'):
765 raise util.Abort(_('qdelete requires at least one revision or '
765 raise util.Abort(_('qdelete requires at least one revision or '
766 'patch name'))
766 'patch name'))
767
767
768 realpatches = []
768 realpatches = []
769 for patch in patches:
769 for patch in patches:
770 patch = self.lookup(patch, strict=True)
770 patch = self.lookup(patch, strict=True)
771 info = self.isapplied(patch)
771 info = self.isapplied(patch)
772 if info:
772 if info:
773 raise util.Abort(_("cannot delete applied patch %s") % patch)
773 raise util.Abort(_("cannot delete applied patch %s") % patch)
774 if patch not in self.series:
774 if patch not in self.series:
775 raise util.Abort(_("patch %s not in series file") % patch)
775 raise util.Abort(_("patch %s not in series file") % patch)
776 realpatches.append(patch)
776 realpatches.append(patch)
777
777
778 numrevs = 0
778 numrevs = 0
779 if opts.get('rev'):
779 if opts.get('rev'):
780 if not self.applied:
780 if not self.applied:
781 raise util.Abort(_('no patches applied'))
781 raise util.Abort(_('no patches applied'))
782 revs = cmdutil.revrange(repo, opts['rev'])
782 revs = cmdutil.revrange(repo, opts['rev'])
783 if len(revs) > 1 and revs[0] > revs[1]:
783 if len(revs) > 1 and revs[0] > revs[1]:
784 revs.reverse()
784 revs.reverse()
785 revpatches = self._revpatches(repo, revs)
785 revpatches = self._revpatches(repo, revs)
786 realpatches += revpatches
786 realpatches += revpatches
787 numrevs = len(revpatches)
787 numrevs = len(revpatches)
788
788
789 self._cleanup(realpatches, numrevs, opts.get('keep'))
789 self._cleanup(realpatches, numrevs, opts.get('keep'))
790
790
791 def check_toppatch(self, repo):
791 def check_toppatch(self, repo):
792 if len(self.applied) > 0:
792 if len(self.applied) > 0:
793 top = bin(self.applied[-1].rev)
793 top = bin(self.applied[-1].rev)
794 patch = self.applied[-1].name
794 patch = self.applied[-1].name
795 pp = repo.dirstate.parents()
795 pp = repo.dirstate.parents()
796 if top not in pp:
796 if top not in pp:
797 raise util.Abort(_("working directory revision is not qtip"))
797 raise util.Abort(_("working directory revision is not qtip"))
798 return top, patch
798 return top, patch
799 return None, None
799 return None, None
800
800
801 def check_localchanges(self, repo, force=False, refresh=True):
801 def check_localchanges(self, repo, force=False, refresh=True):
802 m, a, r, d = repo.status()[:4]
802 m, a, r, d = repo.status()[:4]
803 if (m or a or r or d) and not force:
803 if (m or a or r or d) and not force:
804 if refresh:
804 if refresh:
805 raise util.Abort(_("local changes found, refresh first"))
805 raise util.Abort(_("local changes found, refresh first"))
806 else:
806 else:
807 raise util.Abort(_("local changes found"))
807 raise util.Abort(_("local changes found"))
808 return m, a, r, d
808 return m, a, r, d
809
809
810 _reserved = ('series', 'status', 'guards')
810 _reserved = ('series', 'status', 'guards')
811 def check_reserved_name(self, name):
811 def check_reserved_name(self, name):
812 if (name in self._reserved or name.startswith('.hg')
812 if (name in self._reserved or name.startswith('.hg')
813 or name.startswith('.mq') or '#' in name or ':' in name):
813 or name.startswith('.mq') or '#' in name or ':' in name):
814 raise util.Abort(_('"%s" cannot be used as the name of a patch')
814 raise util.Abort(_('"%s" cannot be used as the name of a patch')
815 % name)
815 % name)
816
816
817 def new(self, repo, patchfn, *pats, **opts):
817 def new(self, repo, patchfn, *pats, **opts):
818 """options:
818 """options:
819 msg: a string or a no-argument function returning a string
819 msg: a string or a no-argument function returning a string
820 """
820 """
821 msg = opts.get('msg')
821 msg = opts.get('msg')
822 user = opts.get('user')
822 user = opts.get('user')
823 date = opts.get('date')
823 date = opts.get('date')
824 if date:
824 if date:
825 date = util.parsedate(date)
825 date = util.parsedate(date)
826 diffopts = self.diffopts({'git': opts.get('git')})
826 diffopts = self.diffopts({'git': opts.get('git')})
827 self.check_reserved_name(patchfn)
827 self.check_reserved_name(patchfn)
828 if os.path.exists(self.join(patchfn)):
828 if os.path.exists(self.join(patchfn)):
829 raise util.Abort(_('patch "%s" already exists') % patchfn)
829 raise util.Abort(_('patch "%s" already exists') % patchfn)
830 if opts.get('include') or opts.get('exclude') or pats:
830 if opts.get('include') or opts.get('exclude') or pats:
831 match = cmdutil.match(repo, pats, opts)
831 match = cmdutil.match(repo, pats, opts)
832 # detect missing files in pats
832 # detect missing files in pats
833 def badfn(f, msg):
833 def badfn(f, msg):
834 raise util.Abort('%s: %s' % (f, msg))
834 raise util.Abort('%s: %s' % (f, msg))
835 match.bad = badfn
835 match.bad = badfn
836 m, a, r, d = repo.status(match=match)[:4]
836 m, a, r, d = repo.status(match=match)[:4]
837 else:
837 else:
838 m, a, r, d = self.check_localchanges(repo, force=True)
838 m, a, r, d = self.check_localchanges(repo, force=True)
839 match = cmdutil.matchfiles(repo, m + a + r)
839 match = cmdutil.matchfiles(repo, m + a + r)
840 if len(repo[None].parents()) > 1:
840 if len(repo[None].parents()) > 1:
841 raise util.Abort(_('cannot manage merge changesets'))
841 raise util.Abort(_('cannot manage merge changesets'))
842 commitfiles = m + a + r
842 commitfiles = m + a + r
843 self.check_toppatch(repo)
843 self.check_toppatch(repo)
844 insert = self.full_series_end()
844 insert = self.full_series_end()
845 wlock = repo.wlock()
845 wlock = repo.wlock()
846 try:
846 try:
847 # if patch file write fails, abort early
847 # if patch file write fails, abort early
848 p = self.opener(patchfn, "w")
848 p = self.opener(patchfn, "w")
849 try:
849 try:
850 if self.plainmode:
850 if self.plainmode:
851 if user:
851 if user:
852 p.write("From: " + user + "\n")
852 p.write("From: " + user + "\n")
853 if not date:
853 if not date:
854 p.write("\n")
854 p.write("\n")
855 if date:
855 if date:
856 p.write("Date: %d %d\n\n" % date)
856 p.write("Date: %d %d\n\n" % date)
857 else:
857 else:
858 p.write("# HG changeset patch\n")
858 p.write("# HG changeset patch\n")
859 p.write("# Parent "
859 p.write("# Parent "
860 + hex(repo[None].parents()[0].node()) + "\n")
860 + hex(repo[None].parents()[0].node()) + "\n")
861 if user:
861 if user:
862 p.write("# User " + user + "\n")
862 p.write("# User " + user + "\n")
863 if date:
863 if date:
864 p.write("# Date %s %s\n\n" % date)
864 p.write("# Date %s %s\n\n" % date)
865 if hasattr(msg, '__call__'):
865 if hasattr(msg, '__call__'):
866 msg = msg()
866 msg = msg()
867 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
867 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
868 n = repo.commit(commitmsg, user, date, match=match, force=True)
868 n = repo.commit(commitmsg, user, date, match=match, force=True)
869 if n is None:
869 if n is None:
870 raise util.Abort(_("repo commit failed"))
870 raise util.Abort(_("repo commit failed"))
871 try:
871 try:
872 self.full_series[insert:insert] = [patchfn]
872 self.full_series[insert:insert] = [patchfn]
873 self.applied.append(statusentry(hex(n), patchfn))
873 self.applied.append(statusentry(hex(n), patchfn))
874 self.parse_series()
874 self.parse_series()
875 self.series_dirty = 1
875 self.series_dirty = 1
876 self.applied_dirty = 1
876 self.applied_dirty = 1
877 if msg:
877 if msg:
878 msg = msg + "\n\n"
878 msg = msg + "\n\n"
879 p.write(msg)
879 p.write(msg)
880 if commitfiles:
880 if commitfiles:
881 parent = self.qparents(repo, n)
881 parent = self.qparents(repo, n)
882 chunks = patch.diff(repo, node1=parent, node2=n,
882 chunks = patch.diff(repo, node1=parent, node2=n,
883 match=match, opts=diffopts)
883 match=match, opts=diffopts)
884 for chunk in chunks:
884 for chunk in chunks:
885 p.write(chunk)
885 p.write(chunk)
886 p.close()
886 p.close()
887 wlock.release()
887 wlock.release()
888 wlock = None
888 wlock = None
889 r = self.qrepo()
889 r = self.qrepo()
890 if r:
890 if r:
891 r.add([patchfn])
891 r.add([patchfn])
892 except:
892 except:
893 repo.rollback()
893 repo.rollback()
894 raise
894 raise
895 except Exception:
895 except Exception:
896 patchpath = self.join(patchfn)
896 patchpath = self.join(patchfn)
897 try:
897 try:
898 os.unlink(patchpath)
898 os.unlink(patchpath)
899 except:
899 except:
900 self.ui.warn(_('error unlinking %s\n') % patchpath)
900 self.ui.warn(_('error unlinking %s\n') % patchpath)
901 raise
901 raise
902 self.removeundo(repo)
902 self.removeundo(repo)
903 finally:
903 finally:
904 release(wlock)
904 release(wlock)
905
905
906 def strip(self, repo, rev, update=True, backup="all", force=None):
906 def strip(self, repo, rev, update=True, backup="all", force=None):
907 wlock = lock = None
907 wlock = lock = None
908 try:
908 try:
909 wlock = repo.wlock()
909 wlock = repo.wlock()
910 lock = repo.lock()
910 lock = repo.lock()
911
911
912 if update:
912 if update:
913 self.check_localchanges(repo, force=force, refresh=False)
913 self.check_localchanges(repo, force=force, refresh=False)
914 urev = self.qparents(repo, rev)
914 urev = self.qparents(repo, rev)
915 hg.clean(repo, urev)
915 hg.clean(repo, urev)
916 repo.dirstate.write()
916 repo.dirstate.write()
917
917
918 self.removeundo(repo)
918 self.removeundo(repo)
919 repair.strip(self.ui, repo, rev, backup)
919 repair.strip(self.ui, repo, rev, backup)
920 # strip may have unbundled a set of backed up revisions after
920 # strip may have unbundled a set of backed up revisions after
921 # the actual strip
921 # the actual strip
922 self.removeundo(repo)
922 self.removeundo(repo)
923 finally:
923 finally:
924 release(lock, wlock)
924 release(lock, wlock)
925
925
926 def isapplied(self, patch):
926 def isapplied(self, patch):
927 """returns (index, rev, patch)"""
927 """returns (index, rev, patch)"""
928 for i, a in enumerate(self.applied):
928 for i, a in enumerate(self.applied):
929 if a.name == patch:
929 if a.name == patch:
930 return (i, a.rev, a.name)
930 return (i, a.rev, a.name)
931 return None
931 return None
932
932
933 # if the exact patch name does not exist, we try a few
933 # if the exact patch name does not exist, we try a few
934 # variations. If strict is passed, we try only #1
934 # variations. If strict is passed, we try only #1
935 #
935 #
936 # 1) a number to indicate an offset in the series file
936 # 1) a number to indicate an offset in the series file
937 # 2) a unique substring of the patch name was given
937 # 2) a unique substring of the patch name was given
938 # 3) patchname[-+]num to indicate an offset in the series file
938 # 3) patchname[-+]num to indicate an offset in the series file
939 def lookup(self, patch, strict=False):
939 def lookup(self, patch, strict=False):
940 patch = patch and str(patch)
940 patch = patch and str(patch)
941
941
942 def partial_name(s):
942 def partial_name(s):
943 if s in self.series:
943 if s in self.series:
944 return s
944 return s
945 matches = [x for x in self.series if s in x]
945 matches = [x for x in self.series if s in x]
946 if len(matches) > 1:
946 if len(matches) > 1:
947 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
947 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
948 for m in matches:
948 for m in matches:
949 self.ui.warn(' %s\n' % m)
949 self.ui.warn(' %s\n' % m)
950 return None
950 return None
951 if matches:
951 if matches:
952 return matches[0]
952 return matches[0]
953 if len(self.series) > 0 and len(self.applied) > 0:
953 if len(self.series) > 0 and len(self.applied) > 0:
954 if s == 'qtip':
954 if s == 'qtip':
955 return self.series[self.series_end(True)-1]
955 return self.series[self.series_end(True)-1]
956 if s == 'qbase':
956 if s == 'qbase':
957 return self.series[0]
957 return self.series[0]
958 return None
958 return None
959
959
960 if patch is None:
960 if patch is None:
961 return None
961 return None
962 if patch in self.series:
962 if patch in self.series:
963 return patch
963 return patch
964
964
965 if not os.path.isfile(self.join(patch)):
965 if not os.path.isfile(self.join(patch)):
966 try:
966 try:
967 sno = int(patch)
967 sno = int(patch)
968 except (ValueError, OverflowError):
968 except (ValueError, OverflowError):
969 pass
969 pass
970 else:
970 else:
971 if -len(self.series) <= sno < len(self.series):
971 if -len(self.series) <= sno < len(self.series):
972 return self.series[sno]
972 return self.series[sno]
973
973
974 if not strict:
974 if not strict:
975 res = partial_name(patch)
975 res = partial_name(patch)
976 if res:
976 if res:
977 return res
977 return res
978 minus = patch.rfind('-')
978 minus = patch.rfind('-')
979 if minus >= 0:
979 if minus >= 0:
980 res = partial_name(patch[:minus])
980 res = partial_name(patch[:minus])
981 if res:
981 if res:
982 i = self.series.index(res)
982 i = self.series.index(res)
983 try:
983 try:
984 off = int(patch[minus + 1:] or 1)
984 off = int(patch[minus + 1:] or 1)
985 except (ValueError, OverflowError):
985 except (ValueError, OverflowError):
986 pass
986 pass
987 else:
987 else:
988 if i - off >= 0:
988 if i - off >= 0:
989 return self.series[i - off]
989 return self.series[i - off]
990 plus = patch.rfind('+')
990 plus = patch.rfind('+')
991 if plus >= 0:
991 if plus >= 0:
992 res = partial_name(patch[:plus])
992 res = partial_name(patch[:plus])
993 if res:
993 if res:
994 i = self.series.index(res)
994 i = self.series.index(res)
995 try:
995 try:
996 off = int(patch[plus + 1:] or 1)
996 off = int(patch[plus + 1:] or 1)
997 except (ValueError, OverflowError):
997 except (ValueError, OverflowError):
998 pass
998 pass
999 else:
999 else:
1000 if i + off < len(self.series):
1000 if i + off < len(self.series):
1001 return self.series[i + off]
1001 return self.series[i + off]
1002 raise util.Abort(_("patch %s not in series") % patch)
1002 raise util.Abort(_("patch %s not in series") % patch)
1003
1003
1004 def push(self, repo, patch=None, force=False, list=False,
1004 def push(self, repo, patch=None, force=False, list=False,
1005 mergeq=None, all=False):
1005 mergeq=None, all=False):
1006 diffopts = self.diffopts()
1006 diffopts = self.diffopts()
1007 wlock = repo.wlock()
1007 wlock = repo.wlock()
1008 try:
1008 try:
1009 heads = []
1009 heads = []
1010 for b, ls in repo.branchmap().iteritems():
1010 for b, ls in repo.branchmap().iteritems():
1011 heads += ls
1011 heads += ls
1012 if not heads:
1012 if not heads:
1013 heads = [nullid]
1013 heads = [nullid]
1014 if repo.dirstate.parents()[0] not in heads:
1014 if repo.dirstate.parents()[0] not in heads:
1015 self.ui.status(_("(working directory not at a head)\n"))
1015 self.ui.status(_("(working directory not at a head)\n"))
1016
1016
1017 if not self.series:
1017 if not self.series:
1018 self.ui.warn(_('no patches in series\n'))
1018 self.ui.warn(_('no patches in series\n'))
1019 return 0
1019 return 0
1020
1020
1021 patch = self.lookup(patch)
1021 patch = self.lookup(patch)
1022 # Suppose our series file is: A B C and the current 'top'
1022 # Suppose our series file is: A B C and the current 'top'
1023 # patch is B. qpush C should be performed (moving forward)
1023 # patch is B. qpush C should be performed (moving forward)
1024 # qpush B is a NOP (no change) qpush A is an error (can't
1024 # qpush B is a NOP (no change) qpush A is an error (can't
1025 # go backwards with qpush)
1025 # go backwards with qpush)
1026 if patch:
1026 if patch:
1027 info = self.isapplied(patch)
1027 info = self.isapplied(patch)
1028 if info:
1028 if info:
1029 if info[0] < len(self.applied) - 1:
1029 if info[0] < len(self.applied) - 1:
1030 raise util.Abort(
1030 raise util.Abort(
1031 _("cannot push to a previous patch: %s") % patch)
1031 _("cannot push to a previous patch: %s") % patch)
1032 self.ui.warn(
1032 self.ui.warn(
1033 _('qpush: %s is already at the top\n') % patch)
1033 _('qpush: %s is already at the top\n') % patch)
1034 return
1034 return
1035 pushable, reason = self.pushable(patch)
1035 pushable, reason = self.pushable(patch)
1036 if not pushable:
1036 if not pushable:
1037 if reason:
1037 if reason:
1038 reason = _('guarded by %r') % reason
1038 reason = _('guarded by %r') % reason
1039 else:
1039 else:
1040 reason = _('no matching guards')
1040 reason = _('no matching guards')
1041 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1041 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1042 return 1
1042 return 1
1043 elif all:
1043 elif all:
1044 patch = self.series[-1]
1044 patch = self.series[-1]
1045 if self.isapplied(patch):
1045 if self.isapplied(patch):
1046 self.ui.warn(_('all patches are currently applied\n'))
1046 self.ui.warn(_('all patches are currently applied\n'))
1047 return 0
1047 return 0
1048
1048
1049 # Following the above example, starting at 'top' of B:
1049 # Following the above example, starting at 'top' of B:
1050 # qpush should be performed (pushes C), but a subsequent
1050 # qpush should be performed (pushes C), but a subsequent
1051 # qpush without an argument is an error (nothing to
1051 # qpush without an argument is an error (nothing to
1052 # apply). This allows a loop of "...while hg qpush..." to
1052 # apply). This allows a loop of "...while hg qpush..." to
1053 # work as it detects an error when done
1053 # work as it detects an error when done
1054 start = self.series_end()
1054 start = self.series_end()
1055 if start == len(self.series):
1055 if start == len(self.series):
1056 self.ui.warn(_('patch series already fully applied\n'))
1056 self.ui.warn(_('patch series already fully applied\n'))
1057 return 1
1057 return 1
1058 if not force:
1058 if not force:
1059 self.check_localchanges(repo)
1059 self.check_localchanges(repo)
1060
1060
1061 self.applied_dirty = 1
1061 self.applied_dirty = 1
1062 if start > 0:
1062 if start > 0:
1063 self.check_toppatch(repo)
1063 self.check_toppatch(repo)
1064 if not patch:
1064 if not patch:
1065 patch = self.series[start]
1065 patch = self.series[start]
1066 end = start + 1
1066 end = start + 1
1067 else:
1067 else:
1068 end = self.series.index(patch, start) + 1
1068 end = self.series.index(patch, start) + 1
1069
1069
1070 s = self.series[start:end]
1070 s = self.series[start:end]
1071 all_files = {}
1071 all_files = {}
1072 try:
1072 try:
1073 if mergeq:
1073 if mergeq:
1074 ret = self.mergepatch(repo, mergeq, s, diffopts)
1074 ret = self.mergepatch(repo, mergeq, s, diffopts)
1075 else:
1075 else:
1076 ret = self.apply(repo, s, list, all_files=all_files)
1076 ret = self.apply(repo, s, list, all_files=all_files)
1077 except:
1077 except:
1078 self.ui.warn(_('cleaning up working directory...'))
1078 self.ui.warn(_('cleaning up working directory...'))
1079 node = repo.dirstate.parents()[0]
1079 node = repo.dirstate.parents()[0]
1080 hg.revert(repo, node, None)
1080 hg.revert(repo, node, None)
1081 unknown = repo.status(unknown=True)[4]
1081 unknown = repo.status(unknown=True)[4]
1082 # only remove unknown files that we know we touched or
1082 # only remove unknown files that we know we touched or
1083 # created while patching
1083 # created while patching
1084 for f in unknown:
1084 for f in unknown:
1085 if f in all_files:
1085 if f in all_files:
1086 util.unlink(repo.wjoin(f))
1086 util.unlink(repo.wjoin(f))
1087 self.ui.warn(_('done\n'))
1087 self.ui.warn(_('done\n'))
1088 raise
1088 raise
1089
1089
1090 if not self.applied:
1090 if not self.applied:
1091 return ret[0]
1091 return ret[0]
1092 top = self.applied[-1].name
1092 top = self.applied[-1].name
1093 if ret[0] and ret[0] > 1:
1093 if ret[0] and ret[0] > 1:
1094 msg = _("errors during apply, please fix and refresh %s\n")
1094 msg = _("errors during apply, please fix and refresh %s\n")
1095 self.ui.write(msg % top)
1095 self.ui.write(msg % top)
1096 else:
1096 else:
1097 self.ui.write(_("now at: %s\n") % top)
1097 self.ui.write(_("now at: %s\n") % top)
1098 return ret[0]
1098 return ret[0]
1099
1099
1100 finally:
1100 finally:
1101 wlock.release()
1101 wlock.release()
1102
1102
1103 def pop(self, repo, patch=None, force=False, update=True, all=False):
1103 def pop(self, repo, patch=None, force=False, update=True, all=False):
1104 def getfile(f, rev, flags):
1104 def getfile(f, rev, flags):
1105 t = repo.file(f).read(rev)
1105 t = repo.file(f).read(rev)
1106 repo.wwrite(f, t, flags)
1106 repo.wwrite(f, t, flags)
1107
1107
1108 wlock = repo.wlock()
1108 wlock = repo.wlock()
1109 try:
1109 try:
1110 if patch:
1110 if patch:
1111 # index, rev, patch
1111 # index, rev, patch
1112 info = self.isapplied(patch)
1112 info = self.isapplied(patch)
1113 if not info:
1113 if not info:
1114 patch = self.lookup(patch)
1114 patch = self.lookup(patch)
1115 info = self.isapplied(patch)
1115 info = self.isapplied(patch)
1116 if not info:
1116 if not info:
1117 raise util.Abort(_("patch %s is not applied") % patch)
1117 raise util.Abort(_("patch %s is not applied") % patch)
1118
1118
1119 if len(self.applied) == 0:
1119 if len(self.applied) == 0:
1120 # Allow qpop -a to work repeatedly,
1120 # Allow qpop -a to work repeatedly,
1121 # but not qpop without an argument
1121 # but not qpop without an argument
1122 self.ui.warn(_("no patches applied\n"))
1122 self.ui.warn(_("no patches applied\n"))
1123 return not all
1123 return not all
1124
1124
1125 if all:
1125 if all:
1126 start = 0
1126 start = 0
1127 elif patch:
1127 elif patch:
1128 start = info[0] + 1
1128 start = info[0] + 1
1129 else:
1129 else:
1130 start = len(self.applied) - 1
1130 start = len(self.applied) - 1
1131
1131
1132 if start >= len(self.applied):
1132 if start >= len(self.applied):
1133 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1133 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1134 return
1134 return
1135
1135
1136 if not update:
1136 if not update:
1137 parents = repo.dirstate.parents()
1137 parents = repo.dirstate.parents()
1138 rr = [bin(x.rev) for x in self.applied]
1138 rr = [bin(x.rev) for x in self.applied]
1139 for p in parents:
1139 for p in parents:
1140 if p in rr:
1140 if p in rr:
1141 self.ui.warn(_("qpop: forcing dirstate update\n"))
1141 self.ui.warn(_("qpop: forcing dirstate update\n"))
1142 update = True
1142 update = True
1143 else:
1143 else:
1144 parents = [p.hex() for p in repo[None].parents()]
1144 parents = [p.hex() for p in repo[None].parents()]
1145 needupdate = False
1145 needupdate = False
1146 for entry in self.applied[start:]:
1146 for entry in self.applied[start:]:
1147 if entry.rev in parents:
1147 if entry.rev in parents:
1148 needupdate = True
1148 needupdate = True
1149 break
1149 break
1150 update = needupdate
1150 update = needupdate
1151
1151
1152 if not force and update:
1152 if not force and update:
1153 self.check_localchanges(repo)
1153 self.check_localchanges(repo)
1154
1154
1155 self.applied_dirty = 1
1155 self.applied_dirty = 1
1156 end = len(self.applied)
1156 end = len(self.applied)
1157 rev = bin(self.applied[start].rev)
1157 rev = bin(self.applied[start].rev)
1158 if update:
1158 if update:
1159 top = self.check_toppatch(repo)[0]
1159 top = self.check_toppatch(repo)[0]
1160
1160
1161 try:
1161 try:
1162 heads = repo.changelog.heads(rev)
1162 heads = repo.changelog.heads(rev)
1163 except error.LookupError:
1163 except error.LookupError:
1164 node = short(rev)
1164 node = short(rev)
1165 raise util.Abort(_('trying to pop unknown node %s') % node)
1165 raise util.Abort(_('trying to pop unknown node %s') % node)
1166
1166
1167 if heads != [bin(self.applied[-1].rev)]:
1167 if heads != [bin(self.applied[-1].rev)]:
1168 raise util.Abort(_("popping would remove a revision not "
1168 raise util.Abort(_("popping would remove a revision not "
1169 "managed by this patch queue"))
1169 "managed by this patch queue"))
1170
1170
1171 # we know there are no local changes, so we can make a simplified
1171 # we know there are no local changes, so we can make a simplified
1172 # form of hg.update.
1172 # form of hg.update.
1173 if update:
1173 if update:
1174 qp = self.qparents(repo, rev)
1174 qp = self.qparents(repo, rev)
1175 changes = repo.changelog.read(qp)
1175 changes = repo.changelog.read(qp)
1176 mmap = repo.manifest.read(changes[0])
1176 mmap = repo.manifest.read(changes[0])
1177 m, a, r, d = repo.status(qp, top)[:4]
1177 m, a, r, d = repo.status(qp, top)[:4]
1178 if d:
1178 if d:
1179 raise util.Abort(_("deletions found between repo revs"))
1179 raise util.Abort(_("deletions found between repo revs"))
1180 for f in a:
1180 for f in a:
1181 try:
1181 try:
1182 os.unlink(repo.wjoin(f))
1182 os.unlink(repo.wjoin(f))
1183 except OSError, e:
1183 except OSError, e:
1184 if e.errno != errno.ENOENT:
1184 if e.errno != errno.ENOENT:
1185 raise
1185 raise
1186 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1186 try: os.removedirs(os.path.dirname(repo.wjoin(f)))
1187 except: pass
1187 except: pass
1188 repo.dirstate.forget(f)
1188 repo.dirstate.forget(f)
1189 for f in m:
1189 for f in m:
1190 getfile(f, mmap[f], mmap.flags(f))
1190 getfile(f, mmap[f], mmap.flags(f))
1191 for f in r:
1191 for f in r:
1192 getfile(f, mmap[f], mmap.flags(f))
1192 getfile(f, mmap[f], mmap.flags(f))
1193 for f in m + r:
1193 for f in m + r:
1194 repo.dirstate.normal(f)
1194 repo.dirstate.normal(f)
1195 repo.dirstate.setparents(qp, nullid)
1195 repo.dirstate.setparents(qp, nullid)
1196 for patch in reversed(self.applied[start:end]):
1196 for patch in reversed(self.applied[start:end]):
1197 self.ui.status(_("popping %s\n") % patch.name)
1197 self.ui.status(_("popping %s\n") % patch.name)
1198 del self.applied[start:end]
1198 del self.applied[start:end]
1199 self.strip(repo, rev, update=False, backup='strip')
1199 self.strip(repo, rev, update=False, backup='strip')
1200 if len(self.applied):
1200 if len(self.applied):
1201 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1201 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1202 else:
1202 else:
1203 self.ui.write(_("patch queue now empty\n"))
1203 self.ui.write(_("patch queue now empty\n"))
1204 finally:
1204 finally:
1205 wlock.release()
1205 wlock.release()
1206
1206
1207 def diff(self, repo, pats, opts):
1207 def diff(self, repo, pats, opts):
1208 top, patch = self.check_toppatch(repo)
1208 top, patch = self.check_toppatch(repo)
1209 if not top:
1209 if not top:
1210 self.ui.write(_("no patches applied\n"))
1210 self.ui.write(_("no patches applied\n"))
1211 return
1211 return
1212 qp = self.qparents(repo, top)
1212 qp = self.qparents(repo, top)
1213 if opts.get('reverse'):
1213 if opts.get('reverse'):
1214 node1, node2 = None, qp
1214 node1, node2 = None, qp
1215 else:
1215 else:
1216 node1, node2 = qp, None
1216 node1, node2 = qp, None
1217 diffopts = self.diffopts(opts, patch)
1217 diffopts = self.diffopts(opts, patch)
1218 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1218 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1219
1219
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the topmost applied patch from the working directory.

        Regenerates the patch file (header and diff) against the patch's
        parent, strips the old qtip changeset and commits a replacement.
        Returns 1 when no patches are applied.  The dirstate is mutated in
        place between the strip and the commit, so statement order here is
        load-bearing.
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = cmdutil.match(repo, pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with inc/exl options
                matchfn = cmdutil.match(repo, opts=opts)
            else:
                match = cmdutil.matchall(repo)
            # m/a/r/d: working-dir status relative to qtip
            m, a, r, d = repo.status(match=match)[:4]

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            # deduplicate the merged modified/removed/added lists
            m = list(set(mm))
            r = list(set(dd))
            a = list(set(aa))
            c = [filter(matchfn, l) for l in (m, a, r)]
            match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
            chunks = patch.diff(repo, patchparent, match=match,
                                changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)

            try:
                if diffopts.git or diffopts.upgrade:
                    # git-style patches: preserve copy/rename information
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                # assumes strip can roll itself back if interrupted
                repo.dirstate.setparents(*cparents)
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, top, update=False,
                           backup='strip')
            except:
                # dirstate was mutated above; discard the in-memory copy
                # before re-raising so it is re-read from disk
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this
                patchf.rename()
                n = repo.commit(message, user, ph.date, match=match,
                                force=True)
                self.applied.append(statusentry(hex(n), patchfn))
            except:
                # old qtip is already stripped; rebuild the dirstate on the
                # patch parent and tell the user how to recover
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.save_dirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1402
1402
1403 def init(self, repo, create=False):
1403 def init(self, repo, create=False):
1404 if not create and os.path.isdir(self.path):
1404 if not create and os.path.isdir(self.path):
1405 raise util.Abort(_("patch queue directory already exists"))
1405 raise util.Abort(_("patch queue directory already exists"))
1406 try:
1406 try:
1407 os.mkdir(self.path)
1407 os.mkdir(self.path)
1408 except OSError, inst:
1408 except OSError, inst:
1409 if inst.errno != errno.EEXIST or not create:
1409 if inst.errno != errno.EEXIST or not create:
1410 raise
1410 raise
1411 if create:
1411 if create:
1412 return self.qrepo(create=True)
1412 return self.qrepo(create=True)
1413
1413
1414 def unapplied(self, repo, patch=None):
1414 def unapplied(self, repo, patch=None):
1415 if patch and patch not in self.series:
1415 if patch and patch not in self.series:
1416 raise util.Abort(_("patch %s is not in series file") % patch)
1416 raise util.Abort(_("patch %s is not in series file") % patch)
1417 if not patch:
1417 if not patch:
1418 start = self.series_end()
1418 start = self.series_end()
1419 else:
1419 else:
1420 start = self.series.index(patch) + 1
1420 start = self.series.index(patch) + 1
1421 unapplied = []
1421 unapplied = []
1422 for i in xrange(start, len(self.series)):
1422 for i in xrange(start, len(self.series)):
1423 pushable, reason = self.pushable(i)
1423 pushable, reason = self.pushable(i)
1424 if pushable:
1424 if pushable:
1425 unapplied.append((i, self.series[i]))
1425 unapplied.append((i, self.series[i]))
1426 self.explain_pushable(i)
1426 self.explain_pushable(i)
1427 return unapplied
1427 return unapplied
1428
1428
1429 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1429 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1430 summary=False):
1430 summary=False):
1431 def displayname(pfx, patchname):
1431 def displayname(pfx, patchname):
1432 if summary:
1432 if summary:
1433 ph = patchheader(self.join(patchname), self.plainmode)
1433 ph = patchheader(self.join(patchname), self.plainmode)
1434 msg = ph.message and ph.message[0] or ''
1434 msg = ph.message and ph.message[0] or ''
1435 if self.ui.interactive():
1435 if self.ui.interactive():
1436 width = util.termwidth() - len(pfx) - len(patchname) - 2
1436 width = util.termwidth() - len(pfx) - len(patchname) - 2
1437 if width > 0:
1437 if width > 0:
1438 msg = util.ellipsis(msg, width)
1438 msg = util.ellipsis(msg, width)
1439 else:
1439 else:
1440 msg = ''
1440 msg = ''
1441 msg = "%s%s: %s" % (pfx, patchname, msg)
1441 msg = "%s%s: %s" % (pfx, patchname, msg)
1442 else:
1442 else:
1443 msg = pfx + patchname
1443 msg = pfx + patchname
1444 self.ui.write(msg + '\n')
1444 self.ui.write(msg + '\n')
1445
1445
1446 applied = set([p.name for p in self.applied])
1446 applied = set([p.name for p in self.applied])
1447 if length is None:
1447 if length is None:
1448 length = len(self.series) - start
1448 length = len(self.series) - start
1449 if not missing:
1449 if not missing:
1450 if self.ui.verbose:
1450 if self.ui.verbose:
1451 idxwidth = len(str(start + length - 1))
1451 idxwidth = len(str(start + length - 1))
1452 for i in xrange(start, start + length):
1452 for i in xrange(start, start + length):
1453 patch = self.series[i]
1453 patch = self.series[i]
1454 if patch in applied:
1454 if patch in applied:
1455 stat = 'A'
1455 stat = 'A'
1456 elif self.pushable(i)[0]:
1456 elif self.pushable(i)[0]:
1457 stat = 'U'
1457 stat = 'U'
1458 else:
1458 else:
1459 stat = 'G'
1459 stat = 'G'
1460 pfx = ''
1460 pfx = ''
1461 if self.ui.verbose:
1461 if self.ui.verbose:
1462 pfx = '%*d %s ' % (idxwidth, i, stat)
1462 pfx = '%*d %s ' % (idxwidth, i, stat)
1463 elif status and status != stat:
1463 elif status and status != stat:
1464 continue
1464 continue
1465 displayname(pfx, patch)
1465 displayname(pfx, patch)
1466 else:
1466 else:
1467 msng_list = []
1467 msng_list = []
1468 for root, dirs, files in os.walk(self.path):
1468 for root, dirs, files in os.walk(self.path):
1469 d = root[len(self.path) + 1:]
1469 d = root[len(self.path) + 1:]
1470 for f in files:
1470 for f in files:
1471 fl = os.path.join(d, f)
1471 fl = os.path.join(d, f)
1472 if (fl not in self.series and
1472 if (fl not in self.series and
1473 fl not in (self.status_path, self.series_path,
1473 fl not in (self.status_path, self.series_path,
1474 self.guards_path)
1474 self.guards_path)
1475 and not fl.startswith('.')):
1475 and not fl.startswith('.')):
1476 msng_list.append(fl)
1476 msng_list.append(fl)
1477 for x in sorted(msng_list):
1477 for x in sorted(msng_list):
1478 pfx = self.ui.verbose and ('D ') or ''
1478 pfx = self.ui.verbose and ('D ') or ''
1479 displayname(pfx, x)
1479 displayname(pfx, x)
1480
1480
1481 def issaveline(self, l):
1481 def issaveline(self, l):
1482 if l.name == '.hg.patches.save.line':
1482 if l.name == '.hg.patches.save.line':
1483 return True
1483 return True
1484
1484
1485 def qrepo(self, create=False):
1485 def qrepo(self, create=False):
1486 if create or os.path.isdir(self.join(".hg")):
1486 if create or os.path.isdir(self.join(".hg")):
1487 return hg.repository(self.ui, path=self.path, create=create)
1487 return hg.repository(self.ui, path=self.path, create=create)
1488
1488
1489 def restore(self, repo, rev, delete=None, qupdate=None):
1489 def restore(self, repo, rev, delete=None, qupdate=None):
1490 c = repo.changelog.read(rev)
1490 c = repo.changelog.read(rev)
1491 desc = c[4].strip()
1491 desc = c[4].strip()
1492 lines = desc.splitlines()
1492 lines = desc.splitlines()
1493 i = 0
1493 i = 0
1494 datastart = None
1494 datastart = None
1495 series = []
1495 series = []
1496 applied = []
1496 applied = []
1497 qpp = None
1497 qpp = None
1498 for i, line in enumerate(lines):
1498 for i, line in enumerate(lines):
1499 if line == 'Patch Data:':
1499 if line == 'Patch Data:':
1500 datastart = i + 1
1500 datastart = i + 1
1501 elif line.startswith('Dirstate:'):
1501 elif line.startswith('Dirstate:'):
1502 l = line.rstrip()
1502 l = line.rstrip()
1503 l = l[10:].split(' ')
1503 l = l[10:].split(' ')
1504 qpp = [bin(x) for x in l]
1504 qpp = [bin(x) for x in l]
1505 elif datastart != None:
1505 elif datastart != None:
1506 l = line.rstrip()
1506 l = line.rstrip()
1507 se = statusentry(l)
1507 se = statusentry(l)
1508 file_ = se.name
1508 file_ = se.name
1509 if se.rev:
1509 if se.rev:
1510 applied.append(se)
1510 applied.append(se)
1511 else:
1511 else:
1512 series.append(file_)
1512 series.append(file_)
1513 if datastart is None:
1513 if datastart is None:
1514 self.ui.warn(_("No saved patch data found\n"))
1514 self.ui.warn(_("No saved patch data found\n"))
1515 return 1
1515 return 1
1516 self.ui.warn(_("restoring status: %s\n") % lines[0])
1516 self.ui.warn(_("restoring status: %s\n") % lines[0])
1517 self.full_series = series
1517 self.full_series = series
1518 self.applied = applied
1518 self.applied = applied
1519 self.parse_series()
1519 self.parse_series()
1520 self.series_dirty = 1
1520 self.series_dirty = 1
1521 self.applied_dirty = 1
1521 self.applied_dirty = 1
1522 heads = repo.changelog.heads()
1522 heads = repo.changelog.heads()
1523 if delete:
1523 if delete:
1524 if rev not in heads:
1524 if rev not in heads:
1525 self.ui.warn(_("save entry has children, leaving it alone\n"))
1525 self.ui.warn(_("save entry has children, leaving it alone\n"))
1526 else:
1526 else:
1527 self.ui.warn(_("removing save entry %s\n") % short(rev))
1527 self.ui.warn(_("removing save entry %s\n") % short(rev))
1528 pp = repo.dirstate.parents()
1528 pp = repo.dirstate.parents()
1529 if rev in pp:
1529 if rev in pp:
1530 update = True
1530 update = True
1531 else:
1531 else:
1532 update = False
1532 update = False
1533 self.strip(repo, rev, update=update, backup='strip')
1533 self.strip(repo, rev, update=update, backup='strip')
1534 if qpp:
1534 if qpp:
1535 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1535 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1536 (short(qpp[0]), short(qpp[1])))
1536 (short(qpp[0]), short(qpp[1])))
1537 if qupdate:
1537 if qupdate:
1538 self.ui.status(_("queue directory updating\n"))
1538 self.ui.status(_("queue directory updating\n"))
1539 r = self.qrepo()
1539 r = self.qrepo()
1540 if not r:
1540 if not r:
1541 self.ui.warn(_("Unable to load queue repository\n"))
1541 self.ui.warn(_("Unable to load queue repository\n"))
1542 return 1
1542 return 1
1543 hg.clean(r, qpp[0])
1543 hg.clean(r, qpp[0])
1544
1544
1545 def save(self, repo, msg=None):
1545 def save(self, repo, msg=None):
1546 if len(self.applied) == 0:
1546 if len(self.applied) == 0:
1547 self.ui.warn(_("save: no patches applied, exiting\n"))
1547 self.ui.warn(_("save: no patches applied, exiting\n"))
1548 return 1
1548 return 1
1549 if self.issaveline(self.applied[-1]):
1549 if self.issaveline(self.applied[-1]):
1550 self.ui.warn(_("status is already saved\n"))
1550 self.ui.warn(_("status is already saved\n"))
1551 return 1
1551 return 1
1552
1552
1553 ar = [':' + x for x in self.full_series]
1553 ar = [':' + x for x in self.full_series]
1554 if not msg:
1554 if not msg:
1555 msg = _("hg patches saved state")
1555 msg = _("hg patches saved state")
1556 else:
1556 else:
1557 msg = "hg patches: " + msg.rstrip('\r\n')
1557 msg = "hg patches: " + msg.rstrip('\r\n')
1558 r = self.qrepo()
1558 r = self.qrepo()
1559 if r:
1559 if r:
1560 pp = r.dirstate.parents()
1560 pp = r.dirstate.parents()
1561 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1561 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1562 msg += "\n\nPatch Data:\n"
1562 msg += "\n\nPatch Data:\n"
1563 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1563 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1564 "\n".join(ar) + '\n' or "")
1564 "\n".join(ar) + '\n' or "")
1565 n = repo.commit(text, force=True)
1565 n = repo.commit(text, force=True)
1566 if not n:
1566 if not n:
1567 self.ui.warn(_("repo commit failed\n"))
1567 self.ui.warn(_("repo commit failed\n"))
1568 return 1
1568 return 1
1569 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1569 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1570 self.applied_dirty = 1
1570 self.applied_dirty = 1
1571 self.removeundo(repo)
1571 self.removeundo(repo)
1572
1572
1573 def full_series_end(self):
1573 def full_series_end(self):
1574 if len(self.applied) > 0:
1574 if len(self.applied) > 0:
1575 p = self.applied[-1].name
1575 p = self.applied[-1].name
1576 end = self.find_series(p)
1576 end = self.find_series(p)
1577 if end is None:
1577 if end is None:
1578 return len(self.full_series)
1578 return len(self.full_series)
1579 return end + 1
1579 return end + 1
1580 return 0
1580 return 0
1581
1581
1582 def series_end(self, all_patches=False):
1582 def series_end(self, all_patches=False):
1583 """If all_patches is False, return the index of the next pushable patch
1583 """If all_patches is False, return the index of the next pushable patch
1584 in the series, or the series length. If all_patches is True, return the
1584 in the series, or the series length. If all_patches is True, return the
1585 index of the first patch past the last applied one.
1585 index of the first patch past the last applied one.
1586 """
1586 """
1587 end = 0
1587 end = 0
1588 def next(start):
1588 def next(start):
1589 if all_patches:
1589 if all_patches:
1590 return start
1590 return start
1591 i = start
1591 i = start
1592 while i < len(self.series):
1592 while i < len(self.series):
1593 p, reason = self.pushable(i)
1593 p, reason = self.pushable(i)
1594 if p:
1594 if p:
1595 break
1595 break
1596 self.explain_pushable(i)
1596 self.explain_pushable(i)
1597 i += 1
1597 i += 1
1598 return i
1598 return i
1599 if len(self.applied) > 0:
1599 if len(self.applied) > 0:
1600 p = self.applied[-1].name
1600 p = self.applied[-1].name
1601 try:
1601 try:
1602 end = self.series.index(p)
1602 end = self.series.index(p)
1603 except ValueError:
1603 except ValueError:
1604 return 0
1604 return 0
1605 return next(end + 1)
1605 return next(end + 1)
1606 return next(end)
1606 return next(end)
1607
1607
1608 def appliedname(self, index):
1608 def appliedname(self, index):
1609 pname = self.applied[index].name
1609 pname = self.applied[index].name
1610 if not self.ui.verbose:
1610 if not self.ui.verbose:
1611 p = pname
1611 p = pname
1612 else:
1612 else:
1613 p = str(self.series.index(pname)) + " " + pname
1613 p = str(self.series.index(pname)) + " " + pname
1614 return p
1614 return p
1615
1615
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into this queue.

        ``files`` are patch files to append to the series (``'-'`` reads
        the patch text from stdin and requires ``patchname``).  With
        ``rev``, existing changesets are placed under mq control instead:
        each revision is exported to a new patch file and recorded as
        applied.  ``existing`` registers files already present in the
        patch directory, ``force`` permits overwriting existing patch
        files, and ``git`` selects git-style diffs for exported revisions.
        """
        # reject a patch name already listed in the series file
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        # reject an on-disk patch file of the same name (unless --force)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            # process newest-first so patches are inserted in stack order
            rev.sort(reverse=True)
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            diffopts = self.diffopts({'git': git})
            for r in rev:
                # merges cannot be represented as a single patch
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                self.check_reserved_name(patchname)
                checkseries(patchname)
                checkfile(patchname)
                # newest-first iteration, so prepend to series/applied
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=diffopts)
                patchf.close()

                se = statusentry(hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                # -n only names a single patch; later ones get defaults
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                self.check_reserved_name(patchname)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(
                                _('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        # filename may be a local path or a URL
                        text = url.open(self.ui, filename).read()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read %s") % filename)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                self.check_reserved_name(patchname)
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                # insert after the last applied patch
                index = self.full_series_end() + i
                self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn(_("adding %s to series file\n") % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        # track new patch files in the versioned patch repo, if any
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1732
1732
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    # delegate to the queue object, then persist series/status state
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1745
1745
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""

    # Fix vs. original: dropped the unused local `l = len(q.applied)` and
    # collapsed the three separate `opts.get('last')` tests into one branch.
    q = repo.mq

    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # show everything up to and including the named patch
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)

    if opts.get('last'):
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        # show only the patch below the current top
        start = end - 2
        end = 1
    else:
        start = 0

    return q.qseries(repo, length=end, start=start, status='A',
                     summary=opts.get('summary'))
1773
1773
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        # list starts just after the named patch
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    first = opts.get('first')
    if first and start == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1

    # with --first, show only the next unapplied patch
    length = None
    if first:
        length = 1
    return q.qseries(repo, start=start, length=length, status='U',
                     summary=opts.get('summary'))
1792
1792
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               rev=opts['rev'], existing=opts['existing'],
               force=opts['force'], git=opts['git'])
    mq.save_dirty()

    # --push applies the imported patch; skip it for -r, which imports
    # revisions that are recorded as already applied
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1829
1829
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state)."""
    q = repo.mq
    r = q.init(repo, create)
    q.save_dirty()
    if r:
        # seed a versioned patch repo: ignore transient queue state and
        # track the (possibly empty) series file
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            for pat in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                        'status\n', 'guards\n'):
                fp.write(pat)
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
1853
1853
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use hg init --mq instead."""
    # thin wrapper: all the work happens in qinit()
    create = opts['create_repo']
    return qinit(ui, repo, create=create)
1866
1866
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by init --mq.
    '''
    # default location of the nested patch repo for a given repo URL
    def patchdir(repo):
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # fail early if the source has no versioned patch repository
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = first (bottom-most) applied changeset in the source
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # remote dest: we cannot strip there afterwards, so limit
                # the clone to heads not descending from qbase, plus
                # qbase's parent
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # remote source: best effort — ask it for qbase, ignore failure
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # remove the applied-patch changesets so the destination
            # starts with an unapplied queue
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1930
1930
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use hg commit --mq instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # Fix vs. original: wrap the message in _() for translation,
        # consistent with every other abort message in this file.
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1940
1940
def series(ui, repo, **opts):
    """print the entire series file"""
    # list every patch, whether applied or not
    mq = repo.mq
    mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1945
1945
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # index just past the last applied patch; 0 when nothing is applied
    t = 0
    if q.applied:
        t = q.series_end(True)
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1956
1956
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means nothing is left to push
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1965
1965
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    # guard clauses: need at least two applied patches to have a "previous"
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    return q.qseries(repo, start=napplied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1978
1978
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from the environment when
    -U/--currentuser or -D/--currentdate were given without an explicit
    value."""
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
1984
1984
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    # with -e, pass a callable so the editor is only launched when the
    # queue actually needs the message
    def getmsg():
        return ui.edit(msg, ui.username())
    q = repo.mq
    # Fix vs. original: dropped the dead `opts['msg'] = msg` that was
    # unconditionally overwritten by both branches below.
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
2022
2022
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            # -e launches an editor; a message given via -m/-l would
            # conflict with it
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
2053
2053
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    # all the work happens on the queue object
    mq = repo.mq
    mq.diff(repo, pats, opts)
    return 0
2069
2069
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo)[0]:
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # fix: the warning previously lacked its trailing newline, and
            # a name given twice was appended twice anyway, so q.patch()
            # below would try to apply the same patch a second time
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        if p not in patches:
            patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's description so the headers can
            # be concatenated once everything has applied cleanly
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # start from the current (top) patch header and append each folded
        # message, separated by a '* * *' marker line
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    q.refresh(repo, msg=message, git=diffopts.git)
    q.delete(repo, patches, opts)
    q.save_dirty()
2133
2133
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    # pop down to the patch if it is already applied, push up otherwise
    if q.isapplied(patch):
        move = q.pop
    else:
        move = q.push
    ret = move(repo, patch, force=opts['force'])
    q.save_dirty()
    return ret
2144
2144
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch::

        hg qguard other.patch -- +2.6.17 -stable
    '''
    def status(idx):
        # print "<patch>: <guard list>" for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints every entry and takes no other input
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no arguments, or the first argument is a guard ("+x"/"-x") rather
    # than a patch name: operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    # otherwise the first argument names the patch to work on
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # remaining args are the new guards (empty with -n/--none)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # no guards given: just print the patch's current guards
        status(q.series.index(q.lookup(patch)))
2190
2190
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # no name given: default to the topmost applied patch
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        name = q.lookup('qtip')
    else:
        name = q.lookup(patch)
    ph = patchheader(q.join(name), q.plainmode)
    ui.write('\n'.join(ph.message) + '\n')
2205
2205
def lastsavename(path):
    """Find the highest-numbered save file "<path>.N".

    Scans the directory containing path for entries named
    "<basename>.<number>" and returns a (fullpath, number) tuple for
    the entry with the largest number, or (None, None) when no such
    entry exists.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # fix: escape the base name and anchor the pattern; previously a base
    # containing regexp metacharacters could match wrong entries, and
    # unanchored prefix matching accepted names such as "patches.1.orig"
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2221 return (None, None)
2222
2222
def savename(path):
    """Return the next unused save name "<path>.N" in sequence."""
    last, index = lastsavename(path)
    if last is None:
        # no previous save file exists: start the sequence at .1
        index = 0
    return "%s.%d" % (path, index + 1)
2229
2229
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # locate the queue to merge with: the one named by -n, or the
        # most recently saved queue otherwise
        if opts['name']:
            qpath = repo.join(opts['name'])
        else:
            qpath = lastsavename(q.path)[0]
        if not qpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), qpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2252
2252
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.
    """
    if opts['name']:
        # popping from a foreign queue: leave the working directory alone
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2271
2271
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    if not name:
        # single-argument form: the argument is the new name and the
        # target is the topmost applied patch
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # destination is a directory: keep the patch's base file name
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(
            _('A patch named %s already exists in the series file') % name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.find_series(patch)
    # rewrite the series entry, preserving any '#guard' annotations
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, update the status file entry as well
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r:
        # mirror the rename in the versioned patch queue repository
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # old file was only added, never committed: swap the adds
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                # record as copy+remove so history follows the rename
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2331
2331
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    q = repo.mq
    # resolve the revision argument to a node before handing it off
    node = repo.lookup(rev)
    q.restore(repo, node, delete=opts['delete'], qupdate=opts['update'])
    q.save_dirty()
    return 0
2342
2342
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use rebase --mq instead."""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # fix: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit; the intent is only to
            # tolerate a missing/unremovable status file
            pass
    return 0
2374
2374
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped
    revision.
    """
    # backup policy: everything by default, narrowed by --backup/--nobackup
    backup = 'all'
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # decide whether the working directory must be updated: only needed
    # when a dirstate parent descends from the stripped revision
    update = True
    if p[0] == nullid:
        # working directory has no parent
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        # single parent, and rev is not one of its ancestors
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        # merge in progress, and rev is an ancestor of neither parent
        update = False

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2401
2401
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the qguard command to set or print guards on patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -stable (negative guard)
        qguard bar.patch +stable (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.'''

    q = repo.mq
    guards = q.active()
    if args or opts['none']:
        # change the active guard set; remember the before state so we
        # can report how many patches changed pushability
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.set_active(args)
        q.save_dirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts['pop'] and not opts['reapply']:
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts['series']:
        # -s/--series: tally how many series entries carry each guard
        guards = {}
        noguards = 0
        for gs in q.series_guards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            # verbose mode also reports the unguarded patch count
            guards['NONE'] = noguards
        guards = guards.items()
        # sort by guard name, ignoring the leading '+'/'-' sign
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        # no arguments: just report the currently active guards
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    # remember the current top patch before popping so --reapply can
    # push back to it afterwards
    reapply = opts['reapply'] and q.applied and q.appliedname(-1)
    popped = False
    if opts['pop'] or opts['reapply']:
        # pop down to just below the first applied patch that is now
        # guarded (or everything, if the bottom patch is guarded)
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            # persist queue state even if the push fails midway
            q.save_dirty()
2504
2504
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.
    """
    if opts['applied']:
        # --applied selects the whole applied stack
        revrange = ('qbase:qtip',) + revrange
    elif not revrange:
        raise util.Abort(_('no revisions specified'))

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    q.finish(repo, cmdutil.revrange(repo, revrange))
    q.save_dirty()
    return 0
2535
2535
2536 def reposetup(ui, repo):
2536 def reposetup(ui, repo):
2537 class mqrepo(repo.__class__):
2537 class mqrepo(repo.__class__):
2538 @util.propertycache
2538 @util.propertycache
2539 def mq(self):
2539 def mq(self):
2540 return queue(self.ui, self.join(""))
2540 return queue(self.ui, self.join(""))
2541
2541
2542 def abort_if_wdir_patched(self, errmsg, force=False):
2542 def abort_if_wdir_patched(self, errmsg, force=False):
2543 if self.mq.applied and not force:
2543 if self.mq.applied and not force:
2544 parent = hex(self.dirstate.parents()[0])
2544 parent = hex(self.dirstate.parents()[0])
2545 if parent in [s.rev for s in self.mq.applied]:
2545 if parent in [s.rev for s in self.mq.applied]:
2546 raise util.Abort(errmsg)
2546 raise util.Abort(errmsg)
2547
2547
2548 def commit(self, text="", user=None, date=None, match=None,
2548 def commit(self, text="", user=None, date=None, match=None,
2549 force=False, editor=False, extra={}):
2549 force=False, editor=False, extra={}):
2550 self.abort_if_wdir_patched(
2550 self.abort_if_wdir_patched(
2551 _('cannot commit over an applied mq patch'),
2551 _('cannot commit over an applied mq patch'),
2552 force)
2552 force)
2553
2553
2554 return super(mqrepo, self).commit(text, user, date, match, force,
2554 return super(mqrepo, self).commit(text, user, date, match, force,
2555 editor, extra)
2555 editor, extra)
2556
2556
2557 def push(self, remote, force=False, revs=None):
2557 def push(self, remote, force=False, revs=None):
2558 if self.mq.applied and not force and not revs:
2558 if self.mq.applied and not force and not revs:
2559 raise util.Abort(_('source has mq patches applied'))
2559 raise util.Abort(_('source has mq patches applied'))
2560 return super(mqrepo, self).push(remote, force, revs)
2560 return super(mqrepo, self).push(remote, force, revs)
2561
2561
2562 def _findtags(self):
2562 def _findtags(self):
2563 '''augment tags from base class with patch tags'''
2563 '''augment tags from base class with patch tags'''
2564 result = super(mqrepo, self)._findtags()
2564 result = super(mqrepo, self)._findtags()
2565
2565
2566 q = self.mq
2566 q = self.mq
2567 if not q.applied:
2567 if not q.applied:
2568 return result
2568 return result
2569
2569
2570 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2570 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2571
2571
2572 if mqtags[-1][0] not in self.changelog.nodemap:
2572 if mqtags[-1][0] not in self.changelog.nodemap:
2573 self.ui.warn(_('mq status file refers to unknown node %s\n')
2573 self.ui.warn(_('mq status file refers to unknown node %s\n')
2574 % short(mqtags[-1][0]))
2574 % short(mqtags[-1][0]))
2575 return result
2575 return result
2576
2576
2577 mqtags.append((mqtags[-1][0], 'qtip'))
2577 mqtags.append((mqtags[-1][0], 'qtip'))
2578 mqtags.append((mqtags[0][0], 'qbase'))
2578 mqtags.append((mqtags[0][0], 'qbase'))
2579 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2579 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2580 tags = result[0]
2580 tags = result[0]
2581 for patch in mqtags:
2581 for patch in mqtags:
2582 if patch[1] in tags:
2582 if patch[1] in tags:
2583 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2583 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2584 % patch[1])
2584 % patch[1])
2585 else:
2585 else:
2586 tags[patch[1]] = patch[0]
2586 tags[patch[1]] = patch[0]
2587
2587
2588 return result
2588 return result
2589
2589
2590 def _branchtags(self, partial, lrev):
2590 def _branchtags(self, partial, lrev):
2591 q = self.mq
2591 q = self.mq
2592 if not q.applied:
2592 if not q.applied:
2593 return super(mqrepo, self)._branchtags(partial, lrev)
2593 return super(mqrepo, self)._branchtags(partial, lrev)
2594
2594
2595 cl = self.changelog
2595 cl = self.changelog
2596 qbasenode = bin(q.applied[0].rev)
2596 qbasenode = bin(q.applied[0].rev)
2597 if qbasenode not in cl.nodemap:
2597 if qbasenode not in cl.nodemap:
2598 self.ui.warn(_('mq status file refers to unknown node %s\n')
2598 self.ui.warn(_('mq status file refers to unknown node %s\n')
2599 % short(qbasenode))
2599 % short(qbasenode))
2600 return super(mqrepo, self)._branchtags(partial, lrev)
2600 return super(mqrepo, self)._branchtags(partial, lrev)
2601
2601
2602 qbase = cl.rev(qbasenode)
2602 qbase = cl.rev(qbasenode)
2603 start = lrev + 1
2603 start = lrev + 1
2604 if start < qbase:
2604 if start < qbase:
2605 # update the cache (excluding the patches) and save it
2605 # update the cache (excluding the patches) and save it
2606 self._updatebranchcache(partial, lrev + 1, qbase)
2606 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
2607 self._updatebranchcache(partial, ctxgen)
2607 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2608 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2608 start = qbase
2609 start = qbase
2609 # if start = qbase, the cache is as updated as it should be.
2610 # if start = qbase, the cache is as updated as it should be.
2610 # if start > qbase, the cache includes (part of) the patches.
2611 # if start > qbase, the cache includes (part of) the patches.
2611 # we might as well use it, but we won't save it.
2612 # we might as well use it, but we won't save it.
2612
2613
2613 # update the cache up to the tip
2614 # update the cache up to the tip
2614 self._updatebranchcache(partial, start, len(cl))
2615 ctxgen = (self[r] for r in xrange(start, len(cl)))
2616 self._updatebranchcache(partial, ctxgen)
2615
2617
2616 return partial
2618 return partial
2617
2619
2618 if repo.local():
2620 if repo.local():
2619 repo.__class__ = mqrepo
2621 repo.__class__ = mqrepo
2620
2622
2621 def mqimport(orig, ui, repo, *args, **kwargs):
2623 def mqimport(orig, ui, repo, *args, **kwargs):
2622 if (hasattr(repo, 'abort_if_wdir_patched')
2624 if (hasattr(repo, 'abort_if_wdir_patched')
2623 and not kwargs.get('no_commit', False)):
2625 and not kwargs.get('no_commit', False)):
2624 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2626 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2625 kwargs.get('force'))
2627 kwargs.get('force'))
2626 return orig(ui, repo, *args, **kwargs)
2628 return orig(ui, repo, *args, **kwargs)
2627
2629
2628 def mqinit(orig, ui, *args, **kwargs):
2630 def mqinit(orig, ui, *args, **kwargs):
2629 mq = kwargs.pop('mq', None)
2631 mq = kwargs.pop('mq', None)
2630
2632
2631 if not mq:
2633 if not mq:
2632 return orig(ui, *args, **kwargs)
2634 return orig(ui, *args, **kwargs)
2633
2635
2634 if args:
2636 if args:
2635 repopath = args[0]
2637 repopath = args[0]
2636 if not hg.islocal(repopath):
2638 if not hg.islocal(repopath):
2637 raise util.Abort(_('only a local queue repository '
2639 raise util.Abort(_('only a local queue repository '
2638 'may be initialized'))
2640 'may be initialized'))
2639 else:
2641 else:
2640 repopath = cmdutil.findrepo(os.getcwd())
2642 repopath = cmdutil.findrepo(os.getcwd())
2641 if not repopath:
2643 if not repopath:
2642 raise util.Abort(_('There is no Mercurial repository here '
2644 raise util.Abort(_('There is no Mercurial repository here '
2643 '(.hg not found)'))
2645 '(.hg not found)'))
2644 repo = hg.repository(ui, repopath)
2646 repo = hg.repository(ui, repopath)
2645 return qinit(ui, repo, True)
2647 return qinit(ui, repo, True)
2646
2648
2647 def mqcommand(orig, ui, repo, *args, **kwargs):
2649 def mqcommand(orig, ui, repo, *args, **kwargs):
2648 """Add --mq option to operate on patch repository instead of main"""
2650 """Add --mq option to operate on patch repository instead of main"""
2649
2651
2650 # some commands do not like getting unknown options
2652 # some commands do not like getting unknown options
2651 mq = kwargs.pop('mq', None)
2653 mq = kwargs.pop('mq', None)
2652
2654
2653 if not mq:
2655 if not mq:
2654 return orig(ui, repo, *args, **kwargs)
2656 return orig(ui, repo, *args, **kwargs)
2655
2657
2656 q = repo.mq
2658 q = repo.mq
2657 r = q.qrepo()
2659 r = q.qrepo()
2658 if not r:
2660 if not r:
2659 raise util.Abort('no queue repository')
2661 raise util.Abort('no queue repository')
2660 return orig(r.ui, r, *args, **kwargs)
2662 return orig(r.ui, r, *args, **kwargs)
2661
2663
2662 def uisetup(ui):
2664 def uisetup(ui):
2663 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2665 mqopt = [('', 'mq', None, _("operate on patch repository"))]
2664
2666
2665 extensions.wrapcommand(commands.table, 'import', mqimport)
2667 extensions.wrapcommand(commands.table, 'import', mqimport)
2666
2668
2667 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2669 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2668 entry[1].extend(mqopt)
2670 entry[1].extend(mqopt)
2669
2671
2670 for cmd in commands.table.keys():
2672 for cmd in commands.table.keys():
2671 cmd = cmdutil.parsealiases(cmd)[0]
2673 cmd = cmdutil.parsealiases(cmd)[0]
2672 if cmd in commands.norepo:
2674 if cmd in commands.norepo:
2673 continue
2675 continue
2674 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2676 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2675 entry[1].extend(mqopt)
2677 entry[1].extend(mqopt)
2676
2678
2677 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2679 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2678
2680
2679 cmdtable = {
2681 cmdtable = {
2680 "qapplied":
2682 "qapplied":
2681 (applied,
2683 (applied,
2682 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2684 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2683 _('hg qapplied [-1] [-s] [PATCH]')),
2685 _('hg qapplied [-1] [-s] [PATCH]')),
2684 "qclone":
2686 "qclone":
2685 (clone,
2687 (clone,
2686 [('', 'pull', None, _('use pull protocol to copy metadata')),
2688 [('', 'pull', None, _('use pull protocol to copy metadata')),
2687 ('U', 'noupdate', None, _('do not update the new working directories')),
2689 ('U', 'noupdate', None, _('do not update the new working directories')),
2688 ('', 'uncompressed', None,
2690 ('', 'uncompressed', None,
2689 _('use uncompressed transfer (fast over LAN)')),
2691 _('use uncompressed transfer (fast over LAN)')),
2690 ('p', 'patches', '', _('location of source patch repository')),
2692 ('p', 'patches', '', _('location of source patch repository')),
2691 ] + commands.remoteopts,
2693 ] + commands.remoteopts,
2692 _('hg qclone [OPTION]... SOURCE [DEST]')),
2694 _('hg qclone [OPTION]... SOURCE [DEST]')),
2693 "qcommit|qci":
2695 "qcommit|qci":
2694 (commit,
2696 (commit,
2695 commands.table["^commit|ci"][1],
2697 commands.table["^commit|ci"][1],
2696 _('hg qcommit [OPTION]... [FILE]...')),
2698 _('hg qcommit [OPTION]... [FILE]...')),
2697 "^qdiff":
2699 "^qdiff":
2698 (diff,
2700 (diff,
2699 commands.diffopts + commands.diffopts2 + commands.walkopts,
2701 commands.diffopts + commands.diffopts2 + commands.walkopts,
2700 _('hg qdiff [OPTION]... [FILE]...')),
2702 _('hg qdiff [OPTION]... [FILE]...')),
2701 "qdelete|qremove|qrm":
2703 "qdelete|qremove|qrm":
2702 (delete,
2704 (delete,
2703 [('k', 'keep', None, _('keep patch file')),
2705 [('k', 'keep', None, _('keep patch file')),
2704 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2706 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2705 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2707 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2706 'qfold':
2708 'qfold':
2707 (fold,
2709 (fold,
2708 [('e', 'edit', None, _('edit patch header')),
2710 [('e', 'edit', None, _('edit patch header')),
2709 ('k', 'keep', None, _('keep folded patch files')),
2711 ('k', 'keep', None, _('keep folded patch files')),
2710 ] + commands.commitopts,
2712 ] + commands.commitopts,
2711 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2713 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2712 'qgoto':
2714 'qgoto':
2713 (goto,
2715 (goto,
2714 [('f', 'force', None, _('overwrite any local changes'))],
2716 [('f', 'force', None, _('overwrite any local changes'))],
2715 _('hg qgoto [OPTION]... PATCH')),
2717 _('hg qgoto [OPTION]... PATCH')),
2716 'qguard':
2718 'qguard':
2717 (guard,
2719 (guard,
2718 [('l', 'list', None, _('list all patches and guards')),
2720 [('l', 'list', None, _('list all patches and guards')),
2719 ('n', 'none', None, _('drop all guards'))],
2721 ('n', 'none', None, _('drop all guards'))],
2720 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2722 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2721 'qheader': (header, [], _('hg qheader [PATCH]')),
2723 'qheader': (header, [], _('hg qheader [PATCH]')),
2722 "^qimport":
2724 "^qimport":
2723 (qimport,
2725 (qimport,
2724 [('e', 'existing', None, _('import file in patch directory')),
2726 [('e', 'existing', None, _('import file in patch directory')),
2725 ('n', 'name', '', _('name of patch file')),
2727 ('n', 'name', '', _('name of patch file')),
2726 ('f', 'force', None, _('overwrite existing files')),
2728 ('f', 'force', None, _('overwrite existing files')),
2727 ('r', 'rev', [], _('place existing revisions under mq control')),
2729 ('r', 'rev', [], _('place existing revisions under mq control')),
2728 ('g', 'git', None, _('use git extended diff format')),
2730 ('g', 'git', None, _('use git extended diff format')),
2729 ('P', 'push', None, _('qpush after importing'))],
2731 ('P', 'push', None, _('qpush after importing'))],
2730 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2732 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2731 "^qinit":
2733 "^qinit":
2732 (init,
2734 (init,
2733 [('c', 'create-repo', None, _('create queue repository'))],
2735 [('c', 'create-repo', None, _('create queue repository'))],
2734 _('hg qinit [-c]')),
2736 _('hg qinit [-c]')),
2735 "qnew":
2737 "qnew":
2736 (new,
2738 (new,
2737 [('e', 'edit', None, _('edit commit message')),
2739 [('e', 'edit', None, _('edit commit message')),
2738 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2740 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2739 ('g', 'git', None, _('use git extended diff format')),
2741 ('g', 'git', None, _('use git extended diff format')),
2740 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2742 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2741 ('u', 'user', '', _('add "From: <given user>" to patch')),
2743 ('u', 'user', '', _('add "From: <given user>" to patch')),
2742 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2744 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2743 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2745 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2744 ] + commands.walkopts + commands.commitopts,
2746 ] + commands.walkopts + commands.commitopts,
2745 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2747 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2746 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2748 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2747 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2749 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2748 "^qpop":
2750 "^qpop":
2749 (pop,
2751 (pop,
2750 [('a', 'all', None, _('pop all patches')),
2752 [('a', 'all', None, _('pop all patches')),
2751 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2753 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2752 ('f', 'force', None, _('forget any local changes to patched files'))],
2754 ('f', 'force', None, _('forget any local changes to patched files'))],
2753 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2755 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2754 "^qpush":
2756 "^qpush":
2755 (push,
2757 (push,
2756 [('f', 'force', None, _('apply if the patch has rejects')),
2758 [('f', 'force', None, _('apply if the patch has rejects')),
2757 ('l', 'list', None, _('list patch name in commit text')),
2759 ('l', 'list', None, _('list patch name in commit text')),
2758 ('a', 'all', None, _('apply all patches')),
2760 ('a', 'all', None, _('apply all patches')),
2759 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2761 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2760 ('n', 'name', '', _('merge queue name (DEPRECATED)'))],
2762 ('n', 'name', '', _('merge queue name (DEPRECATED)'))],
2761 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2763 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2762 "^qrefresh":
2764 "^qrefresh":
2763 (refresh,
2765 (refresh,
2764 [('e', 'edit', None, _('edit commit message')),
2766 [('e', 'edit', None, _('edit commit message')),
2765 ('g', 'git', None, _('use git extended diff format')),
2767 ('g', 'git', None, _('use git extended diff format')),
2766 ('s', 'short', None,
2768 ('s', 'short', None,
2767 _('refresh only files already in the patch and specified files')),
2769 _('refresh only files already in the patch and specified files')),
2768 ('U', 'currentuser', None,
2770 ('U', 'currentuser', None,
2769 _('add/update author field in patch with current user')),
2771 _('add/update author field in patch with current user')),
2770 ('u', 'user', '',
2772 ('u', 'user', '',
2771 _('add/update author field in patch with given user')),
2773 _('add/update author field in patch with given user')),
2772 ('D', 'currentdate', None,
2774 ('D', 'currentdate', None,
2773 _('add/update date field in patch with current date')),
2775 _('add/update date field in patch with current date')),
2774 ('d', 'date', '',
2776 ('d', 'date', '',
2775 _('add/update date field in patch with given date'))
2777 _('add/update date field in patch with given date'))
2776 ] + commands.walkopts + commands.commitopts,
2778 ] + commands.walkopts + commands.commitopts,
2777 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2779 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2778 'qrename|qmv':
2780 'qrename|qmv':
2779 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2781 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2780 "qrestore":
2782 "qrestore":
2781 (restore,
2783 (restore,
2782 [('d', 'delete', None, _('delete save entry')),
2784 [('d', 'delete', None, _('delete save entry')),
2783 ('u', 'update', None, _('update queue working directory'))],
2785 ('u', 'update', None, _('update queue working directory'))],
2784 _('hg qrestore [-d] [-u] REV')),
2786 _('hg qrestore [-d] [-u] REV')),
2785 "qsave":
2787 "qsave":
2786 (save,
2788 (save,
2787 [('c', 'copy', None, _('copy patch directory')),
2789 [('c', 'copy', None, _('copy patch directory')),
2788 ('n', 'name', '', _('copy directory name')),
2790 ('n', 'name', '', _('copy directory name')),
2789 ('e', 'empty', None, _('clear queue status file')),
2791 ('e', 'empty', None, _('clear queue status file')),
2790 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2792 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2791 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2793 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2792 "qselect":
2794 "qselect":
2793 (select,
2795 (select,
2794 [('n', 'none', None, _('disable all guards')),
2796 [('n', 'none', None, _('disable all guards')),
2795 ('s', 'series', None, _('list all guards in series file')),
2797 ('s', 'series', None, _('list all guards in series file')),
2796 ('', 'pop', None, _('pop to before first guarded applied patch')),
2798 ('', 'pop', None, _('pop to before first guarded applied patch')),
2797 ('', 'reapply', None, _('pop, then reapply patches'))],
2799 ('', 'reapply', None, _('pop, then reapply patches'))],
2798 _('hg qselect [OPTION]... [GUARD]...')),
2800 _('hg qselect [OPTION]... [GUARD]...')),
2799 "qseries":
2801 "qseries":
2800 (series,
2802 (series,
2801 [('m', 'missing', None, _('print patches not in series')),
2803 [('m', 'missing', None, _('print patches not in series')),
2802 ] + seriesopts,
2804 ] + seriesopts,
2803 _('hg qseries [-ms]')),
2805 _('hg qseries [-ms]')),
2804 "^strip":
2806 "^strip":
2805 (strip,
2807 (strip,
2806 [('f', 'force', None, _('force removal with local changes')),
2808 [('f', 'force', None, _('force removal with local changes')),
2807 ('b', 'backup', None, _('bundle unrelated changesets')),
2809 ('b', 'backup', None, _('bundle unrelated changesets')),
2808 ('n', 'nobackup', None, _('no backups'))],
2810 ('n', 'nobackup', None, _('no backups'))],
2809 _('hg strip [-f] [-b] [-n] REV')),
2811 _('hg strip [-f] [-b] [-n] REV')),
2810 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2812 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2811 "qunapplied":
2813 "qunapplied":
2812 (unapplied,
2814 (unapplied,
2813 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2815 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2814 _('hg qunapplied [-1] [-s] [PATCH]')),
2816 _('hg qunapplied [-1] [-s] [PATCH]')),
2815 "qfinish":
2817 "qfinish":
2816 (finish,
2818 (finish,
2817 [('a', 'applied', None, _('finish all applied changesets'))],
2819 [('a', 'applied', None, _('finish all applied changesets'))],
2818 _('hg qfinish [-a] [REV]...')),
2820 _('hg qfinish [-a] [REV]...')),
2819 }
2821 }
@@ -1,2224 +1,2224 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92 self.sopener.options = {}
92 self.sopener.options = {}
93
93
94 # These two define the set of tags for this repository. _tags
94 # These two define the set of tags for this repository. _tags
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # maps tag name to node; _tagtypes maps tag name to 'global' or
96 # 'local'. (Global tags are defined by .hgtags across all
96 # 'local'. (Global tags are defined by .hgtags across all
97 # heads, and local tags are defined in .hg/localtags.) They
97 # heads, and local tags are defined in .hg/localtags.) They
98 # constitute the in-memory cache of tags.
98 # constitute the in-memory cache of tags.
99 self._tags = None
99 self._tags = None
100 self._tagtypes = None
100 self._tagtypes = None
101
101
102 self._branchcache = None # in UTF-8
102 self._branchcache = None # in UTF-8
103 self._branchcachetip = None
103 self._branchcachetip = None
104 self.nodetagscache = None
104 self.nodetagscache = None
105 self.filterpats = {}
105 self.filterpats = {}
106 self._datafilters = {}
106 self._datafilters = {}
107 self._transref = self._lockref = self._wlockref = None
107 self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
154 tag_disallowed = ':\r\n'
154 tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
254 def _findtags(self):
254 def _findtags(self):
255 '''Do the hard work of finding tags. Return a pair of dicts
255 '''Do the hard work of finding tags. Return a pair of dicts
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
256 (tags, tagtypes) where tags maps tag name to node, and tagtypes
257 maps tag name to a string like \'global\' or \'local\'.
257 maps tag name to a string like \'global\' or \'local\'.
258 Subclasses or extensions are free to add their own tags, but
258 Subclasses or extensions are free to add their own tags, but
259 should be aware that the returned dicts will be retained for the
259 should be aware that the returned dicts will be retained for the
260 duration of the localrepo object.'''
260 duration of the localrepo object.'''
261
261
262 # XXX what tagtype should subclasses/extensions use? Currently
262 # XXX what tagtype should subclasses/extensions use? Currently
263 # mq and bookmarks add tags, but do not set the tagtype at all.
263 # mq and bookmarks add tags, but do not set the tagtype at all.
264 # Should each extension invent its own tag type? Should there
264 # Should each extension invent its own tag type? Should there
265 # be one tagtype for all such "virtual" tags? Or is the status
265 # be one tagtype for all such "virtual" tags? Or is the status
266 # quo fine?
266 # quo fine?
267
267
268 alltags = {} # map tag name to (node, hist)
268 alltags = {} # map tag name to (node, hist)
269 tagtypes = {}
269 tagtypes = {}
270
270
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
271 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
272 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
273
273
274 # Build the return dicts. Have to re-encode tag names because
274 # Build the return dicts. Have to re-encode tag names because
275 # the tags module always uses UTF-8 (in order not to lose info
275 # the tags module always uses UTF-8 (in order not to lose info
276 # writing to the cache), but the rest of Mercurial wants them in
276 # writing to the cache), but the rest of Mercurial wants them in
277 # local encoding.
277 # local encoding.
278 tags = {}
278 tags = {}
279 for (name, (node, hist)) in alltags.iteritems():
279 for (name, (node, hist)) in alltags.iteritems():
280 if node != nullid:
280 if node != nullid:
281 tags[encoding.tolocal(name)] = node
281 tags[encoding.tolocal(name)] = node
282 tags['tip'] = self.changelog.tip()
282 tags['tip'] = self.changelog.tip()
283 tagtypes = dict([(encoding.tolocal(name), value)
283 tagtypes = dict([(encoding.tolocal(name), value)
284 for (name, value) in tagtypes.iteritems()])
284 for (name, value) in tagtypes.iteritems()])
285 return (tags, tagtypes)
285 return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
324 self._updatebranchcache(partial, ctxgen)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
326
326 return partial
327 return partial
327
328
328 def branchmap(self):
329 def branchmap(self):
329 '''returns a dictionary {branch: [branchheads]}'''
330 '''returns a dictionary {branch: [branchheads]}'''
330 tip = self.changelog.tip()
331 tip = self.changelog.tip()
331 if self._branchcache is not None and self._branchcachetip == tip:
332 if self._branchcache is not None and self._branchcachetip == tip:
332 return self._branchcache
333 return self._branchcache
333
334
334 oldtip = self._branchcachetip
335 oldtip = self._branchcachetip
335 self._branchcachetip = tip
336 self._branchcachetip = tip
336 if oldtip is None or oldtip not in self.changelog.nodemap:
337 if oldtip is None or oldtip not in self.changelog.nodemap:
337 partial, last, lrev = self._readbranchcache()
338 partial, last, lrev = self._readbranchcache()
338 else:
339 else:
339 lrev = self.changelog.rev(oldtip)
340 lrev = self.changelog.rev(oldtip)
340 partial = self._branchcache
341 partial = self._branchcache
341
342
342 self._branchtags(partial, lrev)
343 self._branchtags(partial, lrev)
343 # this private cache holds all heads (not just tips)
344 # this private cache holds all heads (not just tips)
344 self._branchcache = partial
345 self._branchcache = partial
345
346
346 return self._branchcache
347 return self._branchcache
347
348
348 def branchtags(self):
349 def branchtags(self):
349 '''return a dict where branch names map to the tipmost head of
350 '''return a dict where branch names map to the tipmost head of
350 the branch, open heads come before closed'''
351 the branch, open heads come before closed'''
351 bt = {}
352 bt = {}
352 for bn, heads in self.branchmap().iteritems():
353 for bn, heads in self.branchmap().iteritems():
353 tip = heads[-1]
354 tip = heads[-1]
354 for h in reversed(heads):
355 for h in reversed(heads):
355 if 'close' not in self.changelog.read(h)[5]:
356 if 'close' not in self.changelog.read(h)[5]:
356 tip = h
357 tip = h
357 break
358 break
358 bt[bn] = tip
359 bt[bn] = tip
359 return bt
360 return bt
360
361
361
362
362 def _readbranchcache(self):
363 def _readbranchcache(self):
363 partial = {}
364 partial = {}
364 try:
365 try:
365 f = self.opener("branchheads.cache")
366 f = self.opener("branchheads.cache")
366 lines = f.read().split('\n')
367 lines = f.read().split('\n')
367 f.close()
368 f.close()
368 except (IOError, OSError):
369 except (IOError, OSError):
369 return {}, nullid, nullrev
370 return {}, nullid, nullrev
370
371
371 try:
372 try:
372 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = lines.pop(0).split(" ", 1)
373 last, lrev = bin(last), int(lrev)
374 last, lrev = bin(last), int(lrev)
374 if lrev >= len(self) or self[lrev].node() != last:
375 if lrev >= len(self) or self[lrev].node() != last:
375 # invalidate the cache
376 # invalidate the cache
376 raise ValueError('invalidating branch cache (tip differs)')
377 raise ValueError('invalidating branch cache (tip differs)')
377 for l in lines:
378 for l in lines:
378 if not l:
379 if not l:
379 continue
380 continue
380 node, label = l.split(" ", 1)
381 node, label = l.split(" ", 1)
381 partial.setdefault(label.strip(), []).append(bin(node))
382 partial.setdefault(label.strip(), []).append(bin(node))
382 except KeyboardInterrupt:
383 except KeyboardInterrupt:
383 raise
384 raise
384 except Exception, inst:
385 except Exception, inst:
385 if self.ui.debugflag:
386 if self.ui.debugflag:
386 self.ui.warn(str(inst), '\n')
387 self.ui.warn(str(inst), '\n')
387 partial, last, lrev = {}, nullid, nullrev
388 partial, last, lrev = {}, nullid, nullrev
388 return partial, last, lrev
389 return partial, last, lrev
389
390
390 def _writebranchcache(self, branches, tip, tiprev):
391 def _writebranchcache(self, branches, tip, tiprev):
391 try:
392 try:
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f.write("%s %s\n" % (hex(tip), tiprev))
394 f.write("%s %s\n" % (hex(tip), tiprev))
394 for label, nodes in branches.iteritems():
395 for label, nodes in branches.iteritems():
395 for node in nodes:
396 for node in nodes:
396 f.write("%s %s\n" % (hex(node), label))
397 f.write("%s %s\n" % (hex(node), label))
397 f.rename()
398 f.rename()
398 except (IOError, OSError):
399 except (IOError, OSError):
399 pass
400 pass
400
401
401 def _updatebranchcache(self, partial, start, end):
402 def _updatebranchcache(self, partial, ctxgen):
402 # collect new branch entries
403 # collect new branch entries
403 newbranches = {}
404 newbranches = {}
404 for r in xrange(start, end):
405 for c in ctxgen:
405 c = self[r]
406 newbranches.setdefault(c.branch(), []).append(c.node())
406 newbranches.setdefault(c.branch(), []).append(c.node())
407 # if older branchheads are reachable from new ones, they aren't
407 # if older branchheads are reachable from new ones, they aren't
408 # really branchheads. Note checking parents is insufficient:
408 # really branchheads. Note checking parents is insufficient:
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
409 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
410 for branch, newnodes in newbranches.iteritems():
410 for branch, newnodes in newbranches.iteritems():
411 bheads = partial.setdefault(branch, [])
411 bheads = partial.setdefault(branch, [])
412 bheads.extend(newnodes)
412 bheads.extend(newnodes)
413 if len(bheads) < 2:
413 if len(bheads) < 2:
414 continue
414 continue
415 newbheads = []
415 newbheads = []
416 # starting from tip means fewer passes over reachable
416 # starting from tip means fewer passes over reachable
417 while newnodes:
417 while newnodes:
418 latest = newnodes.pop()
418 latest = newnodes.pop()
419 if latest not in bheads:
419 if latest not in bheads:
420 continue
420 continue
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
421 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
422 reachable = self.changelog.reachable(latest, minbhrev)
422 reachable = self.changelog.reachable(latest, minbhrev)
423 bheads = [b for b in bheads if b not in reachable]
423 bheads = [b for b in bheads if b not in reachable]
424 newbheads.insert(0, latest)
424 newbheads.insert(0, latest)
425 bheads.extend(newbheads)
425 bheads.extend(newbheads)
426 partial[branch] = bheads
426 partial[branch] = bheads
427
427
428 def lookup(self, key):
428 def lookup(self, key):
429 if isinstance(key, int):
429 if isinstance(key, int):
430 return self.changelog.node(key)
430 return self.changelog.node(key)
431 elif key == '.':
431 elif key == '.':
432 return self.dirstate.parents()[0]
432 return self.dirstate.parents()[0]
433 elif key == 'null':
433 elif key == 'null':
434 return nullid
434 return nullid
435 elif key == 'tip':
435 elif key == 'tip':
436 return self.changelog.tip()
436 return self.changelog.tip()
437 n = self.changelog._match(key)
437 n = self.changelog._match(key)
438 if n:
438 if n:
439 return n
439 return n
440 if key in self.tags():
440 if key in self.tags():
441 return self.tags()[key]
441 return self.tags()[key]
442 if key in self.branchtags():
442 if key in self.branchtags():
443 return self.branchtags()[key]
443 return self.branchtags()[key]
444 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
445 if n:
445 if n:
446 return n
446 return n
447
447
448 # can't find key, check if it might have come from damaged dirstate
448 # can't find key, check if it might have come from damaged dirstate
449 if key in self.dirstate.parents():
449 if key in self.dirstate.parents():
450 raise error.Abort(_("working directory has unknown parent '%s'!")
450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 % short(key))
451 % short(key))
452 try:
452 try:
453 if len(key) == 20:
453 if len(key) == 20:
454 key = hex(key)
454 key = hex(key)
455 except:
455 except:
456 pass
456 pass
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def wjoin(self, f):
465 def wjoin(self, f):
466 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
467
467
468 def rjoin(self, f):
468 def rjoin(self, f):
469 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
470
470
471 def file(self, f):
471 def file(self, f):
472 if f[0] == '/':
472 if f[0] == '/':
473 f = f[1:]
473 f = f[1:]
474 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
475
475
476 def changectx(self, changeid):
476 def changectx(self, changeid):
477 return self[changeid]
477 return self[changeid]
478
478
479 def parents(self, changeid=None):
479 def parents(self, changeid=None):
480 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
481 return self[changeid].parents()
481 return self[changeid].parents()
482
482
483 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
487
487
488 def getcwd(self):
488 def getcwd(self):
489 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
490
490
491 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
493
493
494 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
495 return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
500 def _filter(self, filter, filename, data):
500 def _filter(self, filter, filename, data):
501 if filter not in self.filterpats:
501 if filter not in self.filterpats:
502 l = []
502 l = []
503 for pat, cmd in self.ui.configitems(filter):
503 for pat, cmd in self.ui.configitems(filter):
504 if cmd == '!':
504 if cmd == '!':
505 continue
505 continue
506 mf = match_.match(self.root, '', [pat])
506 mf = match_.match(self.root, '', [pat])
507 fn = None
507 fn = None
508 params = cmd
508 params = cmd
509 for name, filterfn in self._datafilters.iteritems():
509 for name, filterfn in self._datafilters.iteritems():
510 if cmd.startswith(name):
510 if cmd.startswith(name):
511 fn = filterfn
511 fn = filterfn
512 params = cmd[len(name):].lstrip()
512 params = cmd[len(name):].lstrip()
513 break
513 break
514 if not fn:
514 if not fn:
515 fn = lambda s, c, **kwargs: util.filter(s, c)
515 fn = lambda s, c, **kwargs: util.filter(s, c)
516 # Wrap old filters not supporting keyword arguments
516 # Wrap old filters not supporting keyword arguments
517 if not inspect.getargspec(fn)[2]:
517 if not inspect.getargspec(fn)[2]:
518 oldfn = fn
518 oldfn = fn
519 fn = lambda s, c, **kwargs: oldfn(s, c)
519 fn = lambda s, c, **kwargs: oldfn(s, c)
520 l.append((mf, fn, params))
520 l.append((mf, fn, params))
521 self.filterpats[filter] = l
521 self.filterpats[filter] = l
522
522
523 for mf, fn, cmd in self.filterpats[filter]:
523 for mf, fn, cmd in self.filterpats[filter]:
524 if mf(filename):
524 if mf(filename):
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
525 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
526 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
527 break
527 break
528
528
529 return data
529 return data
530
530
531 def adddatafilter(self, name, filter):
531 def adddatafilter(self, name, filter):
532 self._datafilters[name] = filter
532 self._datafilters[name] = filter
533
533
534 def wread(self, filename):
534 def wread(self, filename):
535 if self._link(filename):
535 if self._link(filename):
536 data = os.readlink(self.wjoin(filename))
536 data = os.readlink(self.wjoin(filename))
537 else:
537 else:
538 data = self.wopener(filename, 'r').read()
538 data = self.wopener(filename, 'r').read()
539 return self._filter("encode", filename, data)
539 return self._filter("encode", filename, data)
540
540
541 def wwrite(self, filename, data, flags):
541 def wwrite(self, filename, data, flags):
542 data = self._filter("decode", filename, data)
542 data = self._filter("decode", filename, data)
543 try:
543 try:
544 os.unlink(self.wjoin(filename))
544 os.unlink(self.wjoin(filename))
545 except OSError:
545 except OSError:
546 pass
546 pass
547 if 'l' in flags:
547 if 'l' in flags:
548 self.wopener.symlink(data, filename)
548 self.wopener.symlink(data, filename)
549 else:
549 else:
550 self.wopener(filename, 'w').write(data)
550 self.wopener(filename, 'w').write(data)
551 if 'x' in flags:
551 if 'x' in flags:
552 util.set_flags(self.wjoin(filename), False, True)
552 util.set_flags(self.wjoin(filename), False, True)
553
553
554 def wwritedata(self, filename, data):
554 def wwritedata(self, filename, data):
555 return self._filter("decode", filename, data)
555 return self._filter("decode", filename, data)
556
556
557 def transaction(self):
557 def transaction(self):
558 tr = self._transref and self._transref() or None
558 tr = self._transref and self._transref() or None
559 if tr and tr.running():
559 if tr and tr.running():
560 return tr.nest()
560 return tr.nest()
561
561
562 # abort here if the journal already exists
562 # abort here if the journal already exists
563 if os.path.exists(self.sjoin("journal")):
563 if os.path.exists(self.sjoin("journal")):
564 raise error.RepoError(
564 raise error.RepoError(
565 _("abandoned transaction found - run hg recover"))
565 _("abandoned transaction found - run hg recover"))
566
566
567 # save dirstate for rollback
567 # save dirstate for rollback
568 try:
568 try:
569 ds = self.opener("dirstate").read()
569 ds = self.opener("dirstate").read()
570 except IOError:
570 except IOError:
571 ds = ""
571 ds = ""
572 self.opener("journal.dirstate", "w").write(ds)
572 self.opener("journal.dirstate", "w").write(ds)
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
573 self.opener("journal.branch", "w").write(self.dirstate.branch())
574
574
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
575 renames = [(self.sjoin("journal"), self.sjoin("undo")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
576 (self.join("journal.dirstate"), self.join("undo.dirstate")),
577 (self.join("journal.branch"), self.join("undo.branch"))]
577 (self.join("journal.branch"), self.join("undo.branch"))]
578 tr = transaction.transaction(self.ui.warn, self.sopener,
578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 self.sjoin("journal"),
579 self.sjoin("journal"),
580 aftertrans(renames),
580 aftertrans(renames),
581 self.store.createmode)
581 self.store.createmode)
582 self._transref = weakref.ref(tr)
582 self._transref = weakref.ref(tr)
583 return tr
583 return tr
584
584
585 def recover(self):
585 def recover(self):
586 lock = self.lock()
586 lock = self.lock()
587 try:
587 try:
588 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
589 self.ui.status(_("rolling back interrupted transaction\n"))
589 self.ui.status(_("rolling back interrupted transaction\n"))
590 transaction.rollback(self.sopener, self.sjoin("journal"),
590 transaction.rollback(self.sopener, self.sjoin("journal"),
591 self.ui.warn)
591 self.ui.warn)
592 self.invalidate()
592 self.invalidate()
593 return True
593 return True
594 else:
594 else:
595 self.ui.warn(_("no interrupted transaction available\n"))
595 self.ui.warn(_("no interrupted transaction available\n"))
596 return False
596 return False
597 finally:
597 finally:
598 lock.release()
598 lock.release()
599
599
600 def rollback(self):
600 def rollback(self):
601 wlock = lock = None
601 wlock = lock = None
602 try:
602 try:
603 wlock = self.wlock()
603 wlock = self.wlock()
604 lock = self.lock()
604 lock = self.lock()
605 if os.path.exists(self.sjoin("undo")):
605 if os.path.exists(self.sjoin("undo")):
606 self.ui.status(_("rolling back last transaction\n"))
606 self.ui.status(_("rolling back last transaction\n"))
607 transaction.rollback(self.sopener, self.sjoin("undo"),
607 transaction.rollback(self.sopener, self.sjoin("undo"),
608 self.ui.warn)
608 self.ui.warn)
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
609 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
610 try:
610 try:
611 branch = self.opener("undo.branch").read()
611 branch = self.opener("undo.branch").read()
612 self.dirstate.setbranch(branch)
612 self.dirstate.setbranch(branch)
613 except IOError:
613 except IOError:
614 self.ui.warn(_("Named branch could not be reset, "
614 self.ui.warn(_("Named branch could not be reset, "
615 "current branch still is: %s\n")
615 "current branch still is: %s\n")
616 % encoding.tolocal(self.dirstate.branch()))
616 % encoding.tolocal(self.dirstate.branch()))
617 self.invalidate()
617 self.invalidate()
618 self.dirstate.invalidate()
618 self.dirstate.invalidate()
619 self.destroyed()
619 self.destroyed()
620 else:
620 else:
621 self.ui.warn(_("no rollback information available\n"))
621 self.ui.warn(_("no rollback information available\n"))
622 finally:
622 finally:
623 release(lock, wlock)
623 release(lock, wlock)
624
624
625 def invalidatecaches(self):
625 def invalidatecaches(self):
626 self._tags = None
626 self._tags = None
627 self._tagtypes = None
627 self._tagtypes = None
628 self.nodetagscache = None
628 self.nodetagscache = None
629 self._branchcache = None # in UTF-8
629 self._branchcache = None # in UTF-8
630 self._branchcachetip = None
630 self._branchcachetip = None
631
631
632 def invalidate(self):
632 def invalidate(self):
633 for a in "changelog manifest".split():
633 for a in "changelog manifest".split():
634 if a in self.__dict__:
634 if a in self.__dict__:
635 delattr(self, a)
635 delattr(self, a)
636 self.invalidatecaches()
636 self.invalidatecaches()
637
637
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
639 try:
639 try:
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 l = lock.lock(lockname, 0, releasefn, desc=desc)
641 except error.LockHeld, inst:
641 except error.LockHeld, inst:
642 if not wait:
642 if not wait:
643 raise
643 raise
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 self.ui.warn(_("waiting for lock on %s held by %r\n") %
645 (desc, inst.locker))
645 (desc, inst.locker))
646 # default to 600 seconds timeout
646 # default to 600 seconds timeout
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
648 releasefn, desc=desc)
648 releasefn, desc=desc)
649 if acquirefn:
649 if acquirefn:
650 acquirefn()
650 acquirefn()
651 return l
651 return l
652
652
653 def lock(self, wait=True):
653 def lock(self, wait=True):
654 '''Lock the repository store (.hg/store) and return a weak reference
654 '''Lock the repository store (.hg/store) and return a weak reference
655 to the lock. Use this before modifying the store (e.g. committing or
655 to the lock. Use this before modifying the store (e.g. committing or
656 stripping). If you are opening a transaction, get a lock as well.)'''
656 stripping). If you are opening a transaction, get a lock as well.)'''
657 l = self._lockref and self._lockref()
657 l = self._lockref and self._lockref()
658 if l is not None and l.held:
658 if l is not None and l.held:
659 l.lock()
659 l.lock()
660 return l
660 return l
661
661
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
662 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
663 _('repository %s') % self.origroot)
663 _('repository %s') % self.origroot)
664 self._lockref = weakref.ref(l)
664 self._lockref = weakref.ref(l)
665 return l
665 return l
666
666
667 def wlock(self, wait=True):
667 def wlock(self, wait=True):
668 '''Lock the non-store parts of the repository (everything under
668 '''Lock the non-store parts of the repository (everything under
669 .hg except .hg/store) and return a weak reference to the lock.
669 .hg except .hg/store) and return a weak reference to the lock.
670 Use this before modifying files in .hg.'''
670 Use this before modifying files in .hg.'''
671 l = self._wlockref and self._wlockref()
671 l = self._wlockref and self._wlockref()
672 if l is not None and l.held:
672 if l is not None and l.held:
673 l.lock()
673 l.lock()
674 return l
674 return l
675
675
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 self.dirstate.invalidate, _('working directory of %s') %
677 self.dirstate.invalidate, _('working directory of %s') %
678 self.origroot)
678 self.origroot)
679 self._wlockref = weakref.ref(l)
679 self._wlockref = weakref.ref(l)
680 return l
680 return l
681
681
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
682 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
683 """
683 """
684 commit an individual file as part of a larger transaction
684 commit an individual file as part of a larger transaction
685 """
685 """
686
686
687 fname = fctx.path()
687 fname = fctx.path()
688 text = fctx.data()
688 text = fctx.data()
689 flog = self.file(fname)
689 flog = self.file(fname)
690 fparent1 = manifest1.get(fname, nullid)
690 fparent1 = manifest1.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
691 fparent2 = fparent2o = manifest2.get(fname, nullid)
692
692
693 meta = {}
693 meta = {}
694 copy = fctx.renamed()
694 copy = fctx.renamed()
695 if copy and copy[0] != fname:
695 if copy and copy[0] != fname:
696 # Mark the new revision of this file as a copy of another
696 # Mark the new revision of this file as a copy of another
697 # file. This copy data will effectively act as a parent
697 # file. This copy data will effectively act as a parent
698 # of this new revision. If this is a merge, the first
698 # of this new revision. If this is a merge, the first
699 # parent will be the nullid (meaning "look up the copy data")
699 # parent will be the nullid (meaning "look up the copy data")
700 # and the second one will be the other parent. For example:
700 # and the second one will be the other parent. For example:
701 #
701 #
702 # 0 --- 1 --- 3 rev1 changes file foo
702 # 0 --- 1 --- 3 rev1 changes file foo
703 # \ / rev2 renames foo to bar and changes it
703 # \ / rev2 renames foo to bar and changes it
704 # \- 2 -/ rev3 should have bar with all changes and
704 # \- 2 -/ rev3 should have bar with all changes and
705 # should record that bar descends from
705 # should record that bar descends from
706 # bar in rev2 and foo in rev1
706 # bar in rev2 and foo in rev1
707 #
707 #
708 # this allows this merge to succeed:
708 # this allows this merge to succeed:
709 #
709 #
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
710 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
711 # \ / merging rev3 and rev4 should use bar@rev2
712 # \- 2 --- 4 as the merge base
712 # \- 2 --- 4 as the merge base
713 #
713 #
714
714
715 cfname = copy[0]
715 cfname = copy[0]
716 crev = manifest1.get(cfname)
716 crev = manifest1.get(cfname)
717 newfparent = fparent2
717 newfparent = fparent2
718
718
719 if manifest2: # branch merge
719 if manifest2: # branch merge
720 if fparent2 == nullid or crev is None: # copied on remote side
720 if fparent2 == nullid or crev is None: # copied on remote side
721 if cfname in manifest2:
721 if cfname in manifest2:
722 crev = manifest2[cfname]
722 crev = manifest2[cfname]
723 newfparent = fparent1
723 newfparent = fparent1
724
724
725 # find source in nearest ancestor if we've lost track
725 # find source in nearest ancestor if we've lost track
726 if not crev:
726 if not crev:
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
727 self.ui.debug(" %s: searching for copy revision for %s\n" %
728 (fname, cfname))
728 (fname, cfname))
729 for ancestor in self['.'].ancestors():
729 for ancestor in self['.'].ancestors():
730 if cfname in ancestor:
730 if cfname in ancestor:
731 crev = ancestor[cfname].filenode()
731 crev = ancestor[cfname].filenode()
732 break
732 break
733
733
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
734 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
735 meta["copy"] = cfname
735 meta["copy"] = cfname
736 meta["copyrev"] = hex(crev)
736 meta["copyrev"] = hex(crev)
737 fparent1, fparent2 = nullid, newfparent
737 fparent1, fparent2 = nullid, newfparent
738 elif fparent2 != nullid:
738 elif fparent2 != nullid:
739 # is one parent an ancestor of the other?
739 # is one parent an ancestor of the other?
740 fparentancestor = flog.ancestor(fparent1, fparent2)
740 fparentancestor = flog.ancestor(fparent1, fparent2)
741 if fparentancestor == fparent1:
741 if fparentancestor == fparent1:
742 fparent1, fparent2 = fparent2, nullid
742 fparent1, fparent2 = fparent2, nullid
743 elif fparentancestor == fparent2:
743 elif fparentancestor == fparent2:
744 fparent2 = nullid
744 fparent2 = nullid
745
745
746 # is the file changed?
746 # is the file changed?
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
747 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
748 changelist.append(fname)
748 changelist.append(fname)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
749 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
750
750
751 # are just the flags changed during merge?
751 # are just the flags changed during merge?
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
752 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
753 changelist.append(fname)
753 changelist.append(fname)
754
754
755 return fparent1
755 return fparent1
756
756
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """

        def fail(f, msg):
            # abort the whole commit when an explicitly named file is unusable
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; reject bad explicit files outright
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            # a merge commit must include the full merge result, so refuse
            # any file/pattern restriction on a two-parent working dir
            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            # any subrepo change implies an update to the .hgsubstate file
            if (subs or removedsubs) and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed (and no branch close / branch switch): no-op
            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            # refuse to commit files with unresolved merge conflicts
            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            try:
                hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # deliberate bare except: point the user at the saved
                # message file on *any* failure, then re-raise unchanged
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # run the commit hook outside the wlock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
878
878
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        If error is True, an IOError while reading a file is fatal even
        when it looks like a plain missing file; otherwise a missing file
        is treated as removed. Returns the new changelog node.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction()
            # weak proxy: filelog/manifest adds must not keep the
            # transaction alive past the 'del tr' below
            trp = weakref.proxy(tr)

            # check in files
            new = {}           # filename -> new filelog node
            changed = []       # files actually modified (filled by _filecommit)
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # file vanished from the working dir: record removal
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report removals for files that existed in a parent
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # let hooks see the pending (not yet finalized) changelog
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            # keep the branch head cache current if it has been loaded
            if self._branchcache:
                self.branchtags()
            return n
        finally:
            del tr
            lock.release()
945
945
946 def destroyed(self):
946 def destroyed(self):
947 '''Inform the repository that nodes have been destroyed.
947 '''Inform the repository that nodes have been destroyed.
948 Intended for use by strip and rollback, so there's a common
948 Intended for use by strip and rollback, so there's a common
949 place for anything that has to be done after destroying history.'''
949 place for anything that has to be done after destroying history.'''
950 # XXX it might be nice if we could take the list of destroyed
950 # XXX it might be nice if we could take the list of destroyed
951 # nodes, but I don't see an easy way for rollback() to do that
951 # nodes, but I don't see an easy way for rollback() to do that
952
952
953 # Ensure the persistent tag cache is updated. Doing it now
953 # Ensure the persistent tag cache is updated. Doing it now
954 # means that the tag cache only has to worry about destroyed
954 # means that the tag cache only has to worry about destroyed
955 # heads immediately after a strip/rollback. That in turn
955 # heads immediately after a strip/rollback. That in turn
956 # guarantees that "cachetip == currenttip" (comparing both rev
956 # guarantees that "cachetip == currenttip" (comparing both rev
957 # and node) always means no nodes have been added or destroyed.
957 # and node) always means no nodes have been added or destroyed.
958
958
959 # XXX this is suboptimal when qrefresh'ing: we strip the current
959 # XXX this is suboptimal when qrefresh'ing: we strip the current
960 # head, refresh the tag cache, then immediately add a new head.
960 # head, refresh the tag cache, then immediately add a new head.
961 # But I think doing it this way is necessary for the "instant
961 # But I think doing it this way is necessary for the "instant
962 # tag cache retrieval" case to work.
962 # tag cache retrieval" case to work.
963 self.invalidatecaches()
963 self.invalidatecaches()
964
964
965 def walk(self, match, node=None):
965 def walk(self, match, node=None):
966 '''
966 '''
967 walk recursively through the directory tree or a given
967 walk recursively through the directory tree or a given
968 changeset, finding all files matched by the match
968 changeset, finding all files matched by the match
969 function
969 function
970 '''
970 '''
971 return self[node].walk(match)
971 return self[node].walk(match)
972
972
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # copy of ctx's manifest restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about explicit files missing from *both* sides
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            # (cmp holds files the dirstate could not classify by stat alone)
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None  # unknown node: forces content compare below
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # classify every file of mf2 against mf1; whatever is left in
            # mf1 afterwards only exists on the first side, i.e. removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1080
1080
1081 def add(self, list):
1081 def add(self, list):
1082 wlock = self.wlock()
1082 wlock = self.wlock()
1083 try:
1083 try:
1084 rejected = []
1084 rejected = []
1085 for f in list:
1085 for f in list:
1086 p = self.wjoin(f)
1086 p = self.wjoin(f)
1087 try:
1087 try:
1088 st = os.lstat(p)
1088 st = os.lstat(p)
1089 except:
1089 except:
1090 self.ui.warn(_("%s does not exist!\n") % f)
1090 self.ui.warn(_("%s does not exist!\n") % f)
1091 rejected.append(f)
1091 rejected.append(f)
1092 continue
1092 continue
1093 if st.st_size > 10000000:
1093 if st.st_size > 10000000:
1094 self.ui.warn(_("%s: up to %d MB of RAM may be required "
1094 self.ui.warn(_("%s: up to %d MB of RAM may be required "
1095 "to manage this file\n"
1095 "to manage this file\n"
1096 "(use 'hg revert %s' to cancel the "
1096 "(use 'hg revert %s' to cancel the "
1097 "pending addition)\n")
1097 "pending addition)\n")
1098 % (f, 3 * st.st_size // 1000000, f))
1098 % (f, 3 * st.st_size // 1000000, f))
1099 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1099 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1100 self.ui.warn(_("%s not added: only files and symlinks "
1100 self.ui.warn(_("%s not added: only files and symlinks "
1101 "supported currently\n") % f)
1101 "supported currently\n") % f)
1102 rejected.append(p)
1102 rejected.append(p)
1103 elif self.dirstate[f] in 'amn':
1103 elif self.dirstate[f] in 'amn':
1104 self.ui.warn(_("%s already tracked!\n") % f)
1104 self.ui.warn(_("%s already tracked!\n") % f)
1105 elif self.dirstate[f] == 'r':
1105 elif self.dirstate[f] == 'r':
1106 self.dirstate.normallookup(f)
1106 self.dirstate.normallookup(f)
1107 else:
1107 else:
1108 self.dirstate.add(f)
1108 self.dirstate.add(f)
1109 return rejected
1109 return rejected
1110 finally:
1110 finally:
1111 wlock.release()
1111 wlock.release()
1112
1112
1113 def forget(self, list):
1113 def forget(self, list):
1114 wlock = self.wlock()
1114 wlock = self.wlock()
1115 try:
1115 try:
1116 for f in list:
1116 for f in list:
1117 if self.dirstate[f] != 'a':
1117 if self.dirstate[f] != 'a':
1118 self.ui.warn(_("%s not added!\n") % f)
1118 self.ui.warn(_("%s not added!\n") % f)
1119 else:
1119 else:
1120 self.dirstate.forget(f)
1120 self.dirstate.forget(f)
1121 finally:
1121 finally:
1122 wlock.release()
1122 wlock.release()
1123
1123
1124 def remove(self, list, unlink=False):
1124 def remove(self, list, unlink=False):
1125 if unlink:
1125 if unlink:
1126 for f in list:
1126 for f in list:
1127 try:
1127 try:
1128 util.unlink(self.wjoin(f))
1128 util.unlink(self.wjoin(f))
1129 except OSError, inst:
1129 except OSError, inst:
1130 if inst.errno != errno.ENOENT:
1130 if inst.errno != errno.ENOENT:
1131 raise
1131 raise
1132 wlock = self.wlock()
1132 wlock = self.wlock()
1133 try:
1133 try:
1134 for f in list:
1134 for f in list:
1135 if unlink and os.path.exists(self.wjoin(f)):
1135 if unlink and os.path.exists(self.wjoin(f)):
1136 self.ui.warn(_("%s still exists!\n") % f)
1136 self.ui.warn(_("%s still exists!\n") % f)
1137 elif self.dirstate[f] == 'a':
1137 elif self.dirstate[f] == 'a':
1138 self.dirstate.forget(f)
1138 self.dirstate.forget(f)
1139 elif f not in self.dirstate:
1139 elif f not in self.dirstate:
1140 self.ui.warn(_("%s not tracked!\n") % f)
1140 self.ui.warn(_("%s not tracked!\n") % f)
1141 else:
1141 else:
1142 self.dirstate.remove(f)
1142 self.dirstate.remove(f)
1143 finally:
1143 finally:
1144 wlock.release()
1144 wlock.release()
1145
1145
1146 def undelete(self, list):
1146 def undelete(self, list):
1147 manifests = [self.manifest.read(self.changelog.read(p)[0])
1147 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 for p in self.dirstate.parents() if p != nullid]
1148 for p in self.dirstate.parents() if p != nullid]
1149 wlock = self.wlock()
1149 wlock = self.wlock()
1150 try:
1150 try:
1151 for f in list:
1151 for f in list:
1152 if self.dirstate[f] != 'r':
1152 if self.dirstate[f] != 'r':
1153 self.ui.warn(_("%s not removed!\n") % f)
1153 self.ui.warn(_("%s not removed!\n") % f)
1154 else:
1154 else:
1155 m = f in manifests[0] and manifests[0] or manifests[1]
1155 m = f in manifests[0] and manifests[0] or manifests[1]
1156 t = self.file(f).read(m[f])
1156 t = self.file(f).read(m[f])
1157 self.wwrite(f, t, m.flags(f))
1157 self.wwrite(f, t, m.flags(f))
1158 self.dirstate.normal(f)
1158 self.dirstate.normal(f)
1159 finally:
1159 finally:
1160 wlock.release()
1160 wlock.release()
1161
1161
1162 def copy(self, source, dest):
1162 def copy(self, source, dest):
1163 p = self.wjoin(dest)
1163 p = self.wjoin(dest)
1164 if not (os.path.exists(p) or os.path.islink(p)):
1164 if not (os.path.exists(p) or os.path.islink(p)):
1165 self.ui.warn(_("%s does not exist!\n") % dest)
1165 self.ui.warn(_("%s does not exist!\n") % dest)
1166 elif not (os.path.isfile(p) or os.path.islink(p)):
1166 elif not (os.path.isfile(p) or os.path.islink(p)):
1167 self.ui.warn(_("copy failed: %s is not a file or a "
1167 self.ui.warn(_("copy failed: %s is not a file or a "
1168 "symbolic link\n") % dest)
1168 "symbolic link\n") % dest)
1169 else:
1169 else:
1170 wlock = self.wlock()
1170 wlock = self.wlock()
1171 try:
1171 try:
1172 if self.dirstate[dest] in '?r':
1172 if self.dirstate[dest] in '?r':
1173 self.dirstate.add(dest)
1173 self.dirstate.add(dest)
1174 self.dirstate.copy(source, dest)
1174 self.dirstate.copy(source, dest)
1175 finally:
1175 finally:
1176 wlock.release()
1176 wlock.release()
1177
1177
1178 def heads(self, start=None):
1178 def heads(self, start=None):
1179 heads = self.changelog.heads(start)
1179 heads = self.changelog.heads(start)
1180 # sort the output in rev descending order
1180 # sort the output in rev descending order
1181 heads = [(-self.changelog.rev(h), h) for h in heads]
1181 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 return [n for (r, n) in sorted(heads)]
1182 return [n for (r, n) in sorted(heads)]
1183
1183
1184 def branchheads(self, branch=None, start=None, closed=False):
1184 def branchheads(self, branch=None, start=None, closed=False):
1185 '''return a (possibly filtered) list of heads for the given branch
1185 '''return a (possibly filtered) list of heads for the given branch
1186
1186
1187 Heads are returned in topological order, from newest to oldest.
1187 Heads are returned in topological order, from newest to oldest.
1188 If branch is None, use the dirstate branch.
1188 If branch is None, use the dirstate branch.
1189 If start is not None, return only heads reachable from start.
1189 If start is not None, return only heads reachable from start.
1190 If closed is True, return heads that are marked as closed as well.
1190 If closed is True, return heads that are marked as closed as well.
1191 '''
1191 '''
1192 if branch is None:
1192 if branch is None:
1193 branch = self[None].branch()
1193 branch = self[None].branch()
1194 branches = self.branchmap()
1194 branches = self.branchmap()
1195 if branch not in branches:
1195 if branch not in branches:
1196 return []
1196 return []
1197 # the cache returns heads ordered lowest to highest
1197 # the cache returns heads ordered lowest to highest
1198 bheads = list(reversed(branches[branch]))
1198 bheads = list(reversed(branches[branch]))
1199 if start is not None:
1199 if start is not None:
1200 # filter out the heads that cannot be reached from startrev
1200 # filter out the heads that cannot be reached from startrev
1201 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1201 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1202 bheads = [h for h in bheads if h in fbheads]
1202 bheads = [h for h in bheads if h in fbheads]
1203 if not closed:
1203 if not closed:
1204 bheads = [h for h in bheads if
1204 bheads = [h for h in bheads if
1205 ('close' not in self.changelog.read(h)[5])]
1205 ('close' not in self.changelog.read(h)[5])]
1206 return bheads
1206 return bheads
1207
1207
1208 def branches(self, nodes):
1208 def branches(self, nodes):
1209 if not nodes:
1209 if not nodes:
1210 nodes = [self.changelog.tip()]
1210 nodes = [self.changelog.tip()]
1211 b = []
1211 b = []
1212 for n in nodes:
1212 for n in nodes:
1213 t = n
1213 t = n
1214 while 1:
1214 while 1:
1215 p = self.changelog.parents(n)
1215 p = self.changelog.parents(n)
1216 if p[1] != nullid or p[0] == nullid:
1216 if p[1] != nullid or p[0] == nullid:
1217 b.append((t, n, p[0], p[1]))
1217 b.append((t, n, p[0], p[1]))
1218 break
1218 break
1219 n = p[0]
1219 n = p[0]
1220 return b
1220 return b
1221
1221
1222 def between(self, pairs):
1222 def between(self, pairs):
1223 r = []
1223 r = []
1224
1224
1225 for top, bottom in pairs:
1225 for top, bottom in pairs:
1226 n, l, i = top, [], 0
1226 n, l, i = top, [], 0
1227 f = 1
1227 f = 1
1228
1228
1229 while n != bottom and n != nullid:
1229 while n != bottom and n != nullid:
1230 p = self.changelog.parents(n)[0]
1230 p = self.changelog.parents(n)[0]
1231 if i == f:
1231 if i == f:
1232 l.append(n)
1232 l.append(n)
1233 f = f * 2
1233 f = f * 2
1234 n = p
1234 n = p
1235 i += 1
1235 i += 1
1236
1236
1237 r.append(l)
1237 r.append(l)
1238
1238
1239 return r
1239 return r
1240
1240
1241 def findincoming(self, remote, base=None, heads=None, force=False):
1241 def findincoming(self, remote, base=None, heads=None, force=False):
1242 """Return list of roots of the subsets of missing nodes from remote
1242 """Return list of roots of the subsets of missing nodes from remote
1243
1243
1244 If base dict is specified, assume that these nodes and their parents
1244 If base dict is specified, assume that these nodes and their parents
1245 exist on the remote side and that no child of a node of base exists
1245 exist on the remote side and that no child of a node of base exists
1246 in both remote and self.
1246 in both remote and self.
1247 Furthermore base will be updated to include the nodes that exists
1247 Furthermore base will be updated to include the nodes that exists
1248 in self and remote but no children exists in self and remote.
1248 in self and remote but no children exists in self and remote.
1249 If a list of heads is specified, return only nodes which are heads
1249 If a list of heads is specified, return only nodes which are heads
1250 or ancestors of these heads.
1250 or ancestors of these heads.
1251
1251
1252 All the ancestors of base are in self and in remote.
1252 All the ancestors of base are in self and in remote.
1253 All the descendants of the list returned are missing in self.
1253 All the descendants of the list returned are missing in self.
1254 (and so we know that the rest of the nodes are missing in remote, see
1254 (and so we know that the rest of the nodes are missing in remote, see
1255 outgoing)
1255 outgoing)
1256 """
1256 """
1257 return self.findcommonincoming(remote, base, heads, force)[1]
1257 return self.findcommonincoming(remote, base, heads, force)[1]
1258
1258
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        # nodemap membership is the "do we have this node locally?" test
        # used throughout the discovery below
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        # local repo is empty: everything remote has is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        # split remote heads into those we already know (common) and
        # those we don't (to be explored)
        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is known locally: nothing is missing
            return base.keys(), [], []

        req = set(unknown)      # nodes we have already asked remote about
        reqcnt = 0              # number of round-trips, for progress/debug

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        # phase 1: breadth-first walk over remote branch segments,
        # batching follow-up queries to limit round-trips
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    # segment straddles the known/unknown boundary: the
                    # exact cut point is located by binary search below
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            # both parents known: the segment root is the
                            # earliest missing node on this branch
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next batched query
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # query in chunks of 10 to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        # phase 2: for each incomplete segment, locate the first known
        # node; remote.between returns exponentially spaced sample nodes
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            # gap of at most one node: p is the boundary
                            self.ui.debug("found new branch changeset %s\n" %
                                              short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            # narrow to the (p, i) sub-range and re-scan
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    # f doubles each step, matching between()'s sampling
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                # NOTE(review): short(f[:4]) shortens a 4-byte prefix of the
                # node rather than the node itself — preserved as-is
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            # no common ancestry beyond the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress(_('searching'), None, unit=_('queries'))
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads
1403
1403
1404 def findoutgoing(self, remote, base=None, heads=None, force=False):
1404 def findoutgoing(self, remote, base=None, heads=None, force=False):
1405 """Return list of nodes that are roots of subsets not in remote
1405 """Return list of nodes that are roots of subsets not in remote
1406
1406
1407 If base dict is specified, assume that these nodes and their parents
1407 If base dict is specified, assume that these nodes and their parents
1408 exist on the remote side.
1408 exist on the remote side.
1409 If a list of heads is specified, return only nodes which are heads
1409 If a list of heads is specified, return only nodes which are heads
1410 or ancestors of these heads, and return a second element which
1410 or ancestors of these heads, and return a second element which
1411 contains all remote heads which get new children.
1411 contains all remote heads which get new children.
1412 """
1412 """
1413 if base is None:
1413 if base is None:
1414 base = {}
1414 base = {}
1415 self.findincoming(remote, base, heads, force=force)
1415 self.findincoming(remote, base, heads, force=force)
1416
1416
1417 self.ui.debug("common changesets up to "
1417 self.ui.debug("common changesets up to "
1418 + " ".join(map(short, base.keys())) + "\n")
1418 + " ".join(map(short, base.keys())) + "\n")
1419
1419
1420 remain = set(self.changelog.nodemap)
1420 remain = set(self.changelog.nodemap)
1421
1421
1422 # prune everything remote has from the tree
1422 # prune everything remote has from the tree
1423 remain.remove(nullid)
1423 remain.remove(nullid)
1424 remove = base.keys()
1424 remove = base.keys()
1425 while remove:
1425 while remove:
1426 n = remove.pop(0)
1426 n = remove.pop(0)
1427 if n in remain:
1427 if n in remain:
1428 remain.remove(n)
1428 remain.remove(n)
1429 for p in self.changelog.parents(n):
1429 for p in self.changelog.parents(n):
1430 remove.append(p)
1430 remove.append(p)
1431
1431
1432 # find every node whose parents have been pruned
1432 # find every node whose parents have been pruned
1433 subset = []
1433 subset = []
1434 # find every remote head that will get new children
1434 # find every remote head that will get new children
1435 updated_heads = set()
1435 updated_heads = set()
1436 for n in remain:
1436 for n in remain:
1437 p1, p2 = self.changelog.parents(n)
1437 p1, p2 = self.changelog.parents(n)
1438 if p1 not in remain and p2 not in remain:
1438 if p1 not in remain and p2 not in remain:
1439 subset.append(n)
1439 subset.append(n)
1440 if heads:
1440 if heads:
1441 if p1 in heads:
1441 if p1 in heads:
1442 updated_heads.add(p1)
1442 updated_heads.add(p1)
1443 if p2 in heads:
1443 if p2 in heads:
1444 updated_heads.add(p2)
1444 updated_heads.add(p2)
1445
1445
1446 # this is the set of all roots we have to push
1446 # this is the set of all roots we have to push
1447 if heads:
1447 if heads:
1448 return subset, list(updated_heads)
1448 return subset, list(updated_heads)
1449 else:
1449 else:
1450 return subset
1450 return subset
1451
1451
1452 def pull(self, remote, heads=None, force=False):
1452 def pull(self, remote, heads=None, force=False):
1453 lock = self.lock()
1453 lock = self.lock()
1454 try:
1454 try:
1455 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1455 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1456 force=force)
1456 force=force)
1457 if fetch == [nullid]:
1457 if fetch == [nullid]:
1458 self.ui.status(_("requesting all changes\n"))
1458 self.ui.status(_("requesting all changes\n"))
1459
1459
1460 if not fetch:
1460 if not fetch:
1461 self.ui.status(_("no changes found\n"))
1461 self.ui.status(_("no changes found\n"))
1462 return 0
1462 return 0
1463
1463
1464 if heads is None and remote.capable('changegroupsubset'):
1464 if heads is None and remote.capable('changegroupsubset'):
1465 heads = rheads
1465 heads = rheads
1466
1466
1467 if heads is None:
1467 if heads is None:
1468 cg = remote.changegroup(fetch, 'pull')
1468 cg = remote.changegroup(fetch, 'pull')
1469 else:
1469 else:
1470 if not remote.capable('changegroupsubset'):
1470 if not remote.capable('changegroupsubset'):
1471 raise util.Abort(_("Partial pull cannot be done because "
1471 raise util.Abort(_("Partial pull cannot be done because "
1472 "other repository doesn't support "
1472 "other repository doesn't support "
1473 "changegroupsubset."))
1473 "changegroupsubset."))
1474 cg = remote.changegroupsubset(fetch, heads, 'pull')
1474 cg = remote.changegroupsubset(fetch, heads, 'pull')
1475 return self.addchangegroup(cg, 'pull', remote.url())
1475 return self.addchangegroup(cg, 'pull', remote.url())
1476 finally:
1476 finally:
1477 lock.release()
1477 lock.release()
1478
1478
1479 def push(self, remote, force=False, revs=None):
1479 def push(self, remote, force=False, revs=None):
1480 # there are two ways to push to remote repo:
1480 # there are two ways to push to remote repo:
1481 #
1481 #
1482 # addchangegroup assumes local user can lock remote
1482 # addchangegroup assumes local user can lock remote
1483 # repo (local filesystem, old ssh servers).
1483 # repo (local filesystem, old ssh servers).
1484 #
1484 #
1485 # unbundle assumes local user cannot lock remote repo (new ssh
1485 # unbundle assumes local user cannot lock remote repo (new ssh
1486 # servers, http servers).
1486 # servers, http servers).
1487
1487
1488 if remote.capable('unbundle'):
1488 if remote.capable('unbundle'):
1489 return self.push_unbundle(remote, force, revs)
1489 return self.push_unbundle(remote, force, revs)
1490 return self.push_addchangegroup(remote, force, revs)
1490 return self.push_addchangegroup(remote, force, revs)
1491
1491
    def prepush(self, remote, force, revs):
        '''Analyze the local and remote repositories and determine which
        changesets need to be pushed to the remote. Return a tuple
        (changegroup, remoteheads). changegroup is a readable file-like
        object whose read() returns successive changegroup chunks ready to
        be sent over the wire. remoteheads is the list of remote heads.

        When the push is refused or there is nothing to push, returns
        (None, status) instead: (None, 1) when no changes were found,
        (None, 0) after an abort warning has been printed.
        '''
        common = {}
        remote_heads = remote.heads()
        # inc is truthy when remote has changesets we don't (used only to
        # print the "unsynced remote changes" note below); common is
        # filled in as a side effect
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)

        def checkbranch(lheads, rheads, updatelb, branchname=None):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelb: outgoing local branch bases

            Returns False (after warning) when the push would create new
            remote heads; closes over revs/heads from prepush.
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                # pushing everything: a simple head count suffices
                warn = 1
            else:
                # add local heads involved in the push
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelb]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                # add heads we don't have or that are not involved in the push
                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if branchname is not None:
                    msg = _("abort: push creates new remote heads"
                            " on branch '%s'!\n") % branchname
                else:
                    msg = _("abort: push creates new remote heads!\n")
                self.ui.warn(msg)
                # tailor the hint: more local heads suggests a pending merge
                if len(lheads) > len(rheads):
                    self.ui.status(_("(did you forget to merge?"
                                     " use push -f to force)\n"))
                else:
                    self.ui.status(_("(you should pull and merge or"
                                     " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            # ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    # per-branch check, using the remote's branch map
                    remotebrheads = remote.branchmap()

                    if not revs:
                        localbrheads = self.branchmap()
                    else:
                        # restrict the local view to the heads being pushed
                        localbrheads = {}
                        for n in heads:
                            branch = self[n].branch()
                            localbrheads.setdefault(branch, []).append(n)

                    newbranches = list(set(localbrheads) - set(remotebrheads))
                    if newbranches: # new branch requires --force
                        branchnames = ', '.join("%s" % b for b in newbranches)
                        self.ui.warn(_("abort: push creates "
                                       "new remote branches: %s!\n")
                                     % branchnames)
                        # propose 'push -b .' in the msg too?
                        self.ui.status(_("(use 'hg push -f' to force)\n"))
                        return None, 0
                    for branch, lheads in localbrheads.iteritems():
                        if branch in remotebrheads:
                            rheads = remotebrheads[branch]
                            if not checkbranch(lheads, rheads, update, branch):
                                return None, 0
                else:
                    # old remote without branchmap: single global check
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            nodes = self.changelog.findmissing(common.keys())
            cg = self._changegroup(nodes, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1611
1611
1612 def push_addchangegroup(self, remote, force, revs):
1612 def push_addchangegroup(self, remote, force, revs):
1613 lock = remote.lock()
1613 lock = remote.lock()
1614 try:
1614 try:
1615 ret = self.prepush(remote, force, revs)
1615 ret = self.prepush(remote, force, revs)
1616 if ret[0] is not None:
1616 if ret[0] is not None:
1617 cg, remote_heads = ret
1617 cg, remote_heads = ret
1618 return remote.addchangegroup(cg, 'push', self.url())
1618 return remote.addchangegroup(cg, 'push', self.url())
1619 return ret[1]
1619 return ret[1]
1620 finally:
1620 finally:
1621 lock.release()
1621 lock.release()
1622
1622
1623 def push_unbundle(self, remote, force, revs):
1623 def push_unbundle(self, remote, force, revs):
1624 # local repo finds heads on server, finds out what revs it
1624 # local repo finds heads on server, finds out what revs it
1625 # must push. once revs transferred, if server finds it has
1625 # must push. once revs transferred, if server finds it has
1626 # different heads (someone else won commit/push race), server
1626 # different heads (someone else won commit/push race), server
1627 # aborts.
1627 # aborts.
1628
1628
1629 ret = self.prepush(remote, force, revs)
1629 ret = self.prepush(remote, force, revs)
1630 if ret[0] is not None:
1630 if ret[0] is not None:
1631 cg, remote_heads = ret
1631 cg, remote_heads = ret
1632 if force:
1632 if force:
1633 remote_heads = ['force']
1633 remote_heads = ['force']
1634 return remote.unbundle(cg, remote_heads, 'push')
1634 return remote.unbundle(cg, remote_heads, 'push')
1635 return ret[1]
1635 return ret[1]
1636
1636
1637 def changegroupinfo(self, nodes, source):
1637 def changegroupinfo(self, nodes, source):
1638 if self.ui.verbose or source == 'bundle':
1638 if self.ui.verbose or source == 'bundle':
1639 self.ui.status(_("%d changesets found\n") % len(nodes))
1639 self.ui.status(_("%d changesets found\n") % len(nodes))
1640 if self.ui.debugflag:
1640 if self.ui.debugflag:
1641 self.ui.debug("list of changesets:\n")
1641 self.ui.debug("list of changesets:\n")
1642 for node in nodes:
1642 for node in nodes:
1643 self.ui.debug("%s\n" % hex(node))
1643 self.ui.debug("%s\n" % hex(node))
1644
1644
1645 def changegroupsubset(self, bases, heads, source, extranodes=None):
1645 def changegroupsubset(self, bases, heads, source, extranodes=None):
1646 """Compute a changegroup consisting of all the nodes that are
1646 """Compute a changegroup consisting of all the nodes that are
1647 descendents of any of the bases and ancestors of any of the heads.
1647 descendents of any of the bases and ancestors of any of the heads.
1648 Return a chunkbuffer object whose read() method will return
1648 Return a chunkbuffer object whose read() method will return
1649 successive changegroup chunks.
1649 successive changegroup chunks.
1650
1650
1651 It is fairly complex as determining which filenodes and which
1651 It is fairly complex as determining which filenodes and which
1652 manifest nodes need to be included for the changeset to be complete
1652 manifest nodes need to be included for the changeset to be complete
1653 is non-trivial.
1653 is non-trivial.
1654
1654
1655 Another wrinkle is doing the reverse, figuring out which changeset in
1655 Another wrinkle is doing the reverse, figuring out which changeset in
1656 the changegroup a particular filenode or manifestnode belongs to.
1656 the changegroup a particular filenode or manifestnode belongs to.
1657
1657
1658 The caller can specify some nodes that must be included in the
1658 The caller can specify some nodes that must be included in the
1659 changegroup using the extranodes argument. It should be a dict
1659 changegroup using the extranodes argument. It should be a dict
1660 where the keys are the filenames (or 1 for the manifest), and the
1660 where the keys are the filenames (or 1 for the manifest), and the
1661 values are lists of (node, linknode) tuples, where node is a wanted
1661 values are lists of (node, linknode) tuples, where node is a wanted
1662 node and linknode is the changelog node that should be transmitted as
1662 node and linknode is the changelog node that should be transmitted as
1663 the linkrev.
1663 the linkrev.
1664 """
1664 """
1665
1665
1666 # Set up some initial variables
1666 # Set up some initial variables
1667 # Make it easy to refer to self.changelog
1667 # Make it easy to refer to self.changelog
1668 cl = self.changelog
1668 cl = self.changelog
1669 # msng is short for missing - compute the list of changesets in this
1669 # msng is short for missing - compute the list of changesets in this
1670 # changegroup.
1670 # changegroup.
1671 if not bases:
1671 if not bases:
1672 bases = [nullid]
1672 bases = [nullid]
1673 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1673 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1674
1674
1675 if extranodes is None:
1675 if extranodes is None:
1676 # can we go through the fast path ?
1676 # can we go through the fast path ?
1677 heads.sort()
1677 heads.sort()
1678 allheads = self.heads()
1678 allheads = self.heads()
1679 allheads.sort()
1679 allheads.sort()
1680 if heads == allheads:
1680 if heads == allheads:
1681 return self._changegroup(msng_cl_lst, source)
1681 return self._changegroup(msng_cl_lst, source)
1682
1682
1683 # slow path
1683 # slow path
1684 self.hook('preoutgoing', throw=True, source=source)
1684 self.hook('preoutgoing', throw=True, source=source)
1685
1685
1686 self.changegroupinfo(msng_cl_lst, source)
1686 self.changegroupinfo(msng_cl_lst, source)
1687 # Some bases may turn out to be superfluous, and some heads may be
1687 # Some bases may turn out to be superfluous, and some heads may be
1688 # too. nodesbetween will return the minimal set of bases and heads
1688 # too. nodesbetween will return the minimal set of bases and heads
1689 # necessary to re-create the changegroup.
1689 # necessary to re-create the changegroup.
1690
1690
1691 # Known heads are the list of heads that it is assumed the recipient
1691 # Known heads are the list of heads that it is assumed the recipient
1692 # of this changegroup will know about.
1692 # of this changegroup will know about.
1693 knownheads = set()
1693 knownheads = set()
1694 # We assume that all parents of bases are known heads.
1694 # We assume that all parents of bases are known heads.
1695 for n in bases:
1695 for n in bases:
1696 knownheads.update(cl.parents(n))
1696 knownheads.update(cl.parents(n))
1697 knownheads.discard(nullid)
1697 knownheads.discard(nullid)
1698 knownheads = list(knownheads)
1698 knownheads = list(knownheads)
1699 if knownheads:
1699 if knownheads:
1700 # Now that we know what heads are known, we can compute which
1700 # Now that we know what heads are known, we can compute which
1701 # changesets are known. The recipient must know about all
1701 # changesets are known. The recipient must know about all
1702 # changesets required to reach the known heads from the null
1702 # changesets required to reach the known heads from the null
1703 # changeset.
1703 # changeset.
1704 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1704 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1705 junk = None
1705 junk = None
1706 # Transform the list into a set.
1706 # Transform the list into a set.
1707 has_cl_set = set(has_cl_set)
1707 has_cl_set = set(has_cl_set)
1708 else:
1708 else:
1709 # If there were no known heads, the recipient cannot be assumed to
1709 # If there were no known heads, the recipient cannot be assumed to
1710 # know about any changesets.
1710 # know about any changesets.
1711 has_cl_set = set()
1711 has_cl_set = set()
1712
1712
1713 # Make it easy to refer to self.manifest
1713 # Make it easy to refer to self.manifest
1714 mnfst = self.manifest
1714 mnfst = self.manifest
1715 # We don't know which manifests are missing yet
1715 # We don't know which manifests are missing yet
1716 msng_mnfst_set = {}
1716 msng_mnfst_set = {}
1717 # Nor do we know which filenodes are missing.
1717 # Nor do we know which filenodes are missing.
1718 msng_filenode_set = {}
1718 msng_filenode_set = {}
1719
1719
1720 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1720 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1721 junk = None
1721 junk = None
1722
1722
1723 # A changeset always belongs to itself, so the changenode lookup
1723 # A changeset always belongs to itself, so the changenode lookup
1724 # function for a changenode is identity.
1724 # function for a changenode is identity.
1725 def identity(x):
1725 def identity(x):
1726 return x
1726 return x
1727
1727
1728 # If we determine that a particular file or manifest node must be a
1728 # If we determine that a particular file or manifest node must be a
1729 # node that the recipient of the changegroup will already have, we can
1729 # node that the recipient of the changegroup will already have, we can
1730 # also assume the recipient will have all the parents. This function
1730 # also assume the recipient will have all the parents. This function
1731 # prunes them from the set of missing nodes.
1731 # prunes them from the set of missing nodes.
1732 def prune_parents(revlog, hasset, msngset):
1732 def prune_parents(revlog, hasset, msngset):
1733 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1733 for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
1734 msngset.pop(revlog.node(r), None)
1734 msngset.pop(revlog.node(r), None)
1735
1735
1736 # Use the information collected in collect_manifests_and_files to say
1736 # Use the information collected in collect_manifests_and_files to say
1737 # which changenode any manifestnode belongs to.
1737 # which changenode any manifestnode belongs to.
1738 def lookup_manifest_link(mnfstnode):
1738 def lookup_manifest_link(mnfstnode):
1739 return msng_mnfst_set[mnfstnode]
1739 return msng_mnfst_set[mnfstnode]
1740
1740
1741 # A function generating function that sets up the initial environment
1741 # A function generating function that sets up the initial environment
1742 # the inner function.
1742 # the inner function.
1743 def filenode_collector(changedfiles):
1743 def filenode_collector(changedfiles):
1744 # This gathers information from each manifestnode included in the
1744 # This gathers information from each manifestnode included in the
1745 # changegroup about which filenodes the manifest node references
1745 # changegroup about which filenodes the manifest node references
1746 # so we can include those in the changegroup too.
1746 # so we can include those in the changegroup too.
1747 #
1747 #
1748 # It also remembers which changenode each filenode belongs to. It
1748 # It also remembers which changenode each filenode belongs to. It
1749 # does this by assuming the a filenode belongs to the changenode
1749 # does this by assuming the a filenode belongs to the changenode
1750 # the first manifest that references it belongs to.
1750 # the first manifest that references it belongs to.
1751 def collect_msng_filenodes(mnfstnode):
1751 def collect_msng_filenodes(mnfstnode):
1752 r = mnfst.rev(mnfstnode)
1752 r = mnfst.rev(mnfstnode)
1753 if r - 1 in mnfst.parentrevs(r):
1753 if r - 1 in mnfst.parentrevs(r):
1754 # If the previous rev is one of the parents,
1754 # If the previous rev is one of the parents,
1755 # we only need to see a diff.
1755 # we only need to see a diff.
1756 deltamf = mnfst.readdelta(mnfstnode)
1756 deltamf = mnfst.readdelta(mnfstnode)
1757 # For each line in the delta
1757 # For each line in the delta
1758 for f, fnode in deltamf.iteritems():
1758 for f, fnode in deltamf.iteritems():
1759 f = changedfiles.get(f, None)
1759 f = changedfiles.get(f, None)
1760 # And if the file is in the list of files we care
1760 # And if the file is in the list of files we care
1761 # about.
1761 # about.
1762 if f is not None:
1762 if f is not None:
1763 # Get the changenode this manifest belongs to
1763 # Get the changenode this manifest belongs to
1764 clnode = msng_mnfst_set[mnfstnode]
1764 clnode = msng_mnfst_set[mnfstnode]
1765 # Create the set of filenodes for the file if
1765 # Create the set of filenodes for the file if
1766 # there isn't one already.
1766 # there isn't one already.
1767 ndset = msng_filenode_set.setdefault(f, {})
1767 ndset = msng_filenode_set.setdefault(f, {})
1768 # And set the filenode's changelog node to the
1768 # And set the filenode's changelog node to the
1769 # manifest's if it hasn't been set already.
1769 # manifest's if it hasn't been set already.
1770 ndset.setdefault(fnode, clnode)
1770 ndset.setdefault(fnode, clnode)
1771 else:
1771 else:
1772 # Otherwise we need a full manifest.
1772 # Otherwise we need a full manifest.
1773 m = mnfst.read(mnfstnode)
1773 m = mnfst.read(mnfstnode)
1774 # For every file in we care about.
1774 # For every file in we care about.
1775 for f in changedfiles:
1775 for f in changedfiles:
1776 fnode = m.get(f, None)
1776 fnode = m.get(f, None)
1777 # If it's in the manifest
1777 # If it's in the manifest
1778 if fnode is not None:
1778 if fnode is not None:
1779 # See comments above.
1779 # See comments above.
1780 clnode = msng_mnfst_set[mnfstnode]
1780 clnode = msng_mnfst_set[mnfstnode]
1781 ndset = msng_filenode_set.setdefault(f, {})
1781 ndset = msng_filenode_set.setdefault(f, {})
1782 ndset.setdefault(fnode, clnode)
1782 ndset.setdefault(fnode, clnode)
1783 return collect_msng_filenodes
1783 return collect_msng_filenodes
1784
1784
1785 # We have a list of filenodes we think we need for a file, lets remove
1785 # We have a list of filenodes we think we need for a file, lets remove
1786 # all those we know the recipient must have.
1786 # all those we know the recipient must have.
1787 def prune_filenodes(f, filerevlog):
1787 def prune_filenodes(f, filerevlog):
1788 msngset = msng_filenode_set[f]
1788 msngset = msng_filenode_set[f]
1789 hasset = set()
1789 hasset = set()
1790 # If a 'missing' filenode thinks it belongs to a changenode we
1790 # If a 'missing' filenode thinks it belongs to a changenode we
1791 # assume the recipient must have, then the recipient must have
1791 # assume the recipient must have, then the recipient must have
1792 # that filenode.
1792 # that filenode.
1793 for n in msngset:
1793 for n in msngset:
1794 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1794 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1795 if clnode in has_cl_set:
1795 if clnode in has_cl_set:
1796 hasset.add(n)
1796 hasset.add(n)
1797 prune_parents(filerevlog, hasset, msngset)
1797 prune_parents(filerevlog, hasset, msngset)
1798
1798
1799 # A function generator function that sets up the a context for the
1799 # A function generator function that sets up the a context for the
1800 # inner function.
1800 # inner function.
1801 def lookup_filenode_link_func(fname):
1801 def lookup_filenode_link_func(fname):
1802 msngset = msng_filenode_set[fname]
1802 msngset = msng_filenode_set[fname]
1803 # Lookup the changenode the filenode belongs to.
1803 # Lookup the changenode the filenode belongs to.
1804 def lookup_filenode_link(fnode):
1804 def lookup_filenode_link(fnode):
1805 return msngset[fnode]
1805 return msngset[fnode]
1806 return lookup_filenode_link
1806 return lookup_filenode_link
1807
1807
1808 # Add the nodes that were explicitly requested.
1808 # Add the nodes that were explicitly requested.
1809 def add_extra_nodes(name, nodes):
1809 def add_extra_nodes(name, nodes):
1810 if not extranodes or name not in extranodes:
1810 if not extranodes or name not in extranodes:
1811 return
1811 return
1812
1812
1813 for node, linknode in extranodes[name]:
1813 for node, linknode in extranodes[name]:
1814 if node not in nodes:
1814 if node not in nodes:
1815 nodes[node] = linknode
1815 nodes[node] = linknode
1816
1816
1817 # Now that we have all theses utility functions to help out and
1817 # Now that we have all theses utility functions to help out and
1818 # logically divide up the task, generate the group.
1818 # logically divide up the task, generate the group.
1819 def gengroup():
1819 def gengroup():
1820 # The set of changed files starts empty.
1820 # The set of changed files starts empty.
1821 changedfiles = {}
1821 changedfiles = {}
1822 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1822 collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)
1823
1823
1824 # Create a changenode group generator that will call our functions
1824 # Create a changenode group generator that will call our functions
1825 # back to lookup the owning changenode and collect information.
1825 # back to lookup the owning changenode and collect information.
1826 group = cl.group(msng_cl_lst, identity, collect)
1826 group = cl.group(msng_cl_lst, identity, collect)
1827 cnt = 0
1827 cnt = 0
1828 for chnk in group:
1828 for chnk in group:
1829 yield chnk
1829 yield chnk
1830 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1830 self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
1831 cnt += 1
1831 cnt += 1
1832 self.ui.progress(_('bundling changes'), None, unit=_('chunks'))
1832 self.ui.progress(_('bundling changes'), None, unit=_('chunks'))
1833
1833
1834
1834
1835 # Figure out which manifest nodes (of the ones we think might be
1835 # Figure out which manifest nodes (of the ones we think might be
1836 # part of the changegroup) the recipient must know about and
1836 # part of the changegroup) the recipient must know about and
1837 # remove them from the changegroup.
1837 # remove them from the changegroup.
1838 has_mnfst_set = set()
1838 has_mnfst_set = set()
1839 for n in msng_mnfst_set:
1839 for n in msng_mnfst_set:
1840 # If a 'missing' manifest thinks it belongs to a changenode
1840 # If a 'missing' manifest thinks it belongs to a changenode
1841 # the recipient is assumed to have, obviously the recipient
1841 # the recipient is assumed to have, obviously the recipient
1842 # must have that manifest.
1842 # must have that manifest.
1843 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1843 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1844 if linknode in has_cl_set:
1844 if linknode in has_cl_set:
1845 has_mnfst_set.add(n)
1845 has_mnfst_set.add(n)
1846 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1846 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1847 add_extra_nodes(1, msng_mnfst_set)
1847 add_extra_nodes(1, msng_mnfst_set)
1848 msng_mnfst_lst = msng_mnfst_set.keys()
1848 msng_mnfst_lst = msng_mnfst_set.keys()
1849 # Sort the manifestnodes by revision number.
1849 # Sort the manifestnodes by revision number.
1850 msng_mnfst_lst.sort(key=mnfst.rev)
1850 msng_mnfst_lst.sort(key=mnfst.rev)
1851 # Create a generator for the manifestnodes that calls our lookup
1851 # Create a generator for the manifestnodes that calls our lookup
1852 # and data collection functions back.
1852 # and data collection functions back.
1853 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1853 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1854 filenode_collector(changedfiles))
1854 filenode_collector(changedfiles))
1855 cnt = 0
1855 cnt = 0
1856 for chnk in group:
1856 for chnk in group:
1857 yield chnk
1857 yield chnk
1858 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1858 self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
1859 cnt += 1
1859 cnt += 1
1860 self.ui.progress(_('bundling manifests'), None, unit=_('chunks'))
1860 self.ui.progress(_('bundling manifests'), None, unit=_('chunks'))
1861
1861
1862 # These are no longer needed, dereference and toss the memory for
1862 # These are no longer needed, dereference and toss the memory for
1863 # them.
1863 # them.
1864 msng_mnfst_lst = None
1864 msng_mnfst_lst = None
1865 msng_mnfst_set.clear()
1865 msng_mnfst_set.clear()
1866
1866
1867 if extranodes:
1867 if extranodes:
1868 for fname in extranodes:
1868 for fname in extranodes:
1869 if isinstance(fname, int):
1869 if isinstance(fname, int):
1870 continue
1870 continue
1871 msng_filenode_set.setdefault(fname, {})
1871 msng_filenode_set.setdefault(fname, {})
1872 changedfiles[fname] = 1
1872 changedfiles[fname] = 1
1873 # Go through all our files in order sorted by name.
1873 # Go through all our files in order sorted by name.
1874 cnt = 0
1874 cnt = 0
1875 for fname in sorted(changedfiles):
1875 for fname in sorted(changedfiles):
1876 filerevlog = self.file(fname)
1876 filerevlog = self.file(fname)
1877 if not len(filerevlog):
1877 if not len(filerevlog):
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1878 raise util.Abort(_("empty or missing revlog for %s") % fname)
1879 # Toss out the filenodes that the recipient isn't really
1879 # Toss out the filenodes that the recipient isn't really
1880 # missing.
1880 # missing.
1881 if fname in msng_filenode_set:
1881 if fname in msng_filenode_set:
1882 prune_filenodes(fname, filerevlog)
1882 prune_filenodes(fname, filerevlog)
1883 add_extra_nodes(fname, msng_filenode_set[fname])
1883 add_extra_nodes(fname, msng_filenode_set[fname])
1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1884 msng_filenode_lst = msng_filenode_set[fname].keys()
1885 else:
1885 else:
1886 msng_filenode_lst = []
1886 msng_filenode_lst = []
1887 # If any filenodes are left, generate the group for them,
1887 # If any filenodes are left, generate the group for them,
1888 # otherwise don't bother.
1888 # otherwise don't bother.
1889 if len(msng_filenode_lst) > 0:
1889 if len(msng_filenode_lst) > 0:
1890 yield changegroup.chunkheader(len(fname))
1890 yield changegroup.chunkheader(len(fname))
1891 yield fname
1891 yield fname
1892 # Sort the filenodes by their revision #
1892 # Sort the filenodes by their revision #
1893 msng_filenode_lst.sort(key=filerevlog.rev)
1893 msng_filenode_lst.sort(key=filerevlog.rev)
1894 # Create a group generator and only pass in a changenode
1894 # Create a group generator and only pass in a changenode
1895 # lookup function as we need to collect no information
1895 # lookup function as we need to collect no information
1896 # from filenodes.
1896 # from filenodes.
1897 group = filerevlog.group(msng_filenode_lst,
1897 group = filerevlog.group(msng_filenode_lst,
1898 lookup_filenode_link_func(fname))
1898 lookup_filenode_link_func(fname))
1899 for chnk in group:
1899 for chnk in group:
1900 self.ui.progress(
1900 self.ui.progress(
1901 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1901 _('bundling files'), cnt, item=fname, unit=_('chunks'))
1902 cnt += 1
1902 cnt += 1
1903 yield chnk
1903 yield chnk
1904 if fname in msng_filenode_set:
1904 if fname in msng_filenode_set:
1905 # Don't need this anymore, toss it to free memory.
1905 # Don't need this anymore, toss it to free memory.
1906 del msng_filenode_set[fname]
1906 del msng_filenode_set[fname]
1907 # Signal that no more groups are left.
1907 # Signal that no more groups are left.
1908 yield changegroup.closechunk()
1908 yield changegroup.closechunk()
1909 self.ui.progress(_('bundling files'), None, unit=_('chunks'))
1909 self.ui.progress(_('bundling files'), None, unit=_('chunks'))
1910
1910
1911 if msng_cl_lst:
1911 if msng_cl_lst:
1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1912 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1913
1913
1914 return util.chunkbuffer(gengroup())
1914 return util.chunkbuffer(gengroup())
1915
1915
def changegroup(self, basenodes, source):
    """Return a changegroup of everything descending from basenodes.

    Delegates to changegroupsubset() against the repository's current
    heads instead of bundling directly, which avoids a race with
    concurrent pushes (issue1320).
    """
    current_heads = self.heads()
    return self.changegroupsubset(basenodes, current_heads, source)
1919
1919
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    # give hooks/extensions a chance to veto the outgoing transfer
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # revision numbers of the outgoing changesets; used below (via
    # linkrev) to select the manifest and file revisions they introduced
    revset = set([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def identity(x):
        # lookup callback for the changelog group: a changeset node
        # links to itself
        return x

    def gennodelst(log):
        # yield the nodes of revlog *log* that were introduced by one
        # of the outgoing changesets
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookuprevlink_func(revlog):
        # build a lookup callback mapping a node of *revlog* to the
        # changelog node that introduced it
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuprevlink

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files
        changedfiles = {}
        mmfs = {}
        # the collector fills mmfs/changedfiles as a side effect while
        # the changelog group below is being generated
        collect = changegroup.collector(cl, mmfs, changedfiles)

        # phase 1: changeset chunks
        cnt = 0
        for chnk in cl.group(nodes, identity, collect):
            self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            cnt += 1
            yield chnk
        self.ui.progress(_('bundling changes'), None, unit=_('chunks'))

        # phase 2: manifest chunks
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        cnt = 0
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            cnt += 1
            yield chnk
        self.ui.progress(_('bundling manifests'), None, unit=_('chunks'))

        # phase 3: file chunks -- for each changed file, a header chunk
        # carrying the file name followed by that file's revision chunks
        cnt = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    self.ui.progress(
                        _('bundling files'), cnt, item=fname, unit=_('chunks'))
                    cnt += 1
                    yield chnk
        self.ui.progress(_('bundling files'), None, unit=_('chunks'))

        # an empty chunk marks the end of the file groups
        yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1996
1996
def addchangegroup(self, source, srctype, url, emptyok=False):
    """add changegroup to repo.

    source is a stream of changegroup chunks; srctype and url describe
    its origin and are passed through to the hooks.

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # addgroup lookup callback: log each incoming changeset and
        # link it to the revision number it is about to occupy
        # (len(cl) is the next rev).  'cl' is bound later in this
        # function, before any call happens.
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        # map a changelog node to its revision number; used as the
        # linkrev lookup for incoming manifest and file revisions
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        # hand the revlogs a weak proxy so they do not keep a hard
        # reference to the transaction -- presumably to let it be
        # collected after 'del tr' below; TODO confirm
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # minimal callable progress reporter; chunkiter invokes it once
        # per chunk, and step/count are re-pointed between phases
        class prog(object):
            step = _('changesets')
            count = 1
            ui = self.ui
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'))
                self.count += 1
        pr = prog()
        chunkiter = changegroup.chunkiter(source, progress=pr)
        if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        chunkiter = changegroup.chunkiter(source, progress=pr)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        while 1:
            # each file group starts with a chunk holding the file
            # name; an empty chunk terminates the sequence
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = len(fl)
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # tick off the file nodes we actually received
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # anything still in needfiles was announced by a manifest but
        # never delivered; abort unless it is already present locally
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        newheads = len(cl.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # let pretxnchangegroup hooks see the pending (not yet
            # written) changelog: p() flushes it and returns the repo
            # root, or "" if nothing was pending
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        del tr

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.branchtags()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
2143
2143
2144
2144
def stream_in(self, remote):
    """Populate this repo by copying raw store files streamed from *remote*.

    Stream layout, as parsed below: first line is an integer status
    (0 ok, 1 operation forbidden, 2 remote locking failed), second
    line is "<total_files> <total_bytes>", then for each file a line
    of the form name, NUL byte, size, followed by exactly *size*
    bytes of file data.

    Returns len(self.heads()) + 1 (always at least 1).
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        # copy exactly *size* bytes from the stream into the store file
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    # guard against a zero or negative clock delta in the rate report
    if elapsed <= 0:
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    # drop cached state: the store was rewritten underneath this object
    self.invalidate()
    return len(self.heads()) + 1
2192
2192
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # Use None as the default instead of a mutable [] (shared across
    # calls); normalize to an empty list so self.pull() sees the same
    # value it always did.
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads and remote.capable('stream'):
        return self.stream_in(remote)
    return self.pull(remote, heads)
2211
2211
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued renames in *files*.

    Each element of *files* is a (src, dest) pair; the pairs are
    copied into tuples up front, so the callback is independent of
    the caller's list object.
    """
    pending = [tuple(pair) for pair in files]
    def runrenames():
        for src, dest in pending:
            util.rename(src, dest)
    return runrenames
2219
2219
def instance(ui, path, create):
    """Build a localrepository for *path* after stripping a file:// scheme."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2222
2222
def islocal(path):
    """Report that repositories reached by plain paths are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now