##// END OF EJS Templates
branchmap: simplify _branchtags using a new _cacheabletip method...
Pierre-Yves David -
r18112:569091b9 default
parent child Browse files
Show More
@@ -1,3626 +1,3611 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use :hg:`help command` for more details)::
17 Common tasks (use :hg:`help command` for more details)::
18
18
19 create new patch qnew
19 create new patch qnew
20 import existing patch qimport
20 import existing patch qimport
21
21
22 print patch series qseries
22 print patch series qseries
23 print applied patches qapplied
23 print applied patches qapplied
24
24
25 add known patch to applied stack qpush
25 add known patch to applied stack qpush
26 remove patch from applied stack qpop
26 remove patch from applied stack qpop
27 refresh contents of top applied patch qrefresh
27 refresh contents of top applied patch qrefresh
28
28
29 By default, mq will automatically use git patches when required to
29 By default, mq will automatically use git patches when required to
30 avoid losing file mode changes, copy records, binary files or empty
30 avoid losing file mode changes, copy records, binary files or empty
31 files creations or deletions. This behaviour can be configured with::
31 files creations or deletions. This behaviour can be configured with::
32
32
33 [mq]
33 [mq]
34 git = auto/keep/yes/no
34 git = auto/keep/yes/no
35
35
36 If set to 'keep', mq will obey the [diff] section configuration while
36 If set to 'keep', mq will obey the [diff] section configuration while
37 preserving existing git patches upon qrefresh. If set to 'yes' or
37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 'no', mq will override the [diff] section and always generate git or
38 'no', mq will override the [diff] section and always generate git or
39 regular patches, possibly losing data in the second case.
39 regular patches, possibly losing data in the second case.
40
40
41 It may be desirable for mq changesets to be kept in the secret phase (see
41 It may be desirable for mq changesets to be kept in the secret phase (see
42 :hg:`help phases`), which can be enabled with the following setting::
42 :hg:`help phases`), which can be enabled with the following setting::
43
43
44 [mq]
44 [mq]
45 secret = True
45 secret = True
46
46
47 You will by default be managing a patch queue named "patches". You can
47 You will by default be managing a patch queue named "patches". You can
48 create other, independent patch queues with the :hg:`qqueue` command.
48 create other, independent patch queues with the :hg:`qqueue` command.
49
49
50 If the working directory contains uncommitted files, qpush, qpop and
50 If the working directory contains uncommitted files, qpush, qpop and
51 qgoto abort immediately. If -f/--force is used, the changes are
51 qgoto abort immediately. If -f/--force is used, the changes are
52 discarded. Setting::
52 discarded. Setting::
53
53
54 [mq]
54 [mq]
55 keepchanges = True
55 keepchanges = True
56
56
57 make them behave as if --keep-changes were passed, and non-conflicting
57 make them behave as if --keep-changes were passed, and non-conflicting
58 local changes will be tolerated and preserved. If incompatible options
58 local changes will be tolerated and preserved. If incompatible options
59 such as -f/--force or --exact are passed, this setting is ignored.
59 such as -f/--force or --exact are passed, this setting is ignored.
60 '''
60 '''
61
61
62 from mercurial.i18n import _
62 from mercurial.i18n import _
63 from mercurial.node import bin, hex, short, nullid, nullrev
63 from mercurial.node import bin, hex, short, nullid, nullrev
64 from mercurial.lock import release
64 from mercurial.lock import release
65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
65 from mercurial import commands, cmdutil, hg, scmutil, util, revset
66 from mercurial import repair, extensions, error, phases
66 from mercurial import repair, extensions, error, phases
67 from mercurial import patch as patchmod
67 from mercurial import patch as patchmod
68 import os, re, errno, shutil
68 import os, re, errno, shutil
69
69
70 commands.norepo += " qclone"
70 commands.norepo += " qclone"
71
71
72 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
72 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
73
73
74 cmdtable = {}
74 cmdtable = {}
75 command = cmdutil.command(cmdtable)
75 command = cmdutil.command(cmdtable)
76 testedwith = 'internal'
76 testedwith = 'internal'
77
77
78 # Patch names looks like unix-file names.
78 # Patch names looks like unix-file names.
79 # They must be joinable with queue directory and result in the patch path.
79 # They must be joinable with queue directory and result in the patch path.
80 normname = util.normpath
80 normname = util.normpath
81
81
class statusentry(object):
    """One entry of the mq status file: a currently applied patch.

    Pairs the changeset node the patch was applied as with the patch's
    name; ``str()``/``repr()`` produces the on-disk ``hexnode:name`` form.
    """
    def __init__(self, node, name):
        self.node = node
        self.name = name

    def __repr__(self):
        return '%s:%s' % (hex(self.node), self.name)
87
87
class patchheader(object):
    """Parsed header of an mq patch file.

    Splits a patch file into its commit message, the raw comment lines
    preceding the diff, and the metadata carried either by 'hg export'
    style headers ('# User', '# Date', ...) or by mail-style tags
    ('From:', 'Subject:', 'Date:').
    """
    def __init__(self, pf, plainmode=False):
        # pf: path of the patch file.
        # plainmode: whether mq runs with mq.plain=True (plain mail-style
        # headers instead of '# HG changeset patch' blocks).
        def eatdiff(lines):
            # Strip trailing diff-leader lines ('diff -', 'Index:', '====')
            # from *lines*, in place.
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # Strip trailing blank lines from *lines*, in place.
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        # 'format' tracks the parser state: None, "hgpatch" (inside an
        # '# HG changeset patch' block), "tag" (reading mail-style tags)
        # or "tagdone" (blank line ended the tag section).
        format = None
        subject = None
        branch = None
        nodeid = None
        # diffstart: 0 = not seen, 1 = saw a '--- ' line (candidate),
        # 2 = confirmed start of the diff body.
        diffstart = 0

        # NOTE: 'file' is the Python 2 builtin open().
        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                # definite start of the diff: stop parsing the header
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip()
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    # first non-header line ends the hgpatch block and
                    # starts the commit message proper
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            # every header line, recognized or not, is kept verbatim
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        """Set the user, updating an existing header line or inserting one."""
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                # no hgpatch block: use a mail-style 'From:' header in
                # plain mode, otherwise create a fresh hgpatch block
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        """Set the date, updating an existing header line or inserting one."""
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        """Set the parent header; silently skipped without an hgpatch block."""
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        """Replace the commit message, keeping other comment fields."""
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        """Render the header back to patch-file text (empty if no comments)."""
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
271
271
def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensure a commit respect mq.secret setting

    It should be used instead of repo.commit inside the mq source for operation
    creating new changeset.
    """
    repo = repo.unfiltered()
    # phase=None means "use whatever phase mq.secret implies, if any"
    if phase is None:
        if repo.ui.configbool('mq', 'secret', False):
            phase = phases.secret
    if phase is not None:
        # remember the current phases.new-commit value so the finally
        # block can restore it after the commit
        backup = repo.ui.backupconfig('phases', 'new-commit')
    # Marking the repository as committing an mq patch can be used
    # to optimize operations like _branchtags().
    repo._committingpatch = True
    try:
        if phase is not None:
            repo.ui.setconfig('phases', 'new-commit', phase)
        return repo.commit(*args, **kwargs)
    finally:
        repo._committingpatch = False
        if phase is not None:
            repo.ui.restoreconfig(backup)
295
295
class AbortNoCleanup(error.Abort):
    """Abort raised when the operation must stop without undoing work."""
298
298
299 class queue(object):
299 class queue(object):
    def __init__(self, ui, path, patchdir=None):
        # path: the repository's .hg directory.
        # patchdir: explicit patch directory, overriding the active queue.
        self.basepath = path
        try:
            # 'patches.queue' names the active queue (see qqueue); an
            # empty or missing file selects the default 'patches' queue
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = scmutil.opener(self.path)
        self.ui = ui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                # value is not a plain bool: fall back to the string form
                raise error.ConfigError
            self.gitmode = gitmode and 'yes' or 'no'
        except error.ConfigError:
            self.gitmode = ui.config('mq', 'git', 'auto').lower()
        self.plainmode = ui.configbool('mq', 'plain', False)
332
332
333 @util.propertycache
333 @util.propertycache
334 def applied(self):
334 def applied(self):
335 def parselines(lines):
335 def parselines(lines):
336 for l in lines:
336 for l in lines:
337 entry = l.split(':', 1)
337 entry = l.split(':', 1)
338 if len(entry) > 1:
338 if len(entry) > 1:
339 n, name = entry
339 n, name = entry
340 yield statusentry(bin(n), name)
340 yield statusentry(bin(n), name)
341 elif l.strip():
341 elif l.strip():
342 self.ui.warn(_('malformated mq status line: %s\n') % entry)
342 self.ui.warn(_('malformated mq status line: %s\n') % entry)
343 # else we ignore empty lines
343 # else we ignore empty lines
344 try:
344 try:
345 lines = self.opener.read(self.statuspath).splitlines()
345 lines = self.opener.read(self.statuspath).splitlines()
346 return list(parselines(lines))
346 return list(parselines(lines))
347 except IOError, e:
347 except IOError, e:
348 if e.errno == errno.ENOENT:
348 if e.errno == errno.ENOENT:
349 return []
349 return []
350 raise
350 raise
351
351
352 @util.propertycache
352 @util.propertycache
353 def fullseries(self):
353 def fullseries(self):
354 try:
354 try:
355 return self.opener.read(self.seriespath).splitlines()
355 return self.opener.read(self.seriespath).splitlines()
356 except IOError, e:
356 except IOError, e:
357 if e.errno == errno.ENOENT:
357 if e.errno == errno.ENOENT:
358 return []
358 return []
359 raise
359 raise
360
360
    @util.propertycache
    def series(self):
        # parseseries() assigns self.series in the instance __dict__,
        # which shadows this propertycache from then on; reading the
        # attribute after the call therefore does not recurse.
        self.parseseries()
        return self.series
365
365
    @util.propertycache
    def seriesguards(self):
        # same shadowing trick as 'series': parseseries() writes
        # self.seriesguards into the instance __dict__, bypassing this
        # propertycache on subsequent accesses.
        self.parseseries()
        return self.seriesguards
370
370
371 def invalidate(self):
371 def invalidate(self):
372 for a in 'applied fullseries series seriesguards'.split():
372 for a in 'applied fullseries series seriesguards'.split():
373 if a in self.__dict__:
373 if a in self.__dict__:
374 delattr(self, a)
374 delattr(self, a)
375 self.applieddirty = False
375 self.applieddirty = False
376 self.seriesdirty = False
376 self.seriesdirty = False
377 self.guardsdirty = False
377 self.guardsdirty = False
378 self.activeguards = None
378 self.activeguards = None
379
379
380 def diffopts(self, opts={}, patchfn=None):
380 def diffopts(self, opts={}, patchfn=None):
381 diffopts = patchmod.diffopts(self.ui, opts)
381 diffopts = patchmod.diffopts(self.ui, opts)
382 if self.gitmode == 'auto':
382 if self.gitmode == 'auto':
383 diffopts.upgrade = True
383 diffopts.upgrade = True
384 elif self.gitmode == 'keep':
384 elif self.gitmode == 'keep':
385 pass
385 pass
386 elif self.gitmode in ('yes', 'no'):
386 elif self.gitmode in ('yes', 'no'):
387 diffopts.git = self.gitmode == 'yes'
387 diffopts.git = self.gitmode == 'yes'
388 else:
388 else:
389 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
389 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
390 ' got %s') % self.gitmode)
390 ' got %s') % self.gitmode)
391 if patchfn:
391 if patchfn:
392 diffopts = self.patchopts(diffopts, patchfn)
392 diffopts = self.patchopts(diffopts, patchfn)
393 return diffopts
393 return diffopts
394
394
395 def patchopts(self, diffopts, *patches):
395 def patchopts(self, diffopts, *patches):
396 """Return a copy of input diff options with git set to true if
396 """Return a copy of input diff options with git set to true if
397 referenced patch is a git patch and should be preserved as such.
397 referenced patch is a git patch and should be preserved as such.
398 """
398 """
399 diffopts = diffopts.copy()
399 diffopts = diffopts.copy()
400 if not diffopts.git and self.gitmode == 'keep':
400 if not diffopts.git and self.gitmode == 'keep':
401 for patchfn in patches:
401 for patchfn in patches:
402 patchf = self.opener(patchfn, 'r')
402 patchf = self.opener(patchfn, 'r')
403 # if the patch was a git patch, refresh it as a git patch
403 # if the patch was a git patch, refresh it as a git patch
404 for line in patchf:
404 for line in patchf:
405 if line.startswith('diff --git'):
405 if line.startswith('diff --git'):
406 diffopts.git = True
406 diffopts.git = True
407 break
407 break
408 patchf.close()
408 patchf.close()
409 return diffopts
409 return diffopts
410
410
411 def join(self, *p):
411 def join(self, *p):
412 return os.path.join(self.path, *p)
412 return os.path.join(self.path, *p)
413
413
414 def findseries(self, patch):
414 def findseries(self, patch):
415 def matchpatch(l):
415 def matchpatch(l):
416 l = l.split('#', 1)[0]
416 l = l.split('#', 1)[0]
417 return l.strip() == patch
417 return l.strip() == patch
418 for index, l in enumerate(self.fullseries):
418 for index, l in enumerate(self.fullseries):
419 if matchpatch(l):
419 if matchpatch(l):
420 return index
420 return index
421 return None
421 return None
422
422
423 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
423 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
424
424
425 def parseseries(self):
425 def parseseries(self):
426 self.series = []
426 self.series = []
427 self.seriesguards = []
427 self.seriesguards = []
428 for l in self.fullseries:
428 for l in self.fullseries:
429 h = l.find('#')
429 h = l.find('#')
430 if h == -1:
430 if h == -1:
431 patch = l
431 patch = l
432 comment = ''
432 comment = ''
433 elif h == 0:
433 elif h == 0:
434 continue
434 continue
435 else:
435 else:
436 patch = l[:h]
436 patch = l[:h]
437 comment = l[h:]
437 comment = l[h:]
438 patch = patch.strip()
438 patch = patch.strip()
439 if patch:
439 if patch:
440 if patch in self.series:
440 if patch in self.series:
441 raise util.Abort(_('%s appears more than once in %s') %
441 raise util.Abort(_('%s appears more than once in %s') %
442 (patch, self.join(self.seriespath)))
442 (patch, self.join(self.seriespath)))
443 self.series.append(patch)
443 self.series.append(patch)
444 self.seriesguards.append(self.guard_re.findall(comment))
444 self.seriesguards.append(self.guard_re.findall(comment))
445
445
446 def checkguard(self, guard):
446 def checkguard(self, guard):
447 if not guard:
447 if not guard:
448 return _('guard cannot be an empty string')
448 return _('guard cannot be an empty string')
449 bad_chars = '# \t\r\n\f'
449 bad_chars = '# \t\r\n\f'
450 first = guard[0]
450 first = guard[0]
451 if first in '-+':
451 if first in '-+':
452 return (_('guard %r starts with invalid character: %r') %
452 return (_('guard %r starts with invalid character: %r') %
453 (guard, first))
453 (guard, first))
454 for c in bad_chars:
454 for c in bad_chars:
455 if c in guard:
455 if c in guard:
456 return _('invalid character in guard %r: %r') % (guard, c)
456 return _('invalid character in guard %r: %r') % (guard, c)
457
457
458 def setactive(self, guards):
458 def setactive(self, guards):
459 for guard in guards:
459 for guard in guards:
460 bad = self.checkguard(guard)
460 bad = self.checkguard(guard)
461 if bad:
461 if bad:
462 raise util.Abort(bad)
462 raise util.Abort(bad)
463 guards = sorted(set(guards))
463 guards = sorted(set(guards))
464 self.ui.debug('active guards: %s\n' % ' '.join(guards))
464 self.ui.debug('active guards: %s\n' % ' '.join(guards))
465 self.activeguards = guards
465 self.activeguards = guards
466 self.guardsdirty = True
466 self.guardsdirty = True
467
467
468 def active(self):
468 def active(self):
469 if self.activeguards is None:
469 if self.activeguards is None:
470 self.activeguards = []
470 self.activeguards = []
471 try:
471 try:
472 guards = self.opener.read(self.guardspath).split()
472 guards = self.opener.read(self.guardspath).split()
473 except IOError, err:
473 except IOError, err:
474 if err.errno != errno.ENOENT:
474 if err.errno != errno.ENOENT:
475 raise
475 raise
476 guards = []
476 guards = []
477 for i, guard in enumerate(guards):
477 for i, guard in enumerate(guards):
478 bad = self.checkguard(guard)
478 bad = self.checkguard(guard)
479 if bad:
479 if bad:
480 self.ui.warn('%s:%d: %s\n' %
480 self.ui.warn('%s:%d: %s\n' %
481 (self.join(self.guardspath), i + 1, bad))
481 (self.join(self.guardspath), i + 1, bad))
482 else:
482 else:
483 self.activeguards.append(guard)
483 self.activeguards.append(guard)
484 return self.activeguards
484 return self.activeguards
485
485
486 def setguards(self, idx, guards):
486 def setguards(self, idx, guards):
487 for g in guards:
487 for g in guards:
488 if len(g) < 2:
488 if len(g) < 2:
489 raise util.Abort(_('guard %r too short') % g)
489 raise util.Abort(_('guard %r too short') % g)
490 if g[0] not in '-+':
490 if g[0] not in '-+':
491 raise util.Abort(_('guard %r starts with invalid char') % g)
491 raise util.Abort(_('guard %r starts with invalid char') % g)
492 bad = self.checkguard(g[1:])
492 bad = self.checkguard(g[1:])
493 if bad:
493 if bad:
494 raise util.Abort(bad)
494 raise util.Abort(bad)
495 drop = self.guard_re.sub('', self.fullseries[idx])
495 drop = self.guard_re.sub('', self.fullseries[idx])
496 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
496 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
497 self.parseseries()
497 self.parseseries()
498 self.seriesdirty = True
498 self.seriesdirty = True
499
499
500 def pushable(self, idx):
500 def pushable(self, idx):
501 if isinstance(idx, str):
501 if isinstance(idx, str):
502 idx = self.series.index(idx)
502 idx = self.series.index(idx)
503 patchguards = self.seriesguards[idx]
503 patchguards = self.seriesguards[idx]
504 if not patchguards:
504 if not patchguards:
505 return True, None
505 return True, None
506 guards = self.active()
506 guards = self.active()
507 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
507 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
508 if exactneg:
508 if exactneg:
509 return False, repr(exactneg[0])
509 return False, repr(exactneg[0])
510 pos = [g for g in patchguards if g[0] == '+']
510 pos = [g for g in patchguards if g[0] == '+']
511 exactpos = [g for g in pos if g[1:] in guards]
511 exactpos = [g for g in pos if g[1:] in guards]
512 if pos:
512 if pos:
513 if exactpos:
513 if exactpos:
514 return True, repr(exactpos[0])
514 return True, repr(exactpos[0])
515 return False, ' '.join(map(repr, pos))
515 return False, ' '.join(map(repr, pos))
516 return True, ''
516 return True, ''
517
517
518 def explainpushable(self, idx, all_patches=False):
518 def explainpushable(self, idx, all_patches=False):
519 write = all_patches and self.ui.write or self.ui.warn
519 write = all_patches and self.ui.write or self.ui.warn
520 if all_patches or self.ui.verbose:
520 if all_patches or self.ui.verbose:
521 if isinstance(idx, str):
521 if isinstance(idx, str):
522 idx = self.series.index(idx)
522 idx = self.series.index(idx)
523 pushable, why = self.pushable(idx)
523 pushable, why = self.pushable(idx)
524 if all_patches and pushable:
524 if all_patches and pushable:
525 if why is None:
525 if why is None:
526 write(_('allowing %s - no guards in effect\n') %
526 write(_('allowing %s - no guards in effect\n') %
527 self.series[idx])
527 self.series[idx])
528 else:
528 else:
529 if not why:
529 if not why:
530 write(_('allowing %s - no matching negative guards\n') %
530 write(_('allowing %s - no matching negative guards\n') %
531 self.series[idx])
531 self.series[idx])
532 else:
532 else:
533 write(_('allowing %s - guarded by %s\n') %
533 write(_('allowing %s - guarded by %s\n') %
534 (self.series[idx], why))
534 (self.series[idx], why))
535 if not pushable:
535 if not pushable:
536 if why:
536 if why:
537 write(_('skipping %s - guarded by %s\n') %
537 write(_('skipping %s - guarded by %s\n') %
538 (self.series[idx], why))
538 (self.series[idx], why))
539 else:
539 else:
540 write(_('skipping %s - no matching guards\n') %
540 write(_('skipping %s - no matching guards\n') %
541 self.series[idx])
541 self.series[idx])
542
542
543 def savedirty(self):
543 def savedirty(self):
544 def writelist(items, path):
544 def writelist(items, path):
545 fp = self.opener(path, 'w')
545 fp = self.opener(path, 'w')
546 for i in items:
546 for i in items:
547 fp.write("%s\n" % i)
547 fp.write("%s\n" % i)
548 fp.close()
548 fp.close()
549 if self.applieddirty:
549 if self.applieddirty:
550 writelist(map(str, self.applied), self.statuspath)
550 writelist(map(str, self.applied), self.statuspath)
551 self.applieddirty = False
551 self.applieddirty = False
552 if self.seriesdirty:
552 if self.seriesdirty:
553 writelist(self.fullseries, self.seriespath)
553 writelist(self.fullseries, self.seriespath)
554 self.seriesdirty = False
554 self.seriesdirty = False
555 if self.guardsdirty:
555 if self.guardsdirty:
556 writelist(self.activeguards, self.guardspath)
556 writelist(self.activeguards, self.guardspath)
557 self.guardsdirty = False
557 self.guardsdirty = False
558 if self.added:
558 if self.added:
559 qrepo = self.qrepo()
559 qrepo = self.qrepo()
560 if qrepo:
560 if qrepo:
561 qrepo[None].add(f for f in self.added if f not in qrepo[None])
561 qrepo[None].add(f for f in self.added if f not in qrepo[None])
562 self.added = []
562 self.added = []
563
563
564 def removeundo(self, repo):
564 def removeundo(self, repo):
565 undo = repo.sjoin('undo')
565 undo = repo.sjoin('undo')
566 if not os.path.exists(undo):
566 if not os.path.exists(undo):
567 return
567 return
568 try:
568 try:
569 os.unlink(undo)
569 os.unlink(undo)
570 except OSError, inst:
570 except OSError, inst:
571 self.ui.warn(_('error removing undo: %s\n') % str(inst))
571 self.ui.warn(_('error removing undo: %s\n') % str(inst))
572
572
573 def backup(self, repo, files, copy=False):
573 def backup(self, repo, files, copy=False):
574 # backup local changes in --force case
574 # backup local changes in --force case
575 for f in sorted(files):
575 for f in sorted(files):
576 absf = repo.wjoin(f)
576 absf = repo.wjoin(f)
577 if os.path.lexists(absf):
577 if os.path.lexists(absf):
578 self.ui.note(_('saving current version of %s as %s\n') %
578 self.ui.note(_('saving current version of %s as %s\n') %
579 (f, f + '.orig'))
579 (f, f + '.orig'))
580 if copy:
580 if copy:
581 util.copyfile(absf, absf + '.orig')
581 util.copyfile(absf, absf + '.orig')
582 else:
582 else:
583 util.rename(absf, absf + '.orig')
583 util.rename(absf, absf + '.orig')
584
584
585 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
585 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
586 fp=None, changes=None, opts={}):
586 fp=None, changes=None, opts={}):
587 stat = opts.get('stat')
587 stat = opts.get('stat')
588 m = scmutil.match(repo[node1], files, opts)
588 m = scmutil.match(repo[node1], files, opts)
589 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
589 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
590 changes, stat, fp)
590 changes, stat, fp)
591
591
592 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
592 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
593 # first try just applying the patch
593 # first try just applying the patch
594 (err, n) = self.apply(repo, [patch], update_status=False,
594 (err, n) = self.apply(repo, [patch], update_status=False,
595 strict=True, merge=rev)
595 strict=True, merge=rev)
596
596
597 if err == 0:
597 if err == 0:
598 return (err, n)
598 return (err, n)
599
599
600 if n is None:
600 if n is None:
601 raise util.Abort(_("apply failed for patch %s") % patch)
601 raise util.Abort(_("apply failed for patch %s") % patch)
602
602
603 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
603 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
604
604
605 # apply failed, strip away that rev and merge.
605 # apply failed, strip away that rev and merge.
606 hg.clean(repo, head)
606 hg.clean(repo, head)
607 self.strip(repo, [n], update=False, backup='strip')
607 self.strip(repo, [n], update=False, backup='strip')
608
608
609 ctx = repo[rev]
609 ctx = repo[rev]
610 ret = hg.merge(repo, rev)
610 ret = hg.merge(repo, rev)
611 if ret:
611 if ret:
612 raise util.Abort(_("update returned %d") % ret)
612 raise util.Abort(_("update returned %d") % ret)
613 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
613 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
614 if n is None:
614 if n is None:
615 raise util.Abort(_("repo commit failed"))
615 raise util.Abort(_("repo commit failed"))
616 try:
616 try:
617 ph = patchheader(mergeq.join(patch), self.plainmode)
617 ph = patchheader(mergeq.join(patch), self.plainmode)
618 except Exception:
618 except Exception:
619 raise util.Abort(_("unable to read %s") % patch)
619 raise util.Abort(_("unable to read %s") % patch)
620
620
621 diffopts = self.patchopts(diffopts, patch)
621 diffopts = self.patchopts(diffopts, patch)
622 patchf = self.opener(patch, "w")
622 patchf = self.opener(patch, "w")
623 comments = str(ph)
623 comments = str(ph)
624 if comments:
624 if comments:
625 patchf.write(comments)
625 patchf.write(comments)
626 self.printdiff(repo, diffopts, head, n, fp=patchf)
626 self.printdiff(repo, diffopts, head, n, fp=patchf)
627 patchf.close()
627 patchf.close()
628 self.removeundo(repo)
628 self.removeundo(repo)
629 return (0, n)
629 return (0, n)
630
630
631 def qparents(self, repo, rev=None):
631 def qparents(self, repo, rev=None):
632 if rev is None:
632 if rev is None:
633 (p1, p2) = repo.dirstate.parents()
633 (p1, p2) = repo.dirstate.parents()
634 if p2 == nullid:
634 if p2 == nullid:
635 return p1
635 return p1
636 if not self.applied:
636 if not self.applied:
637 return None
637 return None
638 return self.applied[-1].node
638 return self.applied[-1].node
639 p1, p2 = repo.changelog.parents(rev)
639 p1, p2 = repo.changelog.parents(rev)
640 if p2 != nullid and p2 in [x.node for x in self.applied]:
640 if p2 != nullid and p2 in [x.node for x in self.applied]:
641 return p2
641 return p2
642 return p1
642 return p1
643
643
644 def mergepatch(self, repo, mergeq, series, diffopts):
644 def mergepatch(self, repo, mergeq, series, diffopts):
645 if not self.applied:
645 if not self.applied:
646 # each of the patches merged in will have two parents. This
646 # each of the patches merged in will have two parents. This
647 # can confuse the qrefresh, qdiff, and strip code because it
647 # can confuse the qrefresh, qdiff, and strip code because it
648 # needs to know which parent is actually in the patch queue.
648 # needs to know which parent is actually in the patch queue.
649 # so, we insert a merge marker with only one parent. This way
649 # so, we insert a merge marker with only one parent. This way
650 # the first patch in the queue is never a merge patch
650 # the first patch in the queue is never a merge patch
651 #
651 #
652 pname = ".hg.patches.merge.marker"
652 pname = ".hg.patches.merge.marker"
653 n = newcommit(repo, None, '[mq]: merge marker', force=True)
653 n = newcommit(repo, None, '[mq]: merge marker', force=True)
654 self.removeundo(repo)
654 self.removeundo(repo)
655 self.applied.append(statusentry(n, pname))
655 self.applied.append(statusentry(n, pname))
656 self.applieddirty = True
656 self.applieddirty = True
657
657
658 head = self.qparents(repo)
658 head = self.qparents(repo)
659
659
660 for patch in series:
660 for patch in series:
661 patch = mergeq.lookup(patch, strict=True)
661 patch = mergeq.lookup(patch, strict=True)
662 if not patch:
662 if not patch:
663 self.ui.warn(_("patch %s does not exist\n") % patch)
663 self.ui.warn(_("patch %s does not exist\n") % patch)
664 return (1, None)
664 return (1, None)
665 pushable, reason = self.pushable(patch)
665 pushable, reason = self.pushable(patch)
666 if not pushable:
666 if not pushable:
667 self.explainpushable(patch, all_patches=True)
667 self.explainpushable(patch, all_patches=True)
668 continue
668 continue
669 info = mergeq.isapplied(patch)
669 info = mergeq.isapplied(patch)
670 if not info:
670 if not info:
671 self.ui.warn(_("patch %s is not applied\n") % patch)
671 self.ui.warn(_("patch %s is not applied\n") % patch)
672 return (1, None)
672 return (1, None)
673 rev = info[1]
673 rev = info[1]
674 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
674 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
675 if head:
675 if head:
676 self.applied.append(statusentry(head, patch))
676 self.applied.append(statusentry(head, patch))
677 self.applieddirty = True
677 self.applieddirty = True
678 if err:
678 if err:
679 return (err, head)
679 return (err, head)
680 self.savedirty()
680 self.savedirty()
681 return (0, head)
681 return (0, head)
682
682
683 def patch(self, repo, patchfile):
683 def patch(self, repo, patchfile):
684 '''Apply patchfile to the working directory.
684 '''Apply patchfile to the working directory.
685 patchfile: name of patch file'''
685 patchfile: name of patch file'''
686 files = set()
686 files = set()
687 try:
687 try:
688 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
688 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
689 files=files, eolmode=None)
689 files=files, eolmode=None)
690 return (True, list(files), fuzz)
690 return (True, list(files), fuzz)
691 except Exception, inst:
691 except Exception, inst:
692 self.ui.note(str(inst) + '\n')
692 self.ui.note(str(inst) + '\n')
693 if not self.ui.verbose:
693 if not self.ui.verbose:
694 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
694 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
695 self.ui.traceback()
695 self.ui.traceback()
696 return (False, list(files), False)
696 return (False, list(files), False)
697
697
698 def apply(self, repo, series, list=False, update_status=True,
698 def apply(self, repo, series, list=False, update_status=True,
699 strict=False, patchdir=None, merge=None, all_files=None,
699 strict=False, patchdir=None, merge=None, all_files=None,
700 tobackup=None, keepchanges=False):
700 tobackup=None, keepchanges=False):
701 wlock = lock = tr = None
701 wlock = lock = tr = None
702 try:
702 try:
703 wlock = repo.wlock()
703 wlock = repo.wlock()
704 lock = repo.lock()
704 lock = repo.lock()
705 tr = repo.transaction("qpush")
705 tr = repo.transaction("qpush")
706 try:
706 try:
707 ret = self._apply(repo, series, list, update_status,
707 ret = self._apply(repo, series, list, update_status,
708 strict, patchdir, merge, all_files=all_files,
708 strict, patchdir, merge, all_files=all_files,
709 tobackup=tobackup, keepchanges=keepchanges)
709 tobackup=tobackup, keepchanges=keepchanges)
710 tr.close()
710 tr.close()
711 self.savedirty()
711 self.savedirty()
712 return ret
712 return ret
713 except AbortNoCleanup:
713 except AbortNoCleanup:
714 tr.close()
714 tr.close()
715 self.savedirty()
715 self.savedirty()
716 return 2, repo.dirstate.p1()
716 return 2, repo.dirstate.p1()
717 except: # re-raises
717 except: # re-raises
718 try:
718 try:
719 tr.abort()
719 tr.abort()
720 finally:
720 finally:
721 repo.invalidate()
721 repo.invalidate()
722 repo.dirstate.invalidate()
722 repo.dirstate.invalidate()
723 self.invalidate()
723 self.invalidate()
724 raise
724 raise
725 finally:
725 finally:
726 release(tr, lock, wlock)
726 release(tr, lock, wlock)
727 self.removeundo(repo)
727 self.removeundo(repo)
728
728
729 def _apply(self, repo, series, list=False, update_status=True,
729 def _apply(self, repo, series, list=False, update_status=True,
730 strict=False, patchdir=None, merge=None, all_files=None,
730 strict=False, patchdir=None, merge=None, all_files=None,
731 tobackup=None, keepchanges=False):
731 tobackup=None, keepchanges=False):
732 """returns (error, hash)
732 """returns (error, hash)
733
733
734 error = 1 for unable to read, 2 for patch failed, 3 for patch
734 error = 1 for unable to read, 2 for patch failed, 3 for patch
735 fuzz. tobackup is None or a set of files to backup before they
735 fuzz. tobackup is None or a set of files to backup before they
736 are modified by a patch.
736 are modified by a patch.
737 """
737 """
738 # TODO unify with commands.py
738 # TODO unify with commands.py
739 if not patchdir:
739 if not patchdir:
740 patchdir = self.path
740 patchdir = self.path
741 err = 0
741 err = 0
742 n = None
742 n = None
743 for patchname in series:
743 for patchname in series:
744 pushable, reason = self.pushable(patchname)
744 pushable, reason = self.pushable(patchname)
745 if not pushable:
745 if not pushable:
746 self.explainpushable(patchname, all_patches=True)
746 self.explainpushable(patchname, all_patches=True)
747 continue
747 continue
748 self.ui.status(_("applying %s\n") % patchname)
748 self.ui.status(_("applying %s\n") % patchname)
749 pf = os.path.join(patchdir, patchname)
749 pf = os.path.join(patchdir, patchname)
750
750
751 try:
751 try:
752 ph = patchheader(self.join(patchname), self.plainmode)
752 ph = patchheader(self.join(patchname), self.plainmode)
753 except IOError:
753 except IOError:
754 self.ui.warn(_("unable to read %s\n") % patchname)
754 self.ui.warn(_("unable to read %s\n") % patchname)
755 err = 1
755 err = 1
756 break
756 break
757
757
758 message = ph.message
758 message = ph.message
759 if not message:
759 if not message:
760 # The commit message should not be translated
760 # The commit message should not be translated
761 message = "imported patch %s\n" % patchname
761 message = "imported patch %s\n" % patchname
762 else:
762 else:
763 if list:
763 if list:
764 # The commit message should not be translated
764 # The commit message should not be translated
765 message.append("\nimported patch %s" % patchname)
765 message.append("\nimported patch %s" % patchname)
766 message = '\n'.join(message)
766 message = '\n'.join(message)
767
767
768 if ph.haspatch:
768 if ph.haspatch:
769 if tobackup:
769 if tobackup:
770 touched = patchmod.changedfiles(self.ui, repo, pf)
770 touched = patchmod.changedfiles(self.ui, repo, pf)
771 touched = set(touched) & tobackup
771 touched = set(touched) & tobackup
772 if touched and keepchanges:
772 if touched and keepchanges:
773 raise AbortNoCleanup(
773 raise AbortNoCleanup(
774 _("local changes found, refresh first"))
774 _("local changes found, refresh first"))
775 self.backup(repo, touched, copy=True)
775 self.backup(repo, touched, copy=True)
776 tobackup = tobackup - touched
776 tobackup = tobackup - touched
777 (patcherr, files, fuzz) = self.patch(repo, pf)
777 (patcherr, files, fuzz) = self.patch(repo, pf)
778 if all_files is not None:
778 if all_files is not None:
779 all_files.update(files)
779 all_files.update(files)
780 patcherr = not patcherr
780 patcherr = not patcherr
781 else:
781 else:
782 self.ui.warn(_("patch %s is empty\n") % patchname)
782 self.ui.warn(_("patch %s is empty\n") % patchname)
783 patcherr, files, fuzz = 0, [], 0
783 patcherr, files, fuzz = 0, [], 0
784
784
785 if merge and files:
785 if merge and files:
786 # Mark as removed/merged and update dirstate parent info
786 # Mark as removed/merged and update dirstate parent info
787 removed = []
787 removed = []
788 merged = []
788 merged = []
789 for f in files:
789 for f in files:
790 if os.path.lexists(repo.wjoin(f)):
790 if os.path.lexists(repo.wjoin(f)):
791 merged.append(f)
791 merged.append(f)
792 else:
792 else:
793 removed.append(f)
793 removed.append(f)
794 for f in removed:
794 for f in removed:
795 repo.dirstate.remove(f)
795 repo.dirstate.remove(f)
796 for f in merged:
796 for f in merged:
797 repo.dirstate.merge(f)
797 repo.dirstate.merge(f)
798 p1, p2 = repo.dirstate.parents()
798 p1, p2 = repo.dirstate.parents()
799 repo.setparents(p1, merge)
799 repo.setparents(p1, merge)
800
800
801 match = scmutil.matchfiles(repo, files or [])
801 match = scmutil.matchfiles(repo, files or [])
802 oldtip = repo['tip']
802 oldtip = repo['tip']
803 n = newcommit(repo, None, message, ph.user, ph.date, match=match,
803 n = newcommit(repo, None, message, ph.user, ph.date, match=match,
804 force=True)
804 force=True)
805 if repo['tip'] == oldtip:
805 if repo['tip'] == oldtip:
806 raise util.Abort(_("qpush exactly duplicates child changeset"))
806 raise util.Abort(_("qpush exactly duplicates child changeset"))
807 if n is None:
807 if n is None:
808 raise util.Abort(_("repository commit failed"))
808 raise util.Abort(_("repository commit failed"))
809
809
810 if update_status:
810 if update_status:
811 self.applied.append(statusentry(n, patchname))
811 self.applied.append(statusentry(n, patchname))
812
812
813 if patcherr:
813 if patcherr:
814 self.ui.warn(_("patch failed, rejects left in working dir\n"))
814 self.ui.warn(_("patch failed, rejects left in working dir\n"))
815 err = 2
815 err = 2
816 break
816 break
817
817
818 if fuzz and strict:
818 if fuzz and strict:
819 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
819 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
820 err = 3
820 err = 3
821 break
821 break
822 return (err, n)
822 return (err, n)
823
823
824 def _cleanup(self, patches, numrevs, keep=False):
824 def _cleanup(self, patches, numrevs, keep=False):
825 if not keep:
825 if not keep:
826 r = self.qrepo()
826 r = self.qrepo()
827 if r:
827 if r:
828 r[None].forget(patches)
828 r[None].forget(patches)
829 for p in patches:
829 for p in patches:
830 try:
830 try:
831 os.unlink(self.join(p))
831 os.unlink(self.join(p))
832 except OSError, inst:
832 except OSError, inst:
833 if inst.errno != errno.ENOENT:
833 if inst.errno != errno.ENOENT:
834 raise
834 raise
835
835
836 qfinished = []
836 qfinished = []
837 if numrevs:
837 if numrevs:
838 qfinished = self.applied[:numrevs]
838 qfinished = self.applied[:numrevs]
839 del self.applied[:numrevs]
839 del self.applied[:numrevs]
840 self.applieddirty = True
840 self.applieddirty = True
841
841
842 unknown = []
842 unknown = []
843
843
844 for (i, p) in sorted([(self.findseries(p), p) for p in patches],
844 for (i, p) in sorted([(self.findseries(p), p) for p in patches],
845 reverse=True):
845 reverse=True):
846 if i is not None:
846 if i is not None:
847 del self.fullseries[i]
847 del self.fullseries[i]
848 else:
848 else:
849 unknown.append(p)
849 unknown.append(p)
850
850
851 if unknown:
851 if unknown:
852 if numrevs:
852 if numrevs:
853 rev = dict((entry.name, entry.node) for entry in qfinished)
853 rev = dict((entry.name, entry.node) for entry in qfinished)
854 for p in unknown:
854 for p in unknown:
855 msg = _('revision %s refers to unknown patches: %s\n')
855 msg = _('revision %s refers to unknown patches: %s\n')
856 self.ui.warn(msg % (short(rev[p]), p))
856 self.ui.warn(msg % (short(rev[p]), p))
857 else:
857 else:
858 msg = _('unknown patches: %s\n')
858 msg = _('unknown patches: %s\n')
859 raise util.Abort(''.join(msg % p for p in unknown))
859 raise util.Abort(''.join(msg % p for p in unknown))
860
860
861 self.parseseries()
861 self.parseseries()
862 self.seriesdirty = True
862 self.seriesdirty = True
863 return [entry.node for entry in qfinished]
863 return [entry.node for entry in qfinished]
864
864
865 def _revpatches(self, repo, revs):
865 def _revpatches(self, repo, revs):
866 firstrev = repo[self.applied[0].node].rev()
866 firstrev = repo[self.applied[0].node].rev()
867 patches = []
867 patches = []
868 for i, rev in enumerate(revs):
868 for i, rev in enumerate(revs):
869
869
870 if rev < firstrev:
870 if rev < firstrev:
871 raise util.Abort(_('revision %d is not managed') % rev)
871 raise util.Abort(_('revision %d is not managed') % rev)
872
872
873 ctx = repo[rev]
873 ctx = repo[rev]
874 base = self.applied[i].node
874 base = self.applied[i].node
875 if ctx.node() != base:
875 if ctx.node() != base:
876 msg = _('cannot delete revision %d above applied patches')
876 msg = _('cannot delete revision %d above applied patches')
877 raise util.Abort(msg % rev)
877 raise util.Abort(msg % rev)
878
878
879 patch = self.applied[i].name
879 patch = self.applied[i].name
880 for fmt in ('[mq]: %s', 'imported patch %s'):
880 for fmt in ('[mq]: %s', 'imported patch %s'):
881 if ctx.description() == fmt % patch:
881 if ctx.description() == fmt % patch:
882 msg = _('patch %s finalized without changeset message\n')
882 msg = _('patch %s finalized without changeset message\n')
883 repo.ui.status(msg % patch)
883 repo.ui.status(msg % patch)
884 break
884 break
885
885
886 patches.append(patch)
886 patches.append(patch)
887 return patches
887 return patches
888
888
889 def finish(self, repo, revs):
889 def finish(self, repo, revs):
890 # Manually trigger phase computation to ensure phasedefaults is
890 # Manually trigger phase computation to ensure phasedefaults is
891 # executed before we remove the patches.
891 # executed before we remove the patches.
892 repo._phasecache
892 repo._phasecache
893 patches = self._revpatches(repo, sorted(revs))
893 patches = self._revpatches(repo, sorted(revs))
894 qfinished = self._cleanup(patches, len(patches))
894 qfinished = self._cleanup(patches, len(patches))
895 if qfinished and repo.ui.configbool('mq', 'secret', False):
895 if qfinished and repo.ui.configbool('mq', 'secret', False):
896 # only use this logic when the secret option is added
896 # only use this logic when the secret option is added
897 oldqbase = repo[qfinished[0]]
897 oldqbase = repo[qfinished[0]]
898 tphase = repo.ui.config('phases', 'new-commit', phases.draft)
898 tphase = repo.ui.config('phases', 'new-commit', phases.draft)
899 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
899 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
900 phases.advanceboundary(repo, tphase, qfinished)
900 phases.advanceboundary(repo, tphase, qfinished)
901
901
902 def delete(self, repo, patches, opts):
902 def delete(self, repo, patches, opts):
903 if not patches and not opts.get('rev'):
903 if not patches and not opts.get('rev'):
904 raise util.Abort(_('qdelete requires at least one revision or '
904 raise util.Abort(_('qdelete requires at least one revision or '
905 'patch name'))
905 'patch name'))
906
906
907 realpatches = []
907 realpatches = []
908 for patch in patches:
908 for patch in patches:
909 patch = self.lookup(patch, strict=True)
909 patch = self.lookup(patch, strict=True)
910 info = self.isapplied(patch)
910 info = self.isapplied(patch)
911 if info:
911 if info:
912 raise util.Abort(_("cannot delete applied patch %s") % patch)
912 raise util.Abort(_("cannot delete applied patch %s") % patch)
913 if patch not in self.series:
913 if patch not in self.series:
914 raise util.Abort(_("patch %s not in series file") % patch)
914 raise util.Abort(_("patch %s not in series file") % patch)
915 if patch not in realpatches:
915 if patch not in realpatches:
916 realpatches.append(patch)
916 realpatches.append(patch)
917
917
918 numrevs = 0
918 numrevs = 0
919 if opts.get('rev'):
919 if opts.get('rev'):
920 if not self.applied:
920 if not self.applied:
921 raise util.Abort(_('no patches applied'))
921 raise util.Abort(_('no patches applied'))
922 revs = scmutil.revrange(repo, opts.get('rev'))
922 revs = scmutil.revrange(repo, opts.get('rev'))
923 if len(revs) > 1 and revs[0] > revs[1]:
923 if len(revs) > 1 and revs[0] > revs[1]:
924 revs.reverse()
924 revs.reverse()
925 revpatches = self._revpatches(repo, revs)
925 revpatches = self._revpatches(repo, revs)
926 realpatches += revpatches
926 realpatches += revpatches
927 numrevs = len(revpatches)
927 numrevs = len(revpatches)
928
928
929 self._cleanup(realpatches, numrevs, opts.get('keep'))
929 self._cleanup(realpatches, numrevs, opts.get('keep'))
930
930
931 def checktoppatch(self, repo):
931 def checktoppatch(self, repo):
932 if self.applied:
932 if self.applied:
933 top = self.applied[-1].node
933 top = self.applied[-1].node
934 patch = self.applied[-1].name
934 patch = self.applied[-1].name
935 pp = repo.dirstate.parents()
935 pp = repo.dirstate.parents()
936 if top not in pp:
936 if top not in pp:
937 raise util.Abort(_("working directory revision is not qtip"))
937 raise util.Abort(_("working directory revision is not qtip"))
938 return top, patch
938 return top, patch
939 return None, None
939 return None, None
940
940
941 def checksubstate(self, repo, baserev=None):
941 def checksubstate(self, repo, baserev=None):
942 '''return list of subrepos at a different revision than substate.
942 '''return list of subrepos at a different revision than substate.
943 Abort if any subrepos have uncommitted changes.'''
943 Abort if any subrepos have uncommitted changes.'''
944 inclsubs = []
944 inclsubs = []
945 wctx = repo[None]
945 wctx = repo[None]
946 if baserev:
946 if baserev:
947 bctx = repo[baserev]
947 bctx = repo[baserev]
948 else:
948 else:
949 bctx = wctx.parents()[0]
949 bctx = wctx.parents()[0]
950 for s in wctx.substate:
950 for s in wctx.substate:
951 if wctx.sub(s).dirty(True):
951 if wctx.sub(s).dirty(True):
952 raise util.Abort(
952 raise util.Abort(
953 _("uncommitted changes in subrepository %s") % s)
953 _("uncommitted changes in subrepository %s") % s)
954 elif s not in bctx.substate or bctx.sub(s).dirty():
954 elif s not in bctx.substate or bctx.sub(s).dirty():
955 inclsubs.append(s)
955 inclsubs.append(s)
956 return inclsubs
956 return inclsubs
957
957
958 def putsubstate2changes(self, substatestate, changes):
958 def putsubstate2changes(self, substatestate, changes):
959 for files in changes[:3]:
959 for files in changes[:3]:
960 if '.hgsubstate' in files:
960 if '.hgsubstate' in files:
961 return # already listed up
961 return # already listed up
962 # not yet listed up
962 # not yet listed up
963 if substatestate in 'a?':
963 if substatestate in 'a?':
964 changes[1].append('.hgsubstate')
964 changes[1].append('.hgsubstate')
965 elif substatestate in 'r':
965 elif substatestate in 'r':
966 changes[2].append('.hgsubstate')
966 changes[2].append('.hgsubstate')
967 else: # modified
967 else: # modified
968 changes[0].append('.hgsubstate')
968 changes[0].append('.hgsubstate')
969
969
970 def localchangesfound(self, refresh=True):
970 def localchangesfound(self, refresh=True):
971 if refresh:
971 if refresh:
972 raise util.Abort(_("local changes found, refresh first"))
972 raise util.Abort(_("local changes found, refresh first"))
973 else:
973 else:
974 raise util.Abort(_("local changes found"))
974 raise util.Abort(_("local changes found"))
975
975
976 def checklocalchanges(self, repo, force=False, refresh=True):
976 def checklocalchanges(self, repo, force=False, refresh=True):
977 m, a, r, d = repo.status()[:4]
977 m, a, r, d = repo.status()[:4]
978 if (m or a or r or d) and not force:
978 if (m or a or r or d) and not force:
979 self.localchangesfound(refresh)
979 self.localchangesfound(refresh)
980 return m, a, r, d
980 return m, a, r, d
981
981
982 _reserved = ('series', 'status', 'guards', '.', '..')
982 _reserved = ('series', 'status', 'guards', '.', '..')
983 def checkreservedname(self, name):
983 def checkreservedname(self, name):
984 if name in self._reserved:
984 if name in self._reserved:
985 raise util.Abort(_('"%s" cannot be used as the name of a patch')
985 raise util.Abort(_('"%s" cannot be used as the name of a patch')
986 % name)
986 % name)
987 for prefix in ('.hg', '.mq'):
987 for prefix in ('.hg', '.mq'):
988 if name.startswith(prefix):
988 if name.startswith(prefix):
989 raise util.Abort(_('patch name cannot begin with "%s"')
989 raise util.Abort(_('patch name cannot begin with "%s"')
990 % prefix)
990 % prefix)
991 for c in ('#', ':'):
991 for c in ('#', ':'):
992 if c in name:
992 if c in name:
993 raise util.Abort(_('"%s" cannot be used in the name of a patch')
993 raise util.Abort(_('"%s" cannot be used in the name of a patch')
994 % c)
994 % c)
995
995
996 def checkpatchname(self, name, force=False):
996 def checkpatchname(self, name, force=False):
997 self.checkreservedname(name)
997 self.checkreservedname(name)
998 if not force and os.path.exists(self.join(name)):
998 if not force and os.path.exists(self.join(name)):
999 if os.path.isdir(self.join(name)):
999 if os.path.isdir(self.join(name)):
1000 raise util.Abort(_('"%s" already exists as a directory')
1000 raise util.Abort(_('"%s" already exists as a directory')
1001 % name)
1001 % name)
1002 else:
1002 else:
1003 raise util.Abort(_('patch "%s" already exists') % name)
1003 raise util.Abort(_('patch "%s" already exists') % name)
1004
1004
1005 def checkkeepchanges(self, keepchanges, force):
1005 def checkkeepchanges(self, keepchanges, force):
1006 if force and keepchanges:
1006 if force and keepchanges:
1007 raise util.Abort(_('cannot use both --force and --keep-changes'))
1007 raise util.Abort(_('cannot use both --force and --keep-changes'))
1008
1008
1009 def new(self, repo, patchfn, *pats, **opts):
1009 def new(self, repo, patchfn, *pats, **opts):
1010 """options:
1010 """options:
1011 msg: a string or a no-argument function returning a string
1011 msg: a string or a no-argument function returning a string
1012 """
1012 """
1013 msg = opts.get('msg')
1013 msg = opts.get('msg')
1014 user = opts.get('user')
1014 user = opts.get('user')
1015 date = opts.get('date')
1015 date = opts.get('date')
1016 if date:
1016 if date:
1017 date = util.parsedate(date)
1017 date = util.parsedate(date)
1018 diffopts = self.diffopts({'git': opts.get('git')})
1018 diffopts = self.diffopts({'git': opts.get('git')})
1019 if opts.get('checkname', True):
1019 if opts.get('checkname', True):
1020 self.checkpatchname(patchfn)
1020 self.checkpatchname(patchfn)
1021 inclsubs = self.checksubstate(repo)
1021 inclsubs = self.checksubstate(repo)
1022 if inclsubs:
1022 if inclsubs:
1023 inclsubs.append('.hgsubstate')
1023 inclsubs.append('.hgsubstate')
1024 substatestate = repo.dirstate['.hgsubstate']
1024 substatestate = repo.dirstate['.hgsubstate']
1025 if opts.get('include') or opts.get('exclude') or pats:
1025 if opts.get('include') or opts.get('exclude') or pats:
1026 if inclsubs:
1026 if inclsubs:
1027 pats = list(pats or []) + inclsubs
1027 pats = list(pats or []) + inclsubs
1028 match = scmutil.match(repo[None], pats, opts)
1028 match = scmutil.match(repo[None], pats, opts)
1029 # detect missing files in pats
1029 # detect missing files in pats
1030 def badfn(f, msg):
1030 def badfn(f, msg):
1031 if f != '.hgsubstate': # .hgsubstate is auto-created
1031 if f != '.hgsubstate': # .hgsubstate is auto-created
1032 raise util.Abort('%s: %s' % (f, msg))
1032 raise util.Abort('%s: %s' % (f, msg))
1033 match.bad = badfn
1033 match.bad = badfn
1034 changes = repo.status(match=match)
1034 changes = repo.status(match=match)
1035 m, a, r, d = changes[:4]
1035 m, a, r, d = changes[:4]
1036 else:
1036 else:
1037 changes = self.checklocalchanges(repo, force=True)
1037 changes = self.checklocalchanges(repo, force=True)
1038 m, a, r, d = changes
1038 m, a, r, d = changes
1039 match = scmutil.matchfiles(repo, m + a + r + inclsubs)
1039 match = scmutil.matchfiles(repo, m + a + r + inclsubs)
1040 if len(repo[None].parents()) > 1:
1040 if len(repo[None].parents()) > 1:
1041 raise util.Abort(_('cannot manage merge changesets'))
1041 raise util.Abort(_('cannot manage merge changesets'))
1042 commitfiles = m + a + r
1042 commitfiles = m + a + r
1043 self.checktoppatch(repo)
1043 self.checktoppatch(repo)
1044 insert = self.fullseriesend()
1044 insert = self.fullseriesend()
1045 wlock = repo.wlock()
1045 wlock = repo.wlock()
1046 try:
1046 try:
1047 try:
1047 try:
1048 # if patch file write fails, abort early
1048 # if patch file write fails, abort early
1049 p = self.opener(patchfn, "w")
1049 p = self.opener(patchfn, "w")
1050 except IOError, e:
1050 except IOError, e:
1051 raise util.Abort(_('cannot write patch "%s": %s')
1051 raise util.Abort(_('cannot write patch "%s": %s')
1052 % (patchfn, e.strerror))
1052 % (patchfn, e.strerror))
1053 try:
1053 try:
1054 if self.plainmode:
1054 if self.plainmode:
1055 if user:
1055 if user:
1056 p.write("From: " + user + "\n")
1056 p.write("From: " + user + "\n")
1057 if not date:
1057 if not date:
1058 p.write("\n")
1058 p.write("\n")
1059 if date:
1059 if date:
1060 p.write("Date: %d %d\n\n" % date)
1060 p.write("Date: %d %d\n\n" % date)
1061 else:
1061 else:
1062 p.write("# HG changeset patch\n")
1062 p.write("# HG changeset patch\n")
1063 p.write("# Parent "
1063 p.write("# Parent "
1064 + hex(repo[None].p1().node()) + "\n")
1064 + hex(repo[None].p1().node()) + "\n")
1065 if user:
1065 if user:
1066 p.write("# User " + user + "\n")
1066 p.write("# User " + user + "\n")
1067 if date:
1067 if date:
1068 p.write("# Date %s %s\n\n" % date)
1068 p.write("# Date %s %s\n\n" % date)
1069 if util.safehasattr(msg, '__call__'):
1069 if util.safehasattr(msg, '__call__'):
1070 msg = msg()
1070 msg = msg()
1071 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
1071 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
1072 n = newcommit(repo, None, commitmsg, user, date, match=match,
1072 n = newcommit(repo, None, commitmsg, user, date, match=match,
1073 force=True)
1073 force=True)
1074 if n is None:
1074 if n is None:
1075 raise util.Abort(_("repo commit failed"))
1075 raise util.Abort(_("repo commit failed"))
1076 try:
1076 try:
1077 self.fullseries[insert:insert] = [patchfn]
1077 self.fullseries[insert:insert] = [patchfn]
1078 self.applied.append(statusentry(n, patchfn))
1078 self.applied.append(statusentry(n, patchfn))
1079 self.parseseries()
1079 self.parseseries()
1080 self.seriesdirty = True
1080 self.seriesdirty = True
1081 self.applieddirty = True
1081 self.applieddirty = True
1082 if msg:
1082 if msg:
1083 msg = msg + "\n\n"
1083 msg = msg + "\n\n"
1084 p.write(msg)
1084 p.write(msg)
1085 if commitfiles:
1085 if commitfiles:
1086 parent = self.qparents(repo, n)
1086 parent = self.qparents(repo, n)
1087 if inclsubs:
1087 if inclsubs:
1088 self.putsubstate2changes(substatestate, changes)
1088 self.putsubstate2changes(substatestate, changes)
1089 chunks = patchmod.diff(repo, node1=parent, node2=n,
1089 chunks = patchmod.diff(repo, node1=parent, node2=n,
1090 changes=changes, opts=diffopts)
1090 changes=changes, opts=diffopts)
1091 for chunk in chunks:
1091 for chunk in chunks:
1092 p.write(chunk)
1092 p.write(chunk)
1093 p.close()
1093 p.close()
1094 r = self.qrepo()
1094 r = self.qrepo()
1095 if r:
1095 if r:
1096 r[None].add([patchfn])
1096 r[None].add([patchfn])
1097 except: # re-raises
1097 except: # re-raises
1098 repo.rollback()
1098 repo.rollback()
1099 raise
1099 raise
1100 except Exception:
1100 except Exception:
1101 patchpath = self.join(patchfn)
1101 patchpath = self.join(patchfn)
1102 try:
1102 try:
1103 os.unlink(patchpath)
1103 os.unlink(patchpath)
1104 except OSError:
1104 except OSError:
1105 self.ui.warn(_('error unlinking %s\n') % patchpath)
1105 self.ui.warn(_('error unlinking %s\n') % patchpath)
1106 raise
1106 raise
1107 self.removeundo(repo)
1107 self.removeundo(repo)
1108 finally:
1108 finally:
1109 release(wlock)
1109 release(wlock)
1110
1110
    def strip(self, repo, revs, update=True, backup="all", force=None):
        """Remove revisions ``revs`` from the repository via repair.strip.

        If ``update`` is true, the working directory is first moved to the
        qparent of ``revs[0]`` so it does not end up on a stripped revision.
        ``backup`` is forwarded to repair.strip and controls backup-bundle
        creation; ``force`` is forwarded to checklocalchanges and allows
        stripping despite local modifications.
        """
        wlock = lock = None
        try:
            # lock ordering matters: take the working-directory lock
            # before the store lock, and release in the opposite order.
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                # refuse (unless forced) to clobber local changes, then
                # park the working dir on the parent of the stripped revs
                self.checklocalchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, revs[0])
                hg.clean(repo, urev)
                repo.dirstate.write()

            repair.strip(self.ui, repo, revs, backup)
        finally:
            release(lock, wlock)
1126
1126
1127 def isapplied(self, patch):
1127 def isapplied(self, patch):
1128 """returns (index, rev, patch)"""
1128 """returns (index, rev, patch)"""
1129 for i, a in enumerate(self.applied):
1129 for i, a in enumerate(self.applied):
1130 if a.name == patch:
1130 if a.name == patch:
1131 return (i, a.node, a.name)
1131 return (i, a.node, a.name)
1132 return None
1132 return None
1133
1133
    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch name to an entry of self.series.

        Resolution order: exact series name, numeric offset into the
        series (only when no file of that name exists in the patch dir),
        then - unless ``strict`` - unique substring match (including the
        'qtip'/'qbase' aliases) and finally ``name-N``/``name+N``
        relative offsets. Raises util.Abort when nothing matches.
        """
        def partialname(s):
            # substring match against series entries; an exact entry
            # always wins, an ambiguous substring resolves to nothing
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            # symbolic names only make sense with applied patches
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        # a bare integer is a series offset, but only when it does not
        # collide with an actual patch file of that name
        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                # negative offsets index from the end, Python style
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            # name-N: N patches before 'name' in the series (default 1)
            minus = patch.rfind('-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            # name+N: N patches after 'name' in the series (default 1)
            plus = patch.rfind('+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1:] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
1200
1200
    def push(self, repo, patch=None, force=False, list=False, mergeq=None,
             all=False, move=False, exact=False, nobackup=False,
             keepchanges=False):
        """Apply (push) unapplied patches from the series.

        Pushes up to and including ``patch`` if given, everything if
        ``all``, otherwise just the next patch. ``move`` reorders the
        named patch to the front of the unapplied part first; ``exact``
        updates the working dir to the patch's recorded parent before
        applying. Returns 0 on success, 1 on failure/nothing to do.
        """
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        wlock = repo.wlock()
        try:
            # warn when the working directory is not on a branch head
            # (pushing there creates a new head)
            heads = []
            for b, ls in repo.branchmap().iteritems():
                heads += ls
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                # --exact is incompatible with the other modes and
                # requires a clean (no applied patches) starting point
                if keepchanges:
                    raise util.Abort(
                        _("cannot use --exact and --keep-changes together"))
                if move:
                    raise util.Abort(_('cannot use --exact and --move '
                                       'together'))
                if self.applied:
                    raise util.Abort(_('cannot push --exact with applied '
                                       'patches'))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise util.Abort(
                        _("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                if not patch:
                    raise util.Abort(_("please specify the patch to move"))
                # locate the next-to-apply slot and the patch to move
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            # files to back up before patching, depending on mode
            tobackup = set()
            if (not nobackup and force) or keepchanges:
                m, a, r, d = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(m + a + r + d)
                else:
                    tobackup.update(m + a)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files,
                                     tobackup=tobackup, keepchanges=keepchanges)
            except: # re-raises
                # apply failed hard: restore the working directory and
                # delete files the patch attempt created, then re-raise
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.p1()
                hg.revert(repo, node, None)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        try:
                            util.unlinkpath(repo.wjoin(f))
                        except OSError, inst:
                            if inst.errno != errno.ENOENT:
                                raise
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
1352
1352
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            nobackup=False, keepchanges=False):
        """Unapply (pop) applied patches down to and including ``patch``.

        Pops everything if ``all``, just the top patch when no target is
        given. When ``update`` is true the working directory is moved
        back to the qparent with a simplified in-place update; the
        popped revisions are then stripped from the repository.
        """
        self.checkkeepchanges(keepchanges, force)
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    # not applied under that exact name; resolve it
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # even with --no-update, force an update if a dirstate
                # parent is among the revisions about to be popped
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the working-dir update when no popped revision is
                # a parent of the working directory
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            tobackup = set()
            if update:
                m, a, r, d = self.checklocalchanges(
                    repo, force=force or keepchanges)
                if force:
                    if not nobackup:
                        tobackup.update(m + a)
                elif keepchanges:
                    tobackup.update(m + a + r + d)

            self.applieddirty = True
            end = len(self.applied)
            rev = self.applied[start].node
            if update:
                top = self.checktoppatch(repo)[0]

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            # refuse to strip revisions that are not exclusively managed
            # by this queue, or that have been made immutable (phases)
            if heads != [self.applied[-1].node]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))
            if not repo[self.applied[-1].node].mutable():
                raise util.Abort(
                    _("popping would remove an immutable revision"),
                    hint=_('see "hg help phases" for details'))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))

                tobackup = set(a + m + r) & tobackup
                if keepchanges and tobackup:
                    self.localchangesfound()
                self.backup(repo, tobackup)

                # files added by the popped patches disappear ...
                for f in a:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    repo.dirstate.drop(f)
                # ... modified/removed files revert to the qparent state
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, [rev], update=False, backup='strip')
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1466
1466
1467 def diff(self, repo, pats, opts):
1467 def diff(self, repo, pats, opts):
1468 top, patch = self.checktoppatch(repo)
1468 top, patch = self.checktoppatch(repo)
1469 if not top:
1469 if not top:
1470 self.ui.write(_("no patches applied\n"))
1470 self.ui.write(_("no patches applied\n"))
1471 return
1471 return
1472 qp = self.qparents(repo, top)
1472 qp = self.qparents(repo, top)
1473 if opts.get('reverse'):
1473 if opts.get('reverse'):
1474 node1, node2 = None, qp
1474 node1, node2 = None, qp
1475 else:
1475 else:
1476 node1, node2 = qp, None
1476 node1, node2 = qp, None
1477 diffopts = self.diffopts(opts, patch)
1477 diffopts = self.diffopts(opts, patch)
1478 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1478 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1479
1479
1480 def refresh(self, repo, pats=None, **opts):
1480 def refresh(self, repo, pats=None, **opts):
1481 if not self.applied:
1481 if not self.applied:
1482 self.ui.write(_("no patches applied\n"))
1482 self.ui.write(_("no patches applied\n"))
1483 return 1
1483 return 1
1484 msg = opts.get('msg', '').rstrip()
1484 msg = opts.get('msg', '').rstrip()
1485 newuser = opts.get('user')
1485 newuser = opts.get('user')
1486 newdate = opts.get('date')
1486 newdate = opts.get('date')
1487 if newdate:
1487 if newdate:
1488 newdate = '%d %d' % util.parsedate(newdate)
1488 newdate = '%d %d' % util.parsedate(newdate)
1489 wlock = repo.wlock()
1489 wlock = repo.wlock()
1490
1490
1491 try:
1491 try:
1492 self.checktoppatch(repo)
1492 self.checktoppatch(repo)
1493 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1493 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1494 if repo.changelog.heads(top) != [top]:
1494 if repo.changelog.heads(top) != [top]:
1495 raise util.Abort(_("cannot refresh a revision with children"))
1495 raise util.Abort(_("cannot refresh a revision with children"))
1496 if not repo[top].mutable():
1496 if not repo[top].mutable():
1497 raise util.Abort(_("cannot refresh immutable revision"),
1497 raise util.Abort(_("cannot refresh immutable revision"),
1498 hint=_('see "hg help phases" for details'))
1498 hint=_('see "hg help phases" for details'))
1499
1499
1500 cparents = repo.changelog.parents(top)
1500 cparents = repo.changelog.parents(top)
1501 patchparent = self.qparents(repo, top)
1501 patchparent = self.qparents(repo, top)
1502
1502
1503 inclsubs = self.checksubstate(repo, hex(patchparent))
1503 inclsubs = self.checksubstate(repo, hex(patchparent))
1504 if inclsubs:
1504 if inclsubs:
1505 inclsubs.append('.hgsubstate')
1505 inclsubs.append('.hgsubstate')
1506 substatestate = repo.dirstate['.hgsubstate']
1506 substatestate = repo.dirstate['.hgsubstate']
1507
1507
1508 ph = patchheader(self.join(patchfn), self.plainmode)
1508 ph = patchheader(self.join(patchfn), self.plainmode)
1509 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1509 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1510 if msg:
1510 if msg:
1511 ph.setmessage(msg)
1511 ph.setmessage(msg)
1512 if newuser:
1512 if newuser:
1513 ph.setuser(newuser)
1513 ph.setuser(newuser)
1514 if newdate:
1514 if newdate:
1515 ph.setdate(newdate)
1515 ph.setdate(newdate)
1516 ph.setparent(hex(patchparent))
1516 ph.setparent(hex(patchparent))
1517
1517
1518 # only commit new patch when write is complete
1518 # only commit new patch when write is complete
1519 patchf = self.opener(patchfn, 'w', atomictemp=True)
1519 patchf = self.opener(patchfn, 'w', atomictemp=True)
1520
1520
1521 comments = str(ph)
1521 comments = str(ph)
1522 if comments:
1522 if comments:
1523 patchf.write(comments)
1523 patchf.write(comments)
1524
1524
1525 # update the dirstate in place, strip off the qtip commit
1525 # update the dirstate in place, strip off the qtip commit
1526 # and then commit.
1526 # and then commit.
1527 #
1527 #
1528 # this should really read:
1528 # this should really read:
1529 # mm, dd, aa = repo.status(top, patchparent)[:3]
1529 # mm, dd, aa = repo.status(top, patchparent)[:3]
1530 # but we do it backwards to take advantage of manifest/changelog
1530 # but we do it backwards to take advantage of manifest/changelog
1531 # caching against the next repo.status call
1531 # caching against the next repo.status call
1532 mm, aa, dd = repo.status(patchparent, top)[:3]
1532 mm, aa, dd = repo.status(patchparent, top)[:3]
1533 changes = repo.changelog.read(top)
1533 changes = repo.changelog.read(top)
1534 man = repo.manifest.read(changes[0])
1534 man = repo.manifest.read(changes[0])
1535 aaa = aa[:]
1535 aaa = aa[:]
1536 matchfn = scmutil.match(repo[None], pats, opts)
1536 matchfn = scmutil.match(repo[None], pats, opts)
1537 # in short mode, we only diff the files included in the
1537 # in short mode, we only diff the files included in the
1538 # patch already plus specified files
1538 # patch already plus specified files
1539 if opts.get('short'):
1539 if opts.get('short'):
1540 # if amending a patch, we start with existing
1540 # if amending a patch, we start with existing
1541 # files plus specified files - unfiltered
1541 # files plus specified files - unfiltered
1542 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1542 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1543 # filter with include/exclude options
1543 # filter with include/exclude options
1544 matchfn = scmutil.match(repo[None], opts=opts)
1544 matchfn = scmutil.match(repo[None], opts=opts)
1545 else:
1545 else:
1546 match = scmutil.matchall(repo)
1546 match = scmutil.matchall(repo)
1547 m, a, r, d = repo.status(match=match)[:4]
1547 m, a, r, d = repo.status(match=match)[:4]
1548 mm = set(mm)
1548 mm = set(mm)
1549 aa = set(aa)
1549 aa = set(aa)
1550 dd = set(dd)
1550 dd = set(dd)
1551
1551
1552 # we might end up with files that were added between
1552 # we might end up with files that were added between
1553 # qtip and the dirstate parent, but then changed in the
1553 # qtip and the dirstate parent, but then changed in the
1554 # local dirstate. in this case, we want them to only
1554 # local dirstate. in this case, we want them to only
1555 # show up in the added section
1555 # show up in the added section
1556 for x in m:
1556 for x in m:
1557 if x not in aa:
1557 if x not in aa:
1558 mm.add(x)
1558 mm.add(x)
1559 # we might end up with files added by the local dirstate that
1559 # we might end up with files added by the local dirstate that
1560 # were deleted by the patch. In this case, they should only
1560 # were deleted by the patch. In this case, they should only
1561 # show up in the changed section.
1561 # show up in the changed section.
1562 for x in a:
1562 for x in a:
1563 if x in dd:
1563 if x in dd:
1564 dd.remove(x)
1564 dd.remove(x)
1565 mm.add(x)
1565 mm.add(x)
1566 else:
1566 else:
1567 aa.add(x)
1567 aa.add(x)
1568 # make sure any files deleted in the local dirstate
1568 # make sure any files deleted in the local dirstate
1569 # are not in the add or change column of the patch
1569 # are not in the add or change column of the patch
1570 forget = []
1570 forget = []
1571 for x in d + r:
1571 for x in d + r:
1572 if x in aa:
1572 if x in aa:
1573 aa.remove(x)
1573 aa.remove(x)
1574 forget.append(x)
1574 forget.append(x)
1575 continue
1575 continue
1576 else:
1576 else:
1577 mm.discard(x)
1577 mm.discard(x)
1578 dd.add(x)
1578 dd.add(x)
1579
1579
1580 m = list(mm)
1580 m = list(mm)
1581 r = list(dd)
1581 r = list(dd)
1582 a = list(aa)
1582 a = list(aa)
1583
1583
1584 # create 'match' that includes the files to be recommited.
1584 # create 'match' that includes the files to be recommited.
1585 # apply matchfn via repo.status to ensure correct case handling.
1585 # apply matchfn via repo.status to ensure correct case handling.
1586 cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
1586 cm, ca, cr, cd = repo.status(patchparent, match=matchfn)[:4]
1587 allmatches = set(cm + ca + cr + cd)
1587 allmatches = set(cm + ca + cr + cd)
1588 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1588 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1589
1589
1590 files = set(inclsubs)
1590 files = set(inclsubs)
1591 for x in refreshchanges:
1591 for x in refreshchanges:
1592 files.update(x)
1592 files.update(x)
1593 match = scmutil.matchfiles(repo, files)
1593 match = scmutil.matchfiles(repo, files)
1594
1594
1595 bmlist = repo[top].bookmarks()
1595 bmlist = repo[top].bookmarks()
1596
1596
1597 try:
1597 try:
1598 if diffopts.git or diffopts.upgrade:
1598 if diffopts.git or diffopts.upgrade:
1599 copies = {}
1599 copies = {}
1600 for dst in a:
1600 for dst in a:
1601 src = repo.dirstate.copied(dst)
1601 src = repo.dirstate.copied(dst)
1602 # during qfold, the source file for copies may
1602 # during qfold, the source file for copies may
1603 # be removed. Treat this as a simple add.
1603 # be removed. Treat this as a simple add.
1604 if src is not None and src in repo.dirstate:
1604 if src is not None and src in repo.dirstate:
1605 copies.setdefault(src, []).append(dst)
1605 copies.setdefault(src, []).append(dst)
1606 repo.dirstate.add(dst)
1606 repo.dirstate.add(dst)
1607 # remember the copies between patchparent and qtip
1607 # remember the copies between patchparent and qtip
1608 for dst in aaa:
1608 for dst in aaa:
1609 f = repo.file(dst)
1609 f = repo.file(dst)
1610 src = f.renamed(man[dst])
1610 src = f.renamed(man[dst])
1611 if src:
1611 if src:
1612 copies.setdefault(src[0], []).extend(
1612 copies.setdefault(src[0], []).extend(
1613 copies.get(dst, []))
1613 copies.get(dst, []))
1614 if dst in a:
1614 if dst in a:
1615 copies[src[0]].append(dst)
1615 copies[src[0]].append(dst)
1616 # we can't copy a file created by the patch itself
1616 # we can't copy a file created by the patch itself
1617 if dst in copies:
1617 if dst in copies:
1618 del copies[dst]
1618 del copies[dst]
1619 for src, dsts in copies.iteritems():
1619 for src, dsts in copies.iteritems():
1620 for dst in dsts:
1620 for dst in dsts:
1621 repo.dirstate.copy(src, dst)
1621 repo.dirstate.copy(src, dst)
1622 else:
1622 else:
1623 for dst in a:
1623 for dst in a:
1624 repo.dirstate.add(dst)
1624 repo.dirstate.add(dst)
1625 # Drop useless copy information
1625 # Drop useless copy information
1626 for f in list(repo.dirstate.copies()):
1626 for f in list(repo.dirstate.copies()):
1627 repo.dirstate.copy(None, f)
1627 repo.dirstate.copy(None, f)
1628 for f in r:
1628 for f in r:
1629 repo.dirstate.remove(f)
1629 repo.dirstate.remove(f)
1630 # if the patch excludes a modified file, mark that
1630 # if the patch excludes a modified file, mark that
1631 # file with mtime=0 so status can see it.
1631 # file with mtime=0 so status can see it.
1632 mm = []
1632 mm = []
1633 for i in xrange(len(m) - 1, -1, -1):
1633 for i in xrange(len(m) - 1, -1, -1):
1634 if not matchfn(m[i]):
1634 if not matchfn(m[i]):
1635 mm.append(m[i])
1635 mm.append(m[i])
1636 del m[i]
1636 del m[i]
1637 for f in m:
1637 for f in m:
1638 repo.dirstate.normal(f)
1638 repo.dirstate.normal(f)
1639 for f in mm:
1639 for f in mm:
1640 repo.dirstate.normallookup(f)
1640 repo.dirstate.normallookup(f)
1641 for f in forget:
1641 for f in forget:
1642 repo.dirstate.drop(f)
1642 repo.dirstate.drop(f)
1643
1643
1644 if not msg:
1644 if not msg:
1645 if not ph.message:
1645 if not ph.message:
1646 message = "[mq]: %s\n" % patchfn
1646 message = "[mq]: %s\n" % patchfn
1647 else:
1647 else:
1648 message = "\n".join(ph.message)
1648 message = "\n".join(ph.message)
1649 else:
1649 else:
1650 message = msg
1650 message = msg
1651
1651
1652 user = ph.user or changes[1]
1652 user = ph.user or changes[1]
1653
1653
1654 oldphase = repo[top].phase()
1654 oldphase = repo[top].phase()
1655
1655
1656 # assumes strip can roll itself back if interrupted
1656 # assumes strip can roll itself back if interrupted
1657 repo.setparents(*cparents)
1657 repo.setparents(*cparents)
1658 self.applied.pop()
1658 self.applied.pop()
1659 self.applieddirty = True
1659 self.applieddirty = True
1660 self.strip(repo, [top], update=False,
1660 self.strip(repo, [top], update=False,
1661 backup='strip')
1661 backup='strip')
1662 except: # re-raises
1662 except: # re-raises
1663 repo.dirstate.invalidate()
1663 repo.dirstate.invalidate()
1664 raise
1664 raise
1665
1665
1666 try:
1666 try:
1667 # might be nice to attempt to roll back strip after this
1667 # might be nice to attempt to roll back strip after this
1668
1668
1669 # Ensure we create a new changeset in the same phase than
1669 # Ensure we create a new changeset in the same phase than
1670 # the old one.
1670 # the old one.
1671 n = newcommit(repo, oldphase, message, user, ph.date,
1671 n = newcommit(repo, oldphase, message, user, ph.date,
1672 match=match, force=True)
1672 match=match, force=True)
1673 # only write patch after a successful commit
1673 # only write patch after a successful commit
1674 c = [list(x) for x in refreshchanges]
1674 c = [list(x) for x in refreshchanges]
1675 if inclsubs:
1675 if inclsubs:
1676 self.putsubstate2changes(substatestate, c)
1676 self.putsubstate2changes(substatestate, c)
1677 chunks = patchmod.diff(repo, patchparent,
1677 chunks = patchmod.diff(repo, patchparent,
1678 changes=c, opts=diffopts)
1678 changes=c, opts=diffopts)
1679 for chunk in chunks:
1679 for chunk in chunks:
1680 patchf.write(chunk)
1680 patchf.write(chunk)
1681 patchf.close()
1681 patchf.close()
1682
1682
1683 marks = repo._bookmarks
1683 marks = repo._bookmarks
1684 for bm in bmlist:
1684 for bm in bmlist:
1685 marks[bm] = n
1685 marks[bm] = n
1686 marks.write()
1686 marks.write()
1687
1687
1688 self.applied.append(statusentry(n, patchfn))
1688 self.applied.append(statusentry(n, patchfn))
1689 except: # re-raises
1689 except: # re-raises
1690 ctx = repo[cparents[0]]
1690 ctx = repo[cparents[0]]
1691 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1691 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1692 self.savedirty()
1692 self.savedirty()
1693 self.ui.warn(_('refresh interrupted while patch was popped! '
1693 self.ui.warn(_('refresh interrupted while patch was popped! '
1694 '(revert --all, qpush to recover)\n'))
1694 '(revert --all, qpush to recover)\n'))
1695 raise
1695 raise
1696 finally:
1696 finally:
1697 wlock.release()
1697 wlock.release()
1698 self.removeundo(repo)
1698 self.removeundo(repo)
1699
1699
1700 def init(self, repo, create=False):
1700 def init(self, repo, create=False):
1701 if not create and os.path.isdir(self.path):
1701 if not create and os.path.isdir(self.path):
1702 raise util.Abort(_("patch queue directory already exists"))
1702 raise util.Abort(_("patch queue directory already exists"))
1703 try:
1703 try:
1704 os.mkdir(self.path)
1704 os.mkdir(self.path)
1705 except OSError, inst:
1705 except OSError, inst:
1706 if inst.errno != errno.EEXIST or not create:
1706 if inst.errno != errno.EEXIST or not create:
1707 raise
1707 raise
1708 if create:
1708 if create:
1709 return self.qrepo(create=True)
1709 return self.qrepo(create=True)
1710
1710
1711 def unapplied(self, repo, patch=None):
1711 def unapplied(self, repo, patch=None):
1712 if patch and patch not in self.series:
1712 if patch and patch not in self.series:
1713 raise util.Abort(_("patch %s is not in series file") % patch)
1713 raise util.Abort(_("patch %s is not in series file") % patch)
1714 if not patch:
1714 if not patch:
1715 start = self.seriesend()
1715 start = self.seriesend()
1716 else:
1716 else:
1717 start = self.series.index(patch) + 1
1717 start = self.series.index(patch) + 1
1718 unapplied = []
1718 unapplied = []
1719 for i in xrange(start, len(self.series)):
1719 for i in xrange(start, len(self.series)):
1720 pushable, reason = self.pushable(i)
1720 pushable, reason = self.pushable(i)
1721 if pushable:
1721 if pushable:
1722 unapplied.append((i, self.series[i]))
1722 unapplied.append((i, self.series[i]))
1723 self.explainpushable(i)
1723 self.explainpushable(i)
1724 return unapplied
1724 return unapplied
1725
1725
1726 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1726 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1727 summary=False):
1727 summary=False):
1728 def displayname(pfx, patchname, state):
1728 def displayname(pfx, patchname, state):
1729 if pfx:
1729 if pfx:
1730 self.ui.write(pfx)
1730 self.ui.write(pfx)
1731 if summary:
1731 if summary:
1732 ph = patchheader(self.join(patchname), self.plainmode)
1732 ph = patchheader(self.join(patchname), self.plainmode)
1733 msg = ph.message and ph.message[0] or ''
1733 msg = ph.message and ph.message[0] or ''
1734 if self.ui.formatted():
1734 if self.ui.formatted():
1735 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1735 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1736 if width > 0:
1736 if width > 0:
1737 msg = util.ellipsis(msg, width)
1737 msg = util.ellipsis(msg, width)
1738 else:
1738 else:
1739 msg = ''
1739 msg = ''
1740 self.ui.write(patchname, label='qseries.' + state)
1740 self.ui.write(patchname, label='qseries.' + state)
1741 self.ui.write(': ')
1741 self.ui.write(': ')
1742 self.ui.write(msg, label='qseries.message.' + state)
1742 self.ui.write(msg, label='qseries.message.' + state)
1743 else:
1743 else:
1744 self.ui.write(patchname, label='qseries.' + state)
1744 self.ui.write(patchname, label='qseries.' + state)
1745 self.ui.write('\n')
1745 self.ui.write('\n')
1746
1746
1747 applied = set([p.name for p in self.applied])
1747 applied = set([p.name for p in self.applied])
1748 if length is None:
1748 if length is None:
1749 length = len(self.series) - start
1749 length = len(self.series) - start
1750 if not missing:
1750 if not missing:
1751 if self.ui.verbose:
1751 if self.ui.verbose:
1752 idxwidth = len(str(start + length - 1))
1752 idxwidth = len(str(start + length - 1))
1753 for i in xrange(start, start + length):
1753 for i in xrange(start, start + length):
1754 patch = self.series[i]
1754 patch = self.series[i]
1755 if patch in applied:
1755 if patch in applied:
1756 char, state = 'A', 'applied'
1756 char, state = 'A', 'applied'
1757 elif self.pushable(i)[0]:
1757 elif self.pushable(i)[0]:
1758 char, state = 'U', 'unapplied'
1758 char, state = 'U', 'unapplied'
1759 else:
1759 else:
1760 char, state = 'G', 'guarded'
1760 char, state = 'G', 'guarded'
1761 pfx = ''
1761 pfx = ''
1762 if self.ui.verbose:
1762 if self.ui.verbose:
1763 pfx = '%*d %s ' % (idxwidth, i, char)
1763 pfx = '%*d %s ' % (idxwidth, i, char)
1764 elif status and status != char:
1764 elif status and status != char:
1765 continue
1765 continue
1766 displayname(pfx, patch, state)
1766 displayname(pfx, patch, state)
1767 else:
1767 else:
1768 msng_list = []
1768 msng_list = []
1769 for root, dirs, files in os.walk(self.path):
1769 for root, dirs, files in os.walk(self.path):
1770 d = root[len(self.path) + 1:]
1770 d = root[len(self.path) + 1:]
1771 for f in files:
1771 for f in files:
1772 fl = os.path.join(d, f)
1772 fl = os.path.join(d, f)
1773 if (fl not in self.series and
1773 if (fl not in self.series and
1774 fl not in (self.statuspath, self.seriespath,
1774 fl not in (self.statuspath, self.seriespath,
1775 self.guardspath)
1775 self.guardspath)
1776 and not fl.startswith('.')):
1776 and not fl.startswith('.')):
1777 msng_list.append(fl)
1777 msng_list.append(fl)
1778 for x in sorted(msng_list):
1778 for x in sorted(msng_list):
1779 pfx = self.ui.verbose and ('D ') or ''
1779 pfx = self.ui.verbose and ('D ') or ''
1780 displayname(pfx, x, 'missing')
1780 displayname(pfx, x, 'missing')
1781
1781
1782 def issaveline(self, l):
1782 def issaveline(self, l):
1783 if l.name == '.hg.patches.save.line':
1783 if l.name == '.hg.patches.save.line':
1784 return True
1784 return True
1785
1785
1786 def qrepo(self, create=False):
1786 def qrepo(self, create=False):
1787 ui = self.ui.copy()
1787 ui = self.ui.copy()
1788 ui.setconfig('paths', 'default', '', overlay=False)
1788 ui.setconfig('paths', 'default', '', overlay=False)
1789 ui.setconfig('paths', 'default-push', '', overlay=False)
1789 ui.setconfig('paths', 'default-push', '', overlay=False)
1790 if create or os.path.isdir(self.join(".hg")):
1790 if create or os.path.isdir(self.join(".hg")):
1791 return hg.repository(ui, path=self.path, create=create)
1791 return hg.repository(ui, path=self.path, create=create)
1792
1792
1793 def restore(self, repo, rev, delete=None, qupdate=None):
1793 def restore(self, repo, rev, delete=None, qupdate=None):
1794 desc = repo[rev].description().strip()
1794 desc = repo[rev].description().strip()
1795 lines = desc.splitlines()
1795 lines = desc.splitlines()
1796 i = 0
1796 i = 0
1797 datastart = None
1797 datastart = None
1798 series = []
1798 series = []
1799 applied = []
1799 applied = []
1800 qpp = None
1800 qpp = None
1801 for i, line in enumerate(lines):
1801 for i, line in enumerate(lines):
1802 if line == 'Patch Data:':
1802 if line == 'Patch Data:':
1803 datastart = i + 1
1803 datastart = i + 1
1804 elif line.startswith('Dirstate:'):
1804 elif line.startswith('Dirstate:'):
1805 l = line.rstrip()
1805 l = line.rstrip()
1806 l = l[10:].split(' ')
1806 l = l[10:].split(' ')
1807 qpp = [bin(x) for x in l]
1807 qpp = [bin(x) for x in l]
1808 elif datastart is not None:
1808 elif datastart is not None:
1809 l = line.rstrip()
1809 l = line.rstrip()
1810 n, name = l.split(':', 1)
1810 n, name = l.split(':', 1)
1811 if n:
1811 if n:
1812 applied.append(statusentry(bin(n), name))
1812 applied.append(statusentry(bin(n), name))
1813 else:
1813 else:
1814 series.append(l)
1814 series.append(l)
1815 if datastart is None:
1815 if datastart is None:
1816 self.ui.warn(_("no saved patch data found\n"))
1816 self.ui.warn(_("no saved patch data found\n"))
1817 return 1
1817 return 1
1818 self.ui.warn(_("restoring status: %s\n") % lines[0])
1818 self.ui.warn(_("restoring status: %s\n") % lines[0])
1819 self.fullseries = series
1819 self.fullseries = series
1820 self.applied = applied
1820 self.applied = applied
1821 self.parseseries()
1821 self.parseseries()
1822 self.seriesdirty = True
1822 self.seriesdirty = True
1823 self.applieddirty = True
1823 self.applieddirty = True
1824 heads = repo.changelog.heads()
1824 heads = repo.changelog.heads()
1825 if delete:
1825 if delete:
1826 if rev not in heads:
1826 if rev not in heads:
1827 self.ui.warn(_("save entry has children, leaving it alone\n"))
1827 self.ui.warn(_("save entry has children, leaving it alone\n"))
1828 else:
1828 else:
1829 self.ui.warn(_("removing save entry %s\n") % short(rev))
1829 self.ui.warn(_("removing save entry %s\n") % short(rev))
1830 pp = repo.dirstate.parents()
1830 pp = repo.dirstate.parents()
1831 if rev in pp:
1831 if rev in pp:
1832 update = True
1832 update = True
1833 else:
1833 else:
1834 update = False
1834 update = False
1835 self.strip(repo, [rev], update=update, backup='strip')
1835 self.strip(repo, [rev], update=update, backup='strip')
1836 if qpp:
1836 if qpp:
1837 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1837 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1838 (short(qpp[0]), short(qpp[1])))
1838 (short(qpp[0]), short(qpp[1])))
1839 if qupdate:
1839 if qupdate:
1840 self.ui.status(_("updating queue directory\n"))
1840 self.ui.status(_("updating queue directory\n"))
1841 r = self.qrepo()
1841 r = self.qrepo()
1842 if not r:
1842 if not r:
1843 self.ui.warn(_("unable to load queue repository\n"))
1843 self.ui.warn(_("unable to load queue repository\n"))
1844 return 1
1844 return 1
1845 hg.clean(r, qpp[0])
1845 hg.clean(r, qpp[0])
1846
1846
1847 def save(self, repo, msg=None):
1847 def save(self, repo, msg=None):
1848 if not self.applied:
1848 if not self.applied:
1849 self.ui.warn(_("save: no patches applied, exiting\n"))
1849 self.ui.warn(_("save: no patches applied, exiting\n"))
1850 return 1
1850 return 1
1851 if self.issaveline(self.applied[-1]):
1851 if self.issaveline(self.applied[-1]):
1852 self.ui.warn(_("status is already saved\n"))
1852 self.ui.warn(_("status is already saved\n"))
1853 return 1
1853 return 1
1854
1854
1855 if not msg:
1855 if not msg:
1856 msg = _("hg patches saved state")
1856 msg = _("hg patches saved state")
1857 else:
1857 else:
1858 msg = "hg patches: " + msg.rstrip('\r\n')
1858 msg = "hg patches: " + msg.rstrip('\r\n')
1859 r = self.qrepo()
1859 r = self.qrepo()
1860 if r:
1860 if r:
1861 pp = r.dirstate.parents()
1861 pp = r.dirstate.parents()
1862 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1862 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1863 msg += "\n\nPatch Data:\n"
1863 msg += "\n\nPatch Data:\n"
1864 msg += ''.join('%s\n' % x for x in self.applied)
1864 msg += ''.join('%s\n' % x for x in self.applied)
1865 msg += ''.join(':%s\n' % x for x in self.fullseries)
1865 msg += ''.join(':%s\n' % x for x in self.fullseries)
1866 n = repo.commit(msg, force=True)
1866 n = repo.commit(msg, force=True)
1867 if not n:
1867 if not n:
1868 self.ui.warn(_("repo commit failed\n"))
1868 self.ui.warn(_("repo commit failed\n"))
1869 return 1
1869 return 1
1870 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1870 self.applied.append(statusentry(n, '.hg.patches.save.line'))
1871 self.applieddirty = True
1871 self.applieddirty = True
1872 self.removeundo(repo)
1872 self.removeundo(repo)
1873
1873
1874 def fullseriesend(self):
1874 def fullseriesend(self):
1875 if self.applied:
1875 if self.applied:
1876 p = self.applied[-1].name
1876 p = self.applied[-1].name
1877 end = self.findseries(p)
1877 end = self.findseries(p)
1878 if end is None:
1878 if end is None:
1879 return len(self.fullseries)
1879 return len(self.fullseries)
1880 return end + 1
1880 return end + 1
1881 return 0
1881 return 0
1882
1882
1883 def seriesend(self, all_patches=False):
1883 def seriesend(self, all_patches=False):
1884 """If all_patches is False, return the index of the next pushable patch
1884 """If all_patches is False, return the index of the next pushable patch
1885 in the series, or the series length. If all_patches is True, return the
1885 in the series, or the series length. If all_patches is True, return the
1886 index of the first patch past the last applied one.
1886 index of the first patch past the last applied one.
1887 """
1887 """
1888 end = 0
1888 end = 0
1889 def next(start):
1889 def next(start):
1890 if all_patches or start >= len(self.series):
1890 if all_patches or start >= len(self.series):
1891 return start
1891 return start
1892 for i in xrange(start, len(self.series)):
1892 for i in xrange(start, len(self.series)):
1893 p, reason = self.pushable(i)
1893 p, reason = self.pushable(i)
1894 if p:
1894 if p:
1895 return i
1895 return i
1896 self.explainpushable(i)
1896 self.explainpushable(i)
1897 return len(self.series)
1897 return len(self.series)
1898 if self.applied:
1898 if self.applied:
1899 p = self.applied[-1].name
1899 p = self.applied[-1].name
1900 try:
1900 try:
1901 end = self.series.index(p)
1901 end = self.series.index(p)
1902 except ValueError:
1902 except ValueError:
1903 return 0
1903 return 0
1904 return next(end + 1)
1904 return next(end + 1)
1905 return next(end)
1905 return next(end)
1906
1906
1907 def appliedname(self, index):
1907 def appliedname(self, index):
1908 pname = self.applied[index].name
1908 pname = self.applied[index].name
1909 if not self.ui.verbose:
1909 if not self.ui.verbose:
1910 p = pname
1910 p = pname
1911 else:
1911 else:
1912 p = str(self.series.index(pname)) + " " + pname
1912 p = str(self.series.index(pname)) + " " + pname
1913 return p
1913 return p
1914
1914
1915 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1915 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1916 force=None, git=False):
1916 force=None, git=False):
1917 def checkseries(patchname):
1917 def checkseries(patchname):
1918 if patchname in self.series:
1918 if patchname in self.series:
1919 raise util.Abort(_('patch %s is already in the series file')
1919 raise util.Abort(_('patch %s is already in the series file')
1920 % patchname)
1920 % patchname)
1921
1921
1922 if rev:
1922 if rev:
1923 if files:
1923 if files:
1924 raise util.Abort(_('option "-r" not valid when importing '
1924 raise util.Abort(_('option "-r" not valid when importing '
1925 'files'))
1925 'files'))
1926 rev = scmutil.revrange(repo, rev)
1926 rev = scmutil.revrange(repo, rev)
1927 rev.sort(reverse=True)
1927 rev.sort(reverse=True)
1928 elif not files:
1928 elif not files:
1929 raise util.Abort(_('no files or revisions specified'))
1929 raise util.Abort(_('no files or revisions specified'))
1930 if (len(files) > 1 or len(rev) > 1) and patchname:
1930 if (len(files) > 1 or len(rev) > 1) and patchname:
1931 raise util.Abort(_('option "-n" not valid when importing multiple '
1931 raise util.Abort(_('option "-n" not valid when importing multiple '
1932 'patches'))
1932 'patches'))
1933 imported = []
1933 imported = []
1934 if rev:
1934 if rev:
1935 # If mq patches are applied, we can only import revisions
1935 # If mq patches are applied, we can only import revisions
1936 # that form a linear path to qbase.
1936 # that form a linear path to qbase.
1937 # Otherwise, they should form a linear path to a head.
1937 # Otherwise, they should form a linear path to a head.
1938 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1938 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1939 if len(heads) > 1:
1939 if len(heads) > 1:
1940 raise util.Abort(_('revision %d is the root of more than one '
1940 raise util.Abort(_('revision %d is the root of more than one '
1941 'branch') % rev[-1])
1941 'branch') % rev[-1])
1942 if self.applied:
1942 if self.applied:
1943 base = repo.changelog.node(rev[0])
1943 base = repo.changelog.node(rev[0])
1944 if base in [n.node for n in self.applied]:
1944 if base in [n.node for n in self.applied]:
1945 raise util.Abort(_('revision %d is already managed')
1945 raise util.Abort(_('revision %d is already managed')
1946 % rev[0])
1946 % rev[0])
1947 if heads != [self.applied[-1].node]:
1947 if heads != [self.applied[-1].node]:
1948 raise util.Abort(_('revision %d is not the parent of '
1948 raise util.Abort(_('revision %d is not the parent of '
1949 'the queue') % rev[0])
1949 'the queue') % rev[0])
1950 base = repo.changelog.rev(self.applied[0].node)
1950 base = repo.changelog.rev(self.applied[0].node)
1951 lastparent = repo.changelog.parentrevs(base)[0]
1951 lastparent = repo.changelog.parentrevs(base)[0]
1952 else:
1952 else:
1953 if heads != [repo.changelog.node(rev[0])]:
1953 if heads != [repo.changelog.node(rev[0])]:
1954 raise util.Abort(_('revision %d has unmanaged children')
1954 raise util.Abort(_('revision %d has unmanaged children')
1955 % rev[0])
1955 % rev[0])
1956 lastparent = None
1956 lastparent = None
1957
1957
1958 diffopts = self.diffopts({'git': git})
1958 diffopts = self.diffopts({'git': git})
1959 for r in rev:
1959 for r in rev:
1960 if not repo[r].mutable():
1960 if not repo[r].mutable():
1961 raise util.Abort(_('revision %d is not mutable') % r,
1961 raise util.Abort(_('revision %d is not mutable') % r,
1962 hint=_('see "hg help phases" for details'))
1962 hint=_('see "hg help phases" for details'))
1963 p1, p2 = repo.changelog.parentrevs(r)
1963 p1, p2 = repo.changelog.parentrevs(r)
1964 n = repo.changelog.node(r)
1964 n = repo.changelog.node(r)
1965 if p2 != nullrev:
1965 if p2 != nullrev:
1966 raise util.Abort(_('cannot import merge revision %d') % r)
1966 raise util.Abort(_('cannot import merge revision %d') % r)
1967 if lastparent and lastparent != r:
1967 if lastparent and lastparent != r:
1968 raise util.Abort(_('revision %d is not the parent of %d')
1968 raise util.Abort(_('revision %d is not the parent of %d')
1969 % (r, lastparent))
1969 % (r, lastparent))
1970 lastparent = p1
1970 lastparent = p1
1971
1971
1972 if not patchname:
1972 if not patchname:
1973 patchname = normname('%d.diff' % r)
1973 patchname = normname('%d.diff' % r)
1974 checkseries(patchname)
1974 checkseries(patchname)
1975 self.checkpatchname(patchname, force)
1975 self.checkpatchname(patchname, force)
1976 self.fullseries.insert(0, patchname)
1976 self.fullseries.insert(0, patchname)
1977
1977
1978 patchf = self.opener(patchname, "w")
1978 patchf = self.opener(patchname, "w")
1979 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1979 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1980 patchf.close()
1980 patchf.close()
1981
1981
1982 se = statusentry(n, patchname)
1982 se = statusentry(n, patchname)
1983 self.applied.insert(0, se)
1983 self.applied.insert(0, se)
1984
1984
1985 self.added.append(patchname)
1985 self.added.append(patchname)
1986 imported.append(patchname)
1986 imported.append(patchname)
1987 patchname = None
1987 patchname = None
1988 if rev and repo.ui.configbool('mq', 'secret', False):
1988 if rev and repo.ui.configbool('mq', 'secret', False):
1989 # if we added anything with --rev, we must move the secret root
1989 # if we added anything with --rev, we must move the secret root
1990 phases.retractboundary(repo, phases.secret, [n])
1990 phases.retractboundary(repo, phases.secret, [n])
1991 self.parseseries()
1991 self.parseseries()
1992 self.applieddirty = True
1992 self.applieddirty = True
1993 self.seriesdirty = True
1993 self.seriesdirty = True
1994
1994
1995 for i, filename in enumerate(files):
1995 for i, filename in enumerate(files):
1996 if existing:
1996 if existing:
1997 if filename == '-':
1997 if filename == '-':
1998 raise util.Abort(_('-e is incompatible with import from -'))
1998 raise util.Abort(_('-e is incompatible with import from -'))
1999 filename = normname(filename)
1999 filename = normname(filename)
2000 self.checkreservedname(filename)
2000 self.checkreservedname(filename)
2001 originpath = self.join(filename)
2001 originpath = self.join(filename)
2002 if not os.path.isfile(originpath):
2002 if not os.path.isfile(originpath):
2003 raise util.Abort(_("patch %s does not exist") % filename)
2003 raise util.Abort(_("patch %s does not exist") % filename)
2004
2004
2005 if patchname:
2005 if patchname:
2006 self.checkpatchname(patchname, force)
2006 self.checkpatchname(patchname, force)
2007
2007
2008 self.ui.write(_('renaming %s to %s\n')
2008 self.ui.write(_('renaming %s to %s\n')
2009 % (filename, patchname))
2009 % (filename, patchname))
2010 util.rename(originpath, self.join(patchname))
2010 util.rename(originpath, self.join(patchname))
2011 else:
2011 else:
2012 patchname = filename
2012 patchname = filename
2013
2013
2014 else:
2014 else:
2015 if filename == '-' and not patchname:
2015 if filename == '-' and not patchname:
2016 raise util.Abort(_('need --name to import a patch from -'))
2016 raise util.Abort(_('need --name to import a patch from -'))
2017 elif not patchname:
2017 elif not patchname:
2018 patchname = normname(os.path.basename(filename.rstrip('/')))
2018 patchname = normname(os.path.basename(filename.rstrip('/')))
2019 self.checkpatchname(patchname, force)
2019 self.checkpatchname(patchname, force)
2020 try:
2020 try:
2021 if filename == '-':
2021 if filename == '-':
2022 text = self.ui.fin.read()
2022 text = self.ui.fin.read()
2023 else:
2023 else:
2024 fp = hg.openpath(self.ui, filename)
2024 fp = hg.openpath(self.ui, filename)
2025 text = fp.read()
2025 text = fp.read()
2026 fp.close()
2026 fp.close()
2027 except (OSError, IOError):
2027 except (OSError, IOError):
2028 raise util.Abort(_("unable to read file %s") % filename)
2028 raise util.Abort(_("unable to read file %s") % filename)
2029 patchf = self.opener(patchname, "w")
2029 patchf = self.opener(patchname, "w")
2030 patchf.write(text)
2030 patchf.write(text)
2031 patchf.close()
2031 patchf.close()
2032 if not force:
2032 if not force:
2033 checkseries(patchname)
2033 checkseries(patchname)
2034 if patchname not in self.series:
2034 if patchname not in self.series:
2035 index = self.fullseriesend() + i
2035 index = self.fullseriesend() + i
2036 self.fullseries[index:index] = [patchname]
2036 self.fullseries[index:index] = [patchname]
2037 self.parseseries()
2037 self.parseseries()
2038 self.seriesdirty = True
2038 self.seriesdirty = True
2039 self.ui.warn(_("adding %s to series file\n") % patchname)
2039 self.ui.warn(_("adding %s to series file\n") % patchname)
2040 self.added.append(patchname)
2040 self.added.append(patchname)
2041 imported.append(patchname)
2041 imported.append(patchname)
2042 patchname = None
2042 patchname = None
2043
2043
2044 self.removeundo(repo)
2044 self.removeundo(repo)
2045 return imported
2045 return imported
2046
2046
2047 def fixkeepchangesopts(ui, opts):
2047 def fixkeepchangesopts(ui, opts):
2048 if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
2048 if (not ui.configbool('mq', 'keepchanges') or opts.get('force')
2049 or opts.get('exact')):
2049 or opts.get('exact')):
2050 return opts
2050 return opts
2051 opts = dict(opts)
2051 opts = dict(opts)
2052 opts['keep_changes'] = True
2052 opts['keep_changes'] = True
2053 return opts
2053 return opts
2054
2054
2055 @command("qdelete|qremove|qrm",
2055 @command("qdelete|qremove|qrm",
2056 [('k', 'keep', None, _('keep patch file')),
2056 [('k', 'keep', None, _('keep patch file')),
2057 ('r', 'rev', [],
2057 ('r', 'rev', [],
2058 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2058 _('stop managing a revision (DEPRECATED)'), _('REV'))],
2059 _('hg qdelete [-k] [PATCH]...'))
2059 _('hg qdelete [-k] [PATCH]...'))
2060 def delete(ui, repo, *patches, **opts):
2060 def delete(ui, repo, *patches, **opts):
2061 """remove patches from queue
2061 """remove patches from queue
2062
2062
2063 The patches must not be applied, and at least one patch is required. Exact
2063 The patches must not be applied, and at least one patch is required. Exact
2064 patch identifiers must be given. With -k/--keep, the patch files are
2064 patch identifiers must be given. With -k/--keep, the patch files are
2065 preserved in the patch directory.
2065 preserved in the patch directory.
2066
2066
2067 To stop managing a patch and move it into permanent history,
2067 To stop managing a patch and move it into permanent history,
2068 use the :hg:`qfinish` command."""
2068 use the :hg:`qfinish` command."""
2069 q = repo.mq
2069 q = repo.mq
2070 q.delete(repo, patches, opts)
2070 q.delete(repo, patches, opts)
2071 q.savedirty()
2071 q.savedirty()
2072 return 0
2072 return 0
2073
2073
@command("qapplied",
         [('1', 'last', None, _('show only the preceding applied patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    # Work out how deep into the series the listing should go: up to (and
    # including) the named patch, or up to the last applied patch.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    start = 0
    if opts.get('last'):
        # -1/--last: show only the patch applied just before the top one.
        if not end:
            ui.write(_("no patches applied\n"))
            return 1
        if end == 1:
            ui.write(_("only one patch applied\n"))
            return 1
        start = end - 2
        end = 1

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))
2106
2106
2107
2107
@command("qunapplied",
         [('1', 'first', None, _('show only the first patch'))] + seriesopts,
         _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    # Listing starts just after the named patch, or just after the last
    # applied patch when no name is given.
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if opts.get('first') and start == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1

    if opts.get('first'):
        length = 1
    else:
        length = None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))
2131
2131
@command("qimport",
         [('e', 'existing', None, _('import file in patch directory')),
          ('n', 'name', '',
           _('name of patch file'), _('NAME')),
          ('f', 'force', None, _('overwrite existing files')),
          ('r', 'rev', [],
           _('place existing revisions under mq control'), _('REV')),
          ('g', 'git', None, _('use git extended diff format')),
          ('P', 'push', None, _('qpush after importing'))],
         _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes. Use :hg:`qfinish` to remove changesets from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    lock = repo.lock() # cause this may move phase
    try:
        q = repo.mq
        try:
            imported = q.qimport(repo, filename,
                                 patchname=opts.get('name'),
                                 existing=opts.get('existing'),
                                 force=opts.get('force'),
                                 rev=opts.get('rev'),
                                 git=opts.get('git'))
        finally:
            # Persist series/status even when the import fails part-way.
            q.savedirty()
    finally:
        lock.release()

    # -P/--push applies the freshly imported patch; it is meaningless with
    # --rev, since those changesets are already applied.
    if imported and opts.get('push') and not opts.get('rev'):
        return q.push(repo, imported[-1])
    return 0
2191
2191
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        # A versioned patch repository was created: seed it with an
        # .hgignore (status/guards hold transient state) and an empty
        # series file, then schedule both for commit.
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            for line in ('^\\.hg\n', '^\\.mq\n', 'syntax: glob\n',
                         'status\n', 'guards\n'):
                fp.write(line)
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0
2217
2217
@command("^qinit",
         [('c', 'create-repo', None, _('create queue repository'))],
         _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    # Thin deprecated wrapper around the shared qinit helper.
    return qinit(ui, repo, create=opts.get('create_repo'))
2233
2233
@command("qclone",
         [('', 'pull', None, _('use pull protocol to copy metadata')),
          ('U', 'noupdate', None,
           _('do not update the new working directories')),
          ('', 'uncompressed', None,
           _('use uncompressed transfer (fast over LAN)')),
          ('p', 'patches', '',
           _('location of source patch repository'), _('REPO')),
         ] + commands.remoteopts,
         _('hg qclone [OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.peer(ui, opts, ui.expandpath(source))

    # patches repo (source only): verify it exists before cloning anything.
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.peer(ui, opts, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))

    # Figure out qbase (first applied patch) so it can be stripped from the
    # destination, and which revisions to clone when pushing to a remote.
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        # NOTE(review): qbase is still None here, so repo[qbase] is the
        # working directory context -- presumably a proxy for whether the
        # mq patches are secret; confirm before changing.
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass

    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))

    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            repo.mq.strip(repo, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
2319
2319
@command("qcommit|qci",
         commands.table["^commit|ci"][1],
         _('hg qcommit [OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    # Run a normal commit, but against the nested patch repository.
    qrepo = repo.mq.qrepo()
    if not qrepo:
        raise util.Abort('no queue repository')
    commands.commit(qrepo.ui, qrepo, *pats, **opts)
2332
2332
@command("qseries",
         [('m', 'missing', None, _('print patches not in series')),
         ] + seriesopts,
         _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(repo,
                    missing=opts.get('missing'),
                    summary=opts.get('summary'))
    return 0
2344
2344
@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    # Index one past the topmost applied patch, or 0 when none applied.
    if q.applied:
        t = q.seriesend(True)
    else:
        t = 0
    if t:
        q.qseries(repo, start=t - 1, length=1, status='A',
                  summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1
2358
2358
@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    # seriesend() gives the index of the first unapplied patch.
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2370
2370
@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    napplied = len(q.applied)
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    # Second-to-last applied patch is the "previous" one.
    idx = q.series.index(q.applied[-2].name)
    q.qseries(repo, start=idx, length=1, status='A',
              summary=opts.get('summary'))
2387
2387
def setupheaderopts(ui, opts):
    """Fill in 'user'/'date' opts from -U/--currentuser and -D/--currentdate.

    Explicit -u/-d values always win over the "current" flags.
    """
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
2393
2393
@command("^qnew",
         [('e', 'edit', None, _('edit commit message')),
          ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
          ('g', 'git', None, _('use git extended diff format')),
          ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
          ('u', 'user', '',
           _('add "From: <USER>" to patch'), _('USER')),
          ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
          ('d', 'date', '',
           _('add "Date: <DATE>" to patch'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(ui, opts)
    def getmsg():
        # Deferred so the editor is launched only when mq actually
        # needs the message.
        return ui.edit(msg, opts.get('user') or ui.username())
    q = repo.mq
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0
2444
2444
@command("^qrefresh",
         [('e', 'edit', None, _('edit commit message')),
          ('g', 'git', None, _('use git extended diff format')),
          ('s', 'short', None,
           _('refresh only files already in the patch and specified files')),
          ('U', 'currentuser', None,
           _('add/update author field in patch with current user')),
          ('u', 'user', '',
           _('add/update author field in patch with given user'), _('USER')),
          ('D', 'currentdate', None,
           _('add/update date field in patch with current date')),
          ('d', 'date', '',
           _('add/update date field in patch with given date'), _('DATE'))
          ] + commands.walkopts + commands.commitopts,
         _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # Pre-fill the editor with the top patch's current header.
        top = q.applied[-1].name
        header = patchheader(q.join(top), q.plainmode)
        message = ui.edit('\n'.join(header.message),
                          header.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        repo.savecommitmessage(message)
    setupheaderopts(ui, opts)
    wlock = repo.wlock()
    try:
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
    finally:
        wlock.release()
2502
2502
@command("^qdiff",
         commands.diffopts + commands.diffopts2 + commands.walkopts,
         _('hg qdiff [OPTION]... [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    # All the work happens in the queue object; this is just the CLI shim.
    repo.mq.diff(repo, pats, opts)
    return 0
2523
2523
@command('qfold',
         [('e', 'edit', None, _('edit patch header')),
          ('k', 'keep', None, _('keep folded patch files')),
          ] + commands.commitopts,
         _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    q = repo.mq
    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)
    # -e means "compose the message interactively"; an explicit message
    # would make that meaningless.
    if opts.get('edit') and message:
        raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    # Resolve every name first so we fail before touching anything.
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)

    for p in patches:
        if not message:
            # Collect each folded patch's message so it can be appended
            # to the cumulative header below.
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)

    if not message:
        # Start from the current patch's header and append the folded
        # messages, separated by '* * *' lines.
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    wlock = repo.wlock()
    try:
        q.refresh(repo, msg=message, git=diffopts.git)
        q.delete(repo, patches, opts)
        q.savedirty()
    finally:
        wlock.release()
2596
2596
2597 @command("qgoto",
2597 @command("qgoto",
2598 [('', 'keep-changes', None,
2598 [('', 'keep-changes', None,
2599 _('tolerate non-conflicting local changes')),
2599 _('tolerate non-conflicting local changes')),
2600 ('f', 'force', None, _('overwrite any local changes')),
2600 ('f', 'force', None, _('overwrite any local changes')),
2601 ('', 'no-backup', None, _('do not save backup copies of files'))],
2601 ('', 'no-backup', None, _('do not save backup copies of files'))],
2602 _('hg qgoto [OPTION]... PATCH'))
2602 _('hg qgoto [OPTION]... PATCH'))
2603 def goto(ui, repo, patch, **opts):
2603 def goto(ui, repo, patch, **opts):
2604 '''push or pop patches until named patch is at top of stack
2604 '''push or pop patches until named patch is at top of stack
2605
2605
2606 Returns 0 on success.'''
2606 Returns 0 on success.'''
2607 opts = fixkeepchangesopts(ui, opts)
2607 opts = fixkeepchangesopts(ui, opts)
2608 q = repo.mq
2608 q = repo.mq
2609 patch = q.lookup(patch)
2609 patch = q.lookup(patch)
2610 nobackup = opts.get('no_backup')
2610 nobackup = opts.get('no_backup')
2611 keepchanges = opts.get('keep_changes')
2611 keepchanges = opts.get('keep_changes')
2612 if q.isapplied(patch):
2612 if q.isapplied(patch):
2613 ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
2613 ret = q.pop(repo, patch, force=opts.get('force'), nobackup=nobackup,
2614 keepchanges=keepchanges)
2614 keepchanges=keepchanges)
2615 else:
2615 else:
2616 ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
2616 ret = q.push(repo, patch, force=opts.get('force'), nobackup=nobackup,
2617 keepchanges=keepchanges)
2617 keepchanges=keepchanges)
2618 q.savedirty()
2618 q.savedirty()
2619 return ret
2619 return ret
2620
2620
2621 @command("qguard",
2621 @command("qguard",
2622 [('l', 'list', None, _('list all patches and guards')),
2622 [('l', 'list', None, _('list all patches and guards')),
2623 ('n', 'none', None, _('drop all guards'))],
2623 ('n', 'none', None, _('drop all guards'))],
2624 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
2624 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
2625 def guard(ui, repo, *args, **opts):
2625 def guard(ui, repo, *args, **opts):
2626 '''set or print guards for a patch
2626 '''set or print guards for a patch
2627
2627
2628 Guards control whether a patch can be pushed. A patch with no
2628 Guards control whether a patch can be pushed. A patch with no
2629 guards is always pushed. A patch with a positive guard ("+foo") is
2629 guards is always pushed. A patch with a positive guard ("+foo") is
2630 pushed only if the :hg:`qselect` command has activated it. A patch with
2630 pushed only if the :hg:`qselect` command has activated it. A patch with
2631 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2631 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2632 has activated it.
2632 has activated it.
2633
2633
2634 With no arguments, print the currently active guards.
2634 With no arguments, print the currently active guards.
2635 With arguments, set guards for the named patch.
2635 With arguments, set guards for the named patch.
2636
2636
2637 .. note::
2637 .. note::
2638 Specifying negative guards now requires '--'.
2638 Specifying negative guards now requires '--'.
2639
2639
2640 To set guards on another patch::
2640 To set guards on another patch::
2641
2641
2642 hg qguard other.patch -- +2.6.17 -stable
2642 hg qguard other.patch -- +2.6.17 -stable
2643
2643
2644 Returns 0 on success.
2644 Returns 0 on success.
2645 '''
2645 '''
2646 def status(idx):
2646 def status(idx):
2647 guards = q.seriesguards[idx] or ['unguarded']
2647 guards = q.seriesguards[idx] or ['unguarded']
2648 if q.series[idx] in applied:
2648 if q.series[idx] in applied:
2649 state = 'applied'
2649 state = 'applied'
2650 elif q.pushable(idx)[0]:
2650 elif q.pushable(idx)[0]:
2651 state = 'unapplied'
2651 state = 'unapplied'
2652 else:
2652 else:
2653 state = 'guarded'
2653 state = 'guarded'
2654 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2654 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2655 ui.write('%s: ' % ui.label(q.series[idx], label))
2655 ui.write('%s: ' % ui.label(q.series[idx], label))
2656
2656
2657 for i, guard in enumerate(guards):
2657 for i, guard in enumerate(guards):
2658 if guard.startswith('+'):
2658 if guard.startswith('+'):
2659 ui.write(guard, label='qguard.positive')
2659 ui.write(guard, label='qguard.positive')
2660 elif guard.startswith('-'):
2660 elif guard.startswith('-'):
2661 ui.write(guard, label='qguard.negative')
2661 ui.write(guard, label='qguard.negative')
2662 else:
2662 else:
2663 ui.write(guard, label='qguard.unguarded')
2663 ui.write(guard, label='qguard.unguarded')
2664 if i != len(guards) - 1:
2664 if i != len(guards) - 1:
2665 ui.write(' ')
2665 ui.write(' ')
2666 ui.write('\n')
2666 ui.write('\n')
2667 q = repo.mq
2667 q = repo.mq
2668 applied = set(p.name for p in q.applied)
2668 applied = set(p.name for p in q.applied)
2669 patch = None
2669 patch = None
2670 args = list(args)
2670 args = list(args)
2671 if opts.get('list'):
2671 if opts.get('list'):
2672 if args or opts.get('none'):
2672 if args or opts.get('none'):
2673 raise util.Abort(_('cannot mix -l/--list with options or '
2673 raise util.Abort(_('cannot mix -l/--list with options or '
2674 'arguments'))
2674 'arguments'))
2675 for i in xrange(len(q.series)):
2675 for i in xrange(len(q.series)):
2676 status(i)
2676 status(i)
2677 return
2677 return
2678 if not args or args[0][0:1] in '-+':
2678 if not args or args[0][0:1] in '-+':
2679 if not q.applied:
2679 if not q.applied:
2680 raise util.Abort(_('no patches applied'))
2680 raise util.Abort(_('no patches applied'))
2681 patch = q.applied[-1].name
2681 patch = q.applied[-1].name
2682 if patch is None and args[0][0:1] not in '-+':
2682 if patch is None and args[0][0:1] not in '-+':
2683 patch = args.pop(0)
2683 patch = args.pop(0)
2684 if patch is None:
2684 if patch is None:
2685 raise util.Abort(_('no patch to work with'))
2685 raise util.Abort(_('no patch to work with'))
2686 if args or opts.get('none'):
2686 if args or opts.get('none'):
2687 idx = q.findseries(patch)
2687 idx = q.findseries(patch)
2688 if idx is None:
2688 if idx is None:
2689 raise util.Abort(_('no patch named %s') % patch)
2689 raise util.Abort(_('no patch named %s') % patch)
2690 q.setguards(idx, args)
2690 q.setguards(idx, args)
2691 q.savedirty()
2691 q.savedirty()
2692 else:
2692 else:
2693 status(q.series.index(q.lookup(patch)))
2693 status(q.series.index(q.lookup(patch)))
2694
2694
2695 @command("qheader", [], _('hg qheader [PATCH]'))
2695 @command("qheader", [], _('hg qheader [PATCH]'))
2696 def header(ui, repo, patch=None):
2696 def header(ui, repo, patch=None):
2697 """print the header of the topmost or specified patch
2697 """print the header of the topmost or specified patch
2698
2698
2699 Returns 0 on success."""
2699 Returns 0 on success."""
2700 q = repo.mq
2700 q = repo.mq
2701
2701
2702 if patch:
2702 if patch:
2703 patch = q.lookup(patch)
2703 patch = q.lookup(patch)
2704 else:
2704 else:
2705 if not q.applied:
2705 if not q.applied:
2706 ui.write(_('no patches applied\n'))
2706 ui.write(_('no patches applied\n'))
2707 return 1
2707 return 1
2708 patch = q.lookup('qtip')
2708 patch = q.lookup('qtip')
2709 ph = patchheader(q.join(patch), q.plainmode)
2709 ph = patchheader(q.join(patch), q.plainmode)
2710
2710
2711 ui.write('\n'.join(ph.message) + '\n')
2711 ui.write('\n'.join(ph.message) + '\n')
2712
2712
def lastsavename(path):
    """Find the highest-numbered saved copy of ``path``.

    Saved queues are named ``<path>.N`` for an integer N. Scan the
    directory containing ``path`` and return a tuple
    ``(fullpath, N)`` for the entry with the largest N, or
    ``(None, None)`` when no saved copy exists.
    """
    directory, base = os.path.split(path)
    # Escape the base name and match a literal dot: the old pattern
    # "%s.([0-9]+)" % base let '.' match any character and misbehaved
    # when the base name contained regex metacharacters.
    namere = re.compile(re.escape(base) + r"\.([0-9]+)")
    maxindex = None
    maxname = None
    for f in os.listdir(directory):
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2729
2729
def savename(path):
    """Return the next unused save name, ``<path>.<N+1>``."""
    last, index = lastsavename(path)
    if last is None:
        # No previous save: start numbering at 1.
        index = 0
    return path + ".%d" % (index + 1)
2736
2736
2737 @command("^qpush",
2737 @command("^qpush",
2738 [('', 'keep-changes', None,
2738 [('', 'keep-changes', None,
2739 _('tolerate non-conflicting local changes')),
2739 _('tolerate non-conflicting local changes')),
2740 ('f', 'force', None, _('apply on top of local changes')),
2740 ('f', 'force', None, _('apply on top of local changes')),
2741 ('e', 'exact', None,
2741 ('e', 'exact', None,
2742 _('apply the target patch to its recorded parent')),
2742 _('apply the target patch to its recorded parent')),
2743 ('l', 'list', None, _('list patch name in commit text')),
2743 ('l', 'list', None, _('list patch name in commit text')),
2744 ('a', 'all', None, _('apply all patches')),
2744 ('a', 'all', None, _('apply all patches')),
2745 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2745 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2746 ('n', 'name', '',
2746 ('n', 'name', '',
2747 _('merge queue name (DEPRECATED)'), _('NAME')),
2747 _('merge queue name (DEPRECATED)'), _('NAME')),
2748 ('', 'move', None,
2748 ('', 'move', None,
2749 _('reorder patch series and apply only the patch')),
2749 _('reorder patch series and apply only the patch')),
2750 ('', 'no-backup', None, _('do not save backup copies of files'))],
2750 ('', 'no-backup', None, _('do not save backup copies of files'))],
2751 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
2751 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
2752 def push(ui, repo, patch=None, **opts):
2752 def push(ui, repo, patch=None, **opts):
2753 """push the next patch onto the stack
2753 """push the next patch onto the stack
2754
2754
2755 By default, abort if the working directory contains uncommitted
2755 By default, abort if the working directory contains uncommitted
2756 changes. With --keep-changes, abort only if the uncommitted files
2756 changes. With --keep-changes, abort only if the uncommitted files
2757 overlap with patched files. With -f/--force, backup and patch over
2757 overlap with patched files. With -f/--force, backup and patch over
2758 uncommitted changes.
2758 uncommitted changes.
2759
2759
2760 Return 0 on success.
2760 Return 0 on success.
2761 """
2761 """
2762 q = repo.mq
2762 q = repo.mq
2763 mergeq = None
2763 mergeq = None
2764
2764
2765 opts = fixkeepchangesopts(ui, opts)
2765 opts = fixkeepchangesopts(ui, opts)
2766 if opts.get('merge'):
2766 if opts.get('merge'):
2767 if opts.get('name'):
2767 if opts.get('name'):
2768 newpath = repo.join(opts.get('name'))
2768 newpath = repo.join(opts.get('name'))
2769 else:
2769 else:
2770 newpath, i = lastsavename(q.path)
2770 newpath, i = lastsavename(q.path)
2771 if not newpath:
2771 if not newpath:
2772 ui.warn(_("no saved queues found, please use -n\n"))
2772 ui.warn(_("no saved queues found, please use -n\n"))
2773 return 1
2773 return 1
2774 mergeq = queue(ui, repo.path, newpath)
2774 mergeq = queue(ui, repo.path, newpath)
2775 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2775 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2776 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2776 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2777 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2777 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2778 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
2778 exact=opts.get('exact'), nobackup=opts.get('no_backup'),
2779 keepchanges=opts.get('keep_changes'))
2779 keepchanges=opts.get('keep_changes'))
2780 return ret
2780 return ret
2781
2781
2782 @command("^qpop",
2782 @command("^qpop",
2783 [('a', 'all', None, _('pop all patches')),
2783 [('a', 'all', None, _('pop all patches')),
2784 ('n', 'name', '',
2784 ('n', 'name', '',
2785 _('queue name to pop (DEPRECATED)'), _('NAME')),
2785 _('queue name to pop (DEPRECATED)'), _('NAME')),
2786 ('', 'keep-changes', None,
2786 ('', 'keep-changes', None,
2787 _('tolerate non-conflicting local changes')),
2787 _('tolerate non-conflicting local changes')),
2788 ('f', 'force', None, _('forget any local changes to patched files')),
2788 ('f', 'force', None, _('forget any local changes to patched files')),
2789 ('', 'no-backup', None, _('do not save backup copies of files'))],
2789 ('', 'no-backup', None, _('do not save backup copies of files'))],
2790 _('hg qpop [-a] [-f] [PATCH | INDEX]'))
2790 _('hg qpop [-a] [-f] [PATCH | INDEX]'))
2791 def pop(ui, repo, patch=None, **opts):
2791 def pop(ui, repo, patch=None, **opts):
2792 """pop the current patch off the stack
2792 """pop the current patch off the stack
2793
2793
2794 Without argument, pops off the top of the patch stack. If given a
2794 Without argument, pops off the top of the patch stack. If given a
2795 patch name, keeps popping off patches until the named patch is at
2795 patch name, keeps popping off patches until the named patch is at
2796 the top of the stack.
2796 the top of the stack.
2797
2797
2798 By default, abort if the working directory contains uncommitted
2798 By default, abort if the working directory contains uncommitted
2799 changes. With --keep-changes, abort only if the uncommitted files
2799 changes. With --keep-changes, abort only if the uncommitted files
2800 overlap with patched files. With -f/--force, backup and discard
2800 overlap with patched files. With -f/--force, backup and discard
2801 changes made to such files.
2801 changes made to such files.
2802
2802
2803 Return 0 on success.
2803 Return 0 on success.
2804 """
2804 """
2805 opts = fixkeepchangesopts(ui, opts)
2805 opts = fixkeepchangesopts(ui, opts)
2806 localupdate = True
2806 localupdate = True
2807 if opts.get('name'):
2807 if opts.get('name'):
2808 q = queue(ui, repo.path, repo.join(opts.get('name')))
2808 q = queue(ui, repo.path, repo.join(opts.get('name')))
2809 ui.warn(_('using patch queue: %s\n') % q.path)
2809 ui.warn(_('using patch queue: %s\n') % q.path)
2810 localupdate = False
2810 localupdate = False
2811 else:
2811 else:
2812 q = repo.mq
2812 q = repo.mq
2813 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2813 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2814 all=opts.get('all'), nobackup=opts.get('no_backup'),
2814 all=opts.get('all'), nobackup=opts.get('no_backup'),
2815 keepchanges=opts.get('keep_changes'))
2815 keepchanges=opts.get('keep_changes'))
2816 q.savedirty()
2816 q.savedirty()
2817 return ret
2817 return ret
2818
2818
2819 @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
2819 @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
2820 def rename(ui, repo, patch, name=None, **opts):
2820 def rename(ui, repo, patch, name=None, **opts):
2821 """rename a patch
2821 """rename a patch
2822
2822
2823 With one argument, renames the current patch to PATCH1.
2823 With one argument, renames the current patch to PATCH1.
2824 With two arguments, renames PATCH1 to PATCH2.
2824 With two arguments, renames PATCH1 to PATCH2.
2825
2825
2826 Returns 0 on success."""
2826 Returns 0 on success."""
2827 q = repo.mq
2827 q = repo.mq
2828 if not name:
2828 if not name:
2829 name = patch
2829 name = patch
2830 patch = None
2830 patch = None
2831
2831
2832 if patch:
2832 if patch:
2833 patch = q.lookup(patch)
2833 patch = q.lookup(patch)
2834 else:
2834 else:
2835 if not q.applied:
2835 if not q.applied:
2836 ui.write(_('no patches applied\n'))
2836 ui.write(_('no patches applied\n'))
2837 return
2837 return
2838 patch = q.lookup('qtip')
2838 patch = q.lookup('qtip')
2839 absdest = q.join(name)
2839 absdest = q.join(name)
2840 if os.path.isdir(absdest):
2840 if os.path.isdir(absdest):
2841 name = normname(os.path.join(name, os.path.basename(patch)))
2841 name = normname(os.path.join(name, os.path.basename(patch)))
2842 absdest = q.join(name)
2842 absdest = q.join(name)
2843 q.checkpatchname(name)
2843 q.checkpatchname(name)
2844
2844
2845 ui.note(_('renaming %s to %s\n') % (patch, name))
2845 ui.note(_('renaming %s to %s\n') % (patch, name))
2846 i = q.findseries(patch)
2846 i = q.findseries(patch)
2847 guards = q.guard_re.findall(q.fullseries[i])
2847 guards = q.guard_re.findall(q.fullseries[i])
2848 q.fullseries[i] = name + ''.join([' #' + g for g in guards])
2848 q.fullseries[i] = name + ''.join([' #' + g for g in guards])
2849 q.parseseries()
2849 q.parseseries()
2850 q.seriesdirty = True
2850 q.seriesdirty = True
2851
2851
2852 info = q.isapplied(patch)
2852 info = q.isapplied(patch)
2853 if info:
2853 if info:
2854 q.applied[info[0]] = statusentry(info[1], name)
2854 q.applied[info[0]] = statusentry(info[1], name)
2855 q.applieddirty = True
2855 q.applieddirty = True
2856
2856
2857 destdir = os.path.dirname(absdest)
2857 destdir = os.path.dirname(absdest)
2858 if not os.path.isdir(destdir):
2858 if not os.path.isdir(destdir):
2859 os.makedirs(destdir)
2859 os.makedirs(destdir)
2860 util.rename(q.join(patch), absdest)
2860 util.rename(q.join(patch), absdest)
2861 r = q.qrepo()
2861 r = q.qrepo()
2862 if r and patch in r.dirstate:
2862 if r and patch in r.dirstate:
2863 wctx = r[None]
2863 wctx = r[None]
2864 wlock = r.wlock()
2864 wlock = r.wlock()
2865 try:
2865 try:
2866 if r.dirstate[patch] == 'a':
2866 if r.dirstate[patch] == 'a':
2867 r.dirstate.drop(patch)
2867 r.dirstate.drop(patch)
2868 r.dirstate.add(name)
2868 r.dirstate.add(name)
2869 else:
2869 else:
2870 wctx.copy(patch, name)
2870 wctx.copy(patch, name)
2871 wctx.forget([patch])
2871 wctx.forget([patch])
2872 finally:
2872 finally:
2873 wlock.release()
2873 wlock.release()
2874
2874
2875 q.savedirty()
2875 q.savedirty()
2876
2876
2877 @command("qrestore",
2877 @command("qrestore",
2878 [('d', 'delete', None, _('delete save entry')),
2878 [('d', 'delete', None, _('delete save entry')),
2879 ('u', 'update', None, _('update queue working directory'))],
2879 ('u', 'update', None, _('update queue working directory'))],
2880 _('hg qrestore [-d] [-u] REV'))
2880 _('hg qrestore [-d] [-u] REV'))
2881 def restore(ui, repo, rev, **opts):
2881 def restore(ui, repo, rev, **opts):
2882 """restore the queue state saved by a revision (DEPRECATED)
2882 """restore the queue state saved by a revision (DEPRECATED)
2883
2883
2884 This command is deprecated, use :hg:`rebase` instead."""
2884 This command is deprecated, use :hg:`rebase` instead."""
2885 rev = repo.lookup(rev)
2885 rev = repo.lookup(rev)
2886 q = repo.mq
2886 q = repo.mq
2887 q.restore(repo, rev, delete=opts.get('delete'),
2887 q.restore(repo, rev, delete=opts.get('delete'),
2888 qupdate=opts.get('update'))
2888 qupdate=opts.get('update'))
2889 q.savedirty()
2889 q.savedirty()
2890 return 0
2890 return 0
2891
2891
2892 @command("qsave",
2892 @command("qsave",
2893 [('c', 'copy', None, _('copy patch directory')),
2893 [('c', 'copy', None, _('copy patch directory')),
2894 ('n', 'name', '',
2894 ('n', 'name', '',
2895 _('copy directory name'), _('NAME')),
2895 _('copy directory name'), _('NAME')),
2896 ('e', 'empty', None, _('clear queue status file')),
2896 ('e', 'empty', None, _('clear queue status file')),
2897 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2897 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2898 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
2898 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
2899 def save(ui, repo, **opts):
2899 def save(ui, repo, **opts):
2900 """save current queue state (DEPRECATED)
2900 """save current queue state (DEPRECATED)
2901
2901
2902 This command is deprecated, use :hg:`rebase` instead."""
2902 This command is deprecated, use :hg:`rebase` instead."""
2903 q = repo.mq
2903 q = repo.mq
2904 message = cmdutil.logmessage(ui, opts)
2904 message = cmdutil.logmessage(ui, opts)
2905 ret = q.save(repo, msg=message)
2905 ret = q.save(repo, msg=message)
2906 if ret:
2906 if ret:
2907 return ret
2907 return ret
2908 q.savedirty() # save to .hg/patches before copying
2908 q.savedirty() # save to .hg/patches before copying
2909 if opts.get('copy'):
2909 if opts.get('copy'):
2910 path = q.path
2910 path = q.path
2911 if opts.get('name'):
2911 if opts.get('name'):
2912 newpath = os.path.join(q.basepath, opts.get('name'))
2912 newpath = os.path.join(q.basepath, opts.get('name'))
2913 if os.path.exists(newpath):
2913 if os.path.exists(newpath):
2914 if not os.path.isdir(newpath):
2914 if not os.path.isdir(newpath):
2915 raise util.Abort(_('destination %s exists and is not '
2915 raise util.Abort(_('destination %s exists and is not '
2916 'a directory') % newpath)
2916 'a directory') % newpath)
2917 if not opts.get('force'):
2917 if not opts.get('force'):
2918 raise util.Abort(_('destination %s exists, '
2918 raise util.Abort(_('destination %s exists, '
2919 'use -f to force') % newpath)
2919 'use -f to force') % newpath)
2920 else:
2920 else:
2921 newpath = savename(path)
2921 newpath = savename(path)
2922 ui.warn(_("copy %s to %s\n") % (path, newpath))
2922 ui.warn(_("copy %s to %s\n") % (path, newpath))
2923 util.copyfiles(path, newpath)
2923 util.copyfiles(path, newpath)
2924 if opts.get('empty'):
2924 if opts.get('empty'):
2925 del q.applied[:]
2925 del q.applied[:]
2926 q.applieddirty = True
2926 q.applieddirty = True
2927 q.savedirty()
2927 q.savedirty()
2928 return 0
2928 return 0
2929
2929
2930 @command("strip",
2930 @command("strip",
2931 [
2931 [
2932 ('r', 'rev', [], _('strip specified revision (optional, '
2932 ('r', 'rev', [], _('strip specified revision (optional, '
2933 'can specify revisions without this '
2933 'can specify revisions without this '
2934 'option)'), _('REV')),
2934 'option)'), _('REV')),
2935 ('f', 'force', None, _('force removal of changesets, discard '
2935 ('f', 'force', None, _('force removal of changesets, discard '
2936 'uncommitted changes (no backup)')),
2936 'uncommitted changes (no backup)')),
2937 ('b', 'backup', None, _('bundle only changesets with local revision'
2937 ('b', 'backup', None, _('bundle only changesets with local revision'
2938 ' number greater than REV which are not'
2938 ' number greater than REV which are not'
2939 ' descendants of REV (DEPRECATED)')),
2939 ' descendants of REV (DEPRECATED)')),
2940 ('', 'no-backup', None, _('no backups')),
2940 ('', 'no-backup', None, _('no backups')),
2941 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
2941 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
2942 ('n', '', None, _('ignored (DEPRECATED)')),
2942 ('n', '', None, _('ignored (DEPRECATED)')),
2943 ('k', 'keep', None, _("do not modify working copy during strip")),
2943 ('k', 'keep', None, _("do not modify working copy during strip")),
2944 ('B', 'bookmark', '', _("remove revs only reachable from given"
2944 ('B', 'bookmark', '', _("remove revs only reachable from given"
2945 " bookmark"))],
2945 " bookmark"))],
2946 _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
2946 _('hg strip [-k] [-f] [-n] [-B bookmark] [-r] REV...'))
2947 def strip(ui, repo, *revs, **opts):
2947 def strip(ui, repo, *revs, **opts):
2948 """strip changesets and all their descendants from the repository
2948 """strip changesets and all their descendants from the repository
2949
2949
2950 The strip command removes the specified changesets and all their
2950 The strip command removes the specified changesets and all their
2951 descendants. If the working directory has uncommitted changes, the
2951 descendants. If the working directory has uncommitted changes, the
2952 operation is aborted unless the --force flag is supplied, in which
2952 operation is aborted unless the --force flag is supplied, in which
2953 case changes will be discarded.
2953 case changes will be discarded.
2954
2954
2955 If a parent of the working directory is stripped, then the working
2955 If a parent of the working directory is stripped, then the working
2956 directory will automatically be updated to the most recent
2956 directory will automatically be updated to the most recent
2957 available ancestor of the stripped parent after the operation
2957 available ancestor of the stripped parent after the operation
2958 completes.
2958 completes.
2959
2959
2960 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2960 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2961 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2961 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2962 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2962 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2963 where BUNDLE is the bundle file created by the strip. Note that
2963 where BUNDLE is the bundle file created by the strip. Note that
2964 the local revision numbers will in general be different after the
2964 the local revision numbers will in general be different after the
2965 restore.
2965 restore.
2966
2966
2967 Use the --no-backup option to discard the backup bundle once the
2967 Use the --no-backup option to discard the backup bundle once the
2968 operation completes.
2968 operation completes.
2969
2969
2970 Strip is not a history-rewriting operation and can be used on
2970 Strip is not a history-rewriting operation and can be used on
2971 changesets in the public phase. But if the stripped changesets have
2971 changesets in the public phase. But if the stripped changesets have
2972 been pushed to a remote repository you will likely pull them again.
2972 been pushed to a remote repository you will likely pull them again.
2973
2973
2974 Return 0 on success.
2974 Return 0 on success.
2975 """
2975 """
2976 backup = 'all'
2976 backup = 'all'
2977 if opts.get('backup'):
2977 if opts.get('backup'):
2978 backup = 'strip'
2978 backup = 'strip'
2979 elif opts.get('no_backup') or opts.get('nobackup'):
2979 elif opts.get('no_backup') or opts.get('nobackup'):
2980 backup = 'none'
2980 backup = 'none'
2981
2981
2982 cl = repo.changelog
2982 cl = repo.changelog
2983 revs = list(revs) + opts.get('rev')
2983 revs = list(revs) + opts.get('rev')
2984 revs = set(scmutil.revrange(repo, revs))
2984 revs = set(scmutil.revrange(repo, revs))
2985
2985
2986 if opts.get('bookmark'):
2986 if opts.get('bookmark'):
2987 mark = opts.get('bookmark')
2987 mark = opts.get('bookmark')
2988 marks = repo._bookmarks
2988 marks = repo._bookmarks
2989 if mark not in marks:
2989 if mark not in marks:
2990 raise util.Abort(_("bookmark '%s' not found") % mark)
2990 raise util.Abort(_("bookmark '%s' not found") % mark)
2991
2991
2992 # If the requested bookmark is not the only one pointing to a
2992 # If the requested bookmark is not the only one pointing to a
2993 # a revision we have to only delete the bookmark and not strip
2993 # a revision we have to only delete the bookmark and not strip
2994 # anything. revsets cannot detect that case.
2994 # anything. revsets cannot detect that case.
2995 uniquebm = True
2995 uniquebm = True
2996 for m, n in marks.iteritems():
2996 for m, n in marks.iteritems():
2997 if m != mark and n == repo[mark].node():
2997 if m != mark and n == repo[mark].node():
2998 uniquebm = False
2998 uniquebm = False
2999 break
2999 break
3000 if uniquebm:
3000 if uniquebm:
3001 rsrevs = repo.revs("ancestors(bookmark(%s)) - "
3001 rsrevs = repo.revs("ancestors(bookmark(%s)) - "
3002 "ancestors(head() and not bookmark(%s)) - "
3002 "ancestors(head() and not bookmark(%s)) - "
3003 "ancestors(bookmark() and not bookmark(%s))",
3003 "ancestors(bookmark() and not bookmark(%s))",
3004 mark, mark, mark)
3004 mark, mark, mark)
3005 revs.update(set(rsrevs))
3005 revs.update(set(rsrevs))
3006 if not revs:
3006 if not revs:
3007 del marks[mark]
3007 del marks[mark]
3008 marks.write()
3008 marks.write()
3009 ui.write(_("bookmark '%s' deleted\n") % mark)
3009 ui.write(_("bookmark '%s' deleted\n") % mark)
3010
3010
3011 if not revs:
3011 if not revs:
3012 raise util.Abort(_('empty revision set'))
3012 raise util.Abort(_('empty revision set'))
3013
3013
3014 descendants = set(cl.descendants(revs))
3014 descendants = set(cl.descendants(revs))
3015 strippedrevs = revs.union(descendants)
3015 strippedrevs = revs.union(descendants)
3016 roots = revs.difference(descendants)
3016 roots = revs.difference(descendants)
3017
3017
3018 update = False
3018 update = False
3019 # if one of the wdir parent is stripped we'll need
3019 # if one of the wdir parent is stripped we'll need
3020 # to update away to an earlier revision
3020 # to update away to an earlier revision
3021 for p in repo.dirstate.parents():
3021 for p in repo.dirstate.parents():
3022 if p != nullid and cl.rev(p) in strippedrevs:
3022 if p != nullid and cl.rev(p) in strippedrevs:
3023 update = True
3023 update = True
3024 break
3024 break
3025
3025
3026 rootnodes = set(cl.node(r) for r in roots)
3026 rootnodes = set(cl.node(r) for r in roots)
3027
3027
3028 q = repo.mq
3028 q = repo.mq
3029 if q.applied:
3029 if q.applied:
3030 # refresh queue state if we're about to strip
3030 # refresh queue state if we're about to strip
3031 # applied patches
3031 # applied patches
3032 if cl.rev(repo.lookup('qtip')) in strippedrevs:
3032 if cl.rev(repo.lookup('qtip')) in strippedrevs:
3033 q.applieddirty = True
3033 q.applieddirty = True
3034 start = 0
3034 start = 0
3035 end = len(q.applied)
3035 end = len(q.applied)
3036 for i, statusentry in enumerate(q.applied):
3036 for i, statusentry in enumerate(q.applied):
3037 if statusentry.node in rootnodes:
3037 if statusentry.node in rootnodes:
3038 # if one of the stripped roots is an applied
3038 # if one of the stripped roots is an applied
3039 # patch, only part of the queue is stripped
3039 # patch, only part of the queue is stripped
3040 start = i
3040 start = i
3041 break
3041 break
3042 del q.applied[start:end]
3042 del q.applied[start:end]
3043 q.savedirty()
3043 q.savedirty()
3044
3044
3045 revs = list(rootnodes)
3045 revs = list(rootnodes)
3046 if update and opts.get('keep'):
3046 if update and opts.get('keep'):
3047 wlock = repo.wlock()
3047 wlock = repo.wlock()
3048 try:
3048 try:
3049 urev = repo.mq.qparents(repo, revs[0])
3049 urev = repo.mq.qparents(repo, revs[0])
3050 repo.dirstate.rebuild(urev, repo[urev].manifest())
3050 repo.dirstate.rebuild(urev, repo[urev].manifest())
3051 repo.dirstate.write()
3051 repo.dirstate.write()
3052 update = False
3052 update = False
3053 finally:
3053 finally:
3054 wlock.release()
3054 wlock.release()
3055
3055
3056 if opts.get('bookmark'):
3056 if opts.get('bookmark'):
3057 del marks[mark]
3057 del marks[mark]
3058 marks.write()
3058 marks.write()
3059 ui.write(_("bookmark '%s' deleted\n") % mark)
3059 ui.write(_("bookmark '%s' deleted\n") % mark)
3060
3060
3061 repo.mq.strip(repo, revs, backup=backup, update=update,
3061 repo.mq.strip(repo, revs, backup=backup, update=update,
3062 force=opts.get('force'))
3062 force=opts.get('force'))
3063
3063
3064 return 0
3064 return 0
3065
3065
3066 @command("qselect",
3066 @command("qselect",
3067 [('n', 'none', None, _('disable all guards')),
3067 [('n', 'none', None, _('disable all guards')),
3068 ('s', 'series', None, _('list all guards in series file')),
3068 ('s', 'series', None, _('list all guards in series file')),
3069 ('', 'pop', None, _('pop to before first guarded applied patch')),
3069 ('', 'pop', None, _('pop to before first guarded applied patch')),
3070 ('', 'reapply', None, _('pop, then reapply patches'))],
3070 ('', 'reapply', None, _('pop, then reapply patches'))],
3071 _('hg qselect [OPTION]... [GUARD]...'))
3071 _('hg qselect [OPTION]... [GUARD]...'))
3072 def select(ui, repo, *args, **opts):
3072 def select(ui, repo, *args, **opts):
3073 '''set or print guarded patches to push
3073 '''set or print guarded patches to push
3074
3074
3075 Use the :hg:`qguard` command to set or print guards on patch, then use
3075 Use the :hg:`qguard` command to set or print guards on patch, then use
3076 qselect to tell mq which guards to use. A patch will be pushed if
3076 qselect to tell mq which guards to use. A patch will be pushed if
3077 it has no guards or any positive guards match the currently
3077 it has no guards or any positive guards match the currently
3078 selected guard, but will not be pushed if any negative guards
3078 selected guard, but will not be pushed if any negative guards
3079 match the current guard. For example::
3079 match the current guard. For example::
3080
3080
3081 qguard foo.patch -- -stable (negative guard)
3081 qguard foo.patch -- -stable (negative guard)
3082 qguard bar.patch +stable (positive guard)
3082 qguard bar.patch +stable (positive guard)
3083 qselect stable
3083 qselect stable
3084
3084
3085 This activates the "stable" guard. mq will skip foo.patch (because
3085 This activates the "stable" guard. mq will skip foo.patch (because
3086 it has a negative match) but push bar.patch (because it has a
3086 it has a negative match) but push bar.patch (because it has a
3087 positive match).
3087 positive match).
3088
3088
3089 With no arguments, prints the currently active guards.
3089 With no arguments, prints the currently active guards.
3090 With one argument, sets the active guard.
3090 With one argument, sets the active guard.
3091
3091
3092 Use -n/--none to deactivate guards (no other arguments needed).
3092 Use -n/--none to deactivate guards (no other arguments needed).
3093 When no guards are active, patches with positive guards are
3093 When no guards are active, patches with positive guards are
3094 skipped and patches with negative guards are pushed.
3094 skipped and patches with negative guards are pushed.
3095
3095
3096 qselect can change the guards on applied patches. It does not pop
3096 qselect can change the guards on applied patches. It does not pop
3097 guarded patches by default. Use --pop to pop back to the last
3097 guarded patches by default. Use --pop to pop back to the last
3098 applied patch that is not guarded. Use --reapply (which implies
3098 applied patch that is not guarded. Use --reapply (which implies
3099 --pop) to push back to the current patch afterwards, but skip
3099 --pop) to push back to the current patch afterwards, but skip
3100 guarded patches.
3100 guarded patches.
3101
3101
3102 Use -s/--series to print a list of all guards in the series file
3102 Use -s/--series to print a list of all guards in the series file
3103 (no other arguments needed). Use -v for more information.
3103 (no other arguments needed). Use -v for more information.
3104
3104
3105 Returns 0 on success.'''
3105 Returns 0 on success.'''
3106
3106
3107 q = repo.mq
3107 q = repo.mq
3108 guards = q.active()
3108 guards = q.active()
3109 if args or opts.get('none'):
3109 if args or opts.get('none'):
3110 old_unapplied = q.unapplied(repo)
3110 old_unapplied = q.unapplied(repo)
3111 old_guarded = [i for i in xrange(len(q.applied)) if
3111 old_guarded = [i for i in xrange(len(q.applied)) if
3112 not q.pushable(i)[0]]
3112 not q.pushable(i)[0]]
3113 q.setactive(args)
3113 q.setactive(args)
3114 q.savedirty()
3114 q.savedirty()
3115 if not args:
3115 if not args:
3116 ui.status(_('guards deactivated\n'))
3116 ui.status(_('guards deactivated\n'))
3117 if not opts.get('pop') and not opts.get('reapply'):
3117 if not opts.get('pop') and not opts.get('reapply'):
3118 unapplied = q.unapplied(repo)
3118 unapplied = q.unapplied(repo)
3119 guarded = [i for i in xrange(len(q.applied))
3119 guarded = [i for i in xrange(len(q.applied))
3120 if not q.pushable(i)[0]]
3120 if not q.pushable(i)[0]]
3121 if len(unapplied) != len(old_unapplied):
3121 if len(unapplied) != len(old_unapplied):
3122 ui.status(_('number of unguarded, unapplied patches has '
3122 ui.status(_('number of unguarded, unapplied patches has '
3123 'changed from %d to %d\n') %
3123 'changed from %d to %d\n') %
3124 (len(old_unapplied), len(unapplied)))
3124 (len(old_unapplied), len(unapplied)))
3125 if len(guarded) != len(old_guarded):
3125 if len(guarded) != len(old_guarded):
3126 ui.status(_('number of guarded, applied patches has changed '
3126 ui.status(_('number of guarded, applied patches has changed '
3127 'from %d to %d\n') %
3127 'from %d to %d\n') %
3128 (len(old_guarded), len(guarded)))
3128 (len(old_guarded), len(guarded)))
3129 elif opts.get('series'):
3129 elif opts.get('series'):
3130 guards = {}
3130 guards = {}
3131 noguards = 0
3131 noguards = 0
3132 for gs in q.seriesguards:
3132 for gs in q.seriesguards:
3133 if not gs:
3133 if not gs:
3134 noguards += 1
3134 noguards += 1
3135 for g in gs:
3135 for g in gs:
3136 guards.setdefault(g, 0)
3136 guards.setdefault(g, 0)
3137 guards[g] += 1
3137 guards[g] += 1
3138 if ui.verbose:
3138 if ui.verbose:
3139 guards['NONE'] = noguards
3139 guards['NONE'] = noguards
3140 guards = guards.items()
3140 guards = guards.items()
3141 guards.sort(key=lambda x: x[0][1:])
3141 guards.sort(key=lambda x: x[0][1:])
3142 if guards:
3142 if guards:
3143 ui.note(_('guards in series file:\n'))
3143 ui.note(_('guards in series file:\n'))
3144 for guard, count in guards:
3144 for guard, count in guards:
3145 ui.note('%2d ' % count)
3145 ui.note('%2d ' % count)
3146 ui.write(guard, '\n')
3146 ui.write(guard, '\n')
3147 else:
3147 else:
3148 ui.note(_('no guards in series file\n'))
3148 ui.note(_('no guards in series file\n'))
3149 else:
3149 else:
3150 if guards:
3150 if guards:
3151 ui.note(_('active guards:\n'))
3151 ui.note(_('active guards:\n'))
3152 for g in guards:
3152 for g in guards:
3153 ui.write(g, '\n')
3153 ui.write(g, '\n')
3154 else:
3154 else:
3155 ui.write(_('no active guards\n'))
3155 ui.write(_('no active guards\n'))
3156 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
3156 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
3157 popped = False
3157 popped = False
3158 if opts.get('pop') or opts.get('reapply'):
3158 if opts.get('pop') or opts.get('reapply'):
3159 for i in xrange(len(q.applied)):
3159 for i in xrange(len(q.applied)):
3160 pushable, reason = q.pushable(i)
3160 pushable, reason = q.pushable(i)
3161 if not pushable:
3161 if not pushable:
3162 ui.status(_('popping guarded patches\n'))
3162 ui.status(_('popping guarded patches\n'))
3163 popped = True
3163 popped = True
3164 if i == 0:
3164 if i == 0:
3165 q.pop(repo, all=True)
3165 q.pop(repo, all=True)
3166 else:
3166 else:
3167 q.pop(repo, str(i - 1))
3167 q.pop(repo, str(i - 1))
3168 break
3168 break
3169 if popped:
3169 if popped:
3170 try:
3170 try:
3171 if reapply:
3171 if reapply:
3172 ui.status(_('reapplying unguarded patches\n'))
3172 ui.status(_('reapplying unguarded patches\n'))
3173 q.push(repo, reapply)
3173 q.push(repo, reapply)
3174 finally:
3174 finally:
3175 q.savedirty()
3175 q.savedirty()
3176
3176
3177 @command("qfinish",
3177 @command("qfinish",
3178 [('a', 'applied', None, _('finish all applied changesets'))],
3178 [('a', 'applied', None, _('finish all applied changesets'))],
3179 _('hg qfinish [-a] [REV]...'))
3179 _('hg qfinish [-a] [REV]...'))
3180 def finish(ui, repo, *revrange, **opts):
3180 def finish(ui, repo, *revrange, **opts):
3181 """move applied patches into repository history
3181 """move applied patches into repository history
3182
3182
3183 Finishes the specified revisions (corresponding to applied
3183 Finishes the specified revisions (corresponding to applied
3184 patches) by moving them out of mq control into regular repository
3184 patches) by moving them out of mq control into regular repository
3185 history.
3185 history.
3186
3186
3187 Accepts a revision range or the -a/--applied option. If --applied
3187 Accepts a revision range or the -a/--applied option. If --applied
3188 is specified, all applied mq revisions are removed from mq
3188 is specified, all applied mq revisions are removed from mq
3189 control. Otherwise, the given revisions must be at the base of the
3189 control. Otherwise, the given revisions must be at the base of the
3190 stack of applied patches.
3190 stack of applied patches.
3191
3191
3192 This can be especially useful if your changes have been applied to
3192 This can be especially useful if your changes have been applied to
3193 an upstream repository, or if you are about to push your changes
3193 an upstream repository, or if you are about to push your changes
3194 to upstream.
3194 to upstream.
3195
3195
3196 Returns 0 on success.
3196 Returns 0 on success.
3197 """
3197 """
3198 if not opts.get('applied') and not revrange:
3198 if not opts.get('applied') and not revrange:
3199 raise util.Abort(_('no revisions specified'))
3199 raise util.Abort(_('no revisions specified'))
3200 elif opts.get('applied'):
3200 elif opts.get('applied'):
3201 revrange = ('qbase::qtip',) + revrange
3201 revrange = ('qbase::qtip',) + revrange
3202
3202
3203 q = repo.mq
3203 q = repo.mq
3204 if not q.applied:
3204 if not q.applied:
3205 ui.status(_('no patches applied\n'))
3205 ui.status(_('no patches applied\n'))
3206 return 0
3206 return 0
3207
3207
3208 revs = scmutil.revrange(repo, revrange)
3208 revs = scmutil.revrange(repo, revrange)
3209 if repo['.'].rev() in revs and repo[None].files():
3209 if repo['.'].rev() in revs and repo[None].files():
3210 ui.warn(_('warning: uncommitted changes in the working directory\n'))
3210 ui.warn(_('warning: uncommitted changes in the working directory\n'))
3211 # queue.finish may changes phases but leave the responsibility to lock the
3211 # queue.finish may changes phases but leave the responsibility to lock the
3212 # repo to the caller to avoid deadlock with wlock. This command code is
3212 # repo to the caller to avoid deadlock with wlock. This command code is
3213 # responsibility for this locking.
3213 # responsibility for this locking.
3214 lock = repo.lock()
3214 lock = repo.lock()
3215 try:
3215 try:
3216 q.finish(repo, revs)
3216 q.finish(repo, revs)
3217 q.savedirty()
3217 q.savedirty()
3218 finally:
3218 finally:
3219 lock.release()
3219 lock.release()
3220 return 0
3220 return 0
3221
3221
3222 @command("qqueue",
3222 @command("qqueue",
3223 [('l', 'list', False, _('list all available queues')),
3223 [('l', 'list', False, _('list all available queues')),
3224 ('', 'active', False, _('print name of active queue')),
3224 ('', 'active', False, _('print name of active queue')),
3225 ('c', 'create', False, _('create new queue')),
3225 ('c', 'create', False, _('create new queue')),
3226 ('', 'rename', False, _('rename active queue')),
3226 ('', 'rename', False, _('rename active queue')),
3227 ('', 'delete', False, _('delete reference to queue')),
3227 ('', 'delete', False, _('delete reference to queue')),
3228 ('', 'purge', False, _('delete queue, and remove patch dir')),
3228 ('', 'purge', False, _('delete queue, and remove patch dir')),
3229 ],
3229 ],
3230 _('[OPTION] [QUEUE]'))
3230 _('[OPTION] [QUEUE]'))
3231 def qqueue(ui, repo, name=None, **opts):
3231 def qqueue(ui, repo, name=None, **opts):
3232 '''manage multiple patch queues
3232 '''manage multiple patch queues
3233
3233
3234 Supports switching between different patch queues, as well as creating
3234 Supports switching between different patch queues, as well as creating
3235 new patch queues and deleting existing ones.
3235 new patch queues and deleting existing ones.
3236
3236
3237 Omitting a queue name or specifying -l/--list will show you the registered
3237 Omitting a queue name or specifying -l/--list will show you the registered
3238 queues - by default the "normal" patches queue is registered. The currently
3238 queues - by default the "normal" patches queue is registered. The currently
3239 active queue will be marked with "(active)". Specifying --active will print
3239 active queue will be marked with "(active)". Specifying --active will print
3240 only the name of the active queue.
3240 only the name of the active queue.
3241
3241
3242 To create a new queue, use -c/--create. The queue is automatically made
3242 To create a new queue, use -c/--create. The queue is automatically made
3243 active, except in the case where there are applied patches from the
3243 active, except in the case where there are applied patches from the
3244 currently active queue in the repository. Then the queue will only be
3244 currently active queue in the repository. Then the queue will only be
3245 created and switching will fail.
3245 created and switching will fail.
3246
3246
3247 To delete an existing queue, use --delete. You cannot delete the currently
3247 To delete an existing queue, use --delete. You cannot delete the currently
3248 active queue.
3248 active queue.
3249
3249
3250 Returns 0 on success.
3250 Returns 0 on success.
3251 '''
3251 '''
3252 q = repo.mq
3252 q = repo.mq
3253 _defaultqueue = 'patches'
3253 _defaultqueue = 'patches'
3254 _allqueues = 'patches.queues'
3254 _allqueues = 'patches.queues'
3255 _activequeue = 'patches.queue'
3255 _activequeue = 'patches.queue'
3256
3256
3257 def _getcurrent():
3257 def _getcurrent():
3258 cur = os.path.basename(q.path)
3258 cur = os.path.basename(q.path)
3259 if cur.startswith('patches-'):
3259 if cur.startswith('patches-'):
3260 cur = cur[8:]
3260 cur = cur[8:]
3261 return cur
3261 return cur
3262
3262
3263 def _noqueues():
3263 def _noqueues():
3264 try:
3264 try:
3265 fh = repo.opener(_allqueues, 'r')
3265 fh = repo.opener(_allqueues, 'r')
3266 fh.close()
3266 fh.close()
3267 except IOError:
3267 except IOError:
3268 return True
3268 return True
3269
3269
3270 return False
3270 return False
3271
3271
3272 def _getqueues():
3272 def _getqueues():
3273 current = _getcurrent()
3273 current = _getcurrent()
3274
3274
3275 try:
3275 try:
3276 fh = repo.opener(_allqueues, 'r')
3276 fh = repo.opener(_allqueues, 'r')
3277 queues = [queue.strip() for queue in fh if queue.strip()]
3277 queues = [queue.strip() for queue in fh if queue.strip()]
3278 fh.close()
3278 fh.close()
3279 if current not in queues:
3279 if current not in queues:
3280 queues.append(current)
3280 queues.append(current)
3281 except IOError:
3281 except IOError:
3282 queues = [_defaultqueue]
3282 queues = [_defaultqueue]
3283
3283
3284 return sorted(queues)
3284 return sorted(queues)
3285
3285
3286 def _setactive(name):
3286 def _setactive(name):
3287 if q.applied:
3287 if q.applied:
3288 raise util.Abort(_('new queue created, but cannot make active '
3288 raise util.Abort(_('new queue created, but cannot make active '
3289 'as patches are applied'))
3289 'as patches are applied'))
3290 _setactivenocheck(name)
3290 _setactivenocheck(name)
3291
3291
3292 def _setactivenocheck(name):
3292 def _setactivenocheck(name):
3293 fh = repo.opener(_activequeue, 'w')
3293 fh = repo.opener(_activequeue, 'w')
3294 if name != 'patches':
3294 if name != 'patches':
3295 fh.write(name)
3295 fh.write(name)
3296 fh.close()
3296 fh.close()
3297
3297
3298 def _addqueue(name):
3298 def _addqueue(name):
3299 fh = repo.opener(_allqueues, 'a')
3299 fh = repo.opener(_allqueues, 'a')
3300 fh.write('%s\n' % (name,))
3300 fh.write('%s\n' % (name,))
3301 fh.close()
3301 fh.close()
3302
3302
3303 def _queuedir(name):
3303 def _queuedir(name):
3304 if name == 'patches':
3304 if name == 'patches':
3305 return repo.join('patches')
3305 return repo.join('patches')
3306 else:
3306 else:
3307 return repo.join('patches-' + name)
3307 return repo.join('patches-' + name)
3308
3308
3309 def _validname(name):
3309 def _validname(name):
3310 for n in name:
3310 for n in name:
3311 if n in ':\\/.':
3311 if n in ':\\/.':
3312 return False
3312 return False
3313 return True
3313 return True
3314
3314
3315 def _delete(name):
3315 def _delete(name):
3316 if name not in existing:
3316 if name not in existing:
3317 raise util.Abort(_('cannot delete queue that does not exist'))
3317 raise util.Abort(_('cannot delete queue that does not exist'))
3318
3318
3319 current = _getcurrent()
3319 current = _getcurrent()
3320
3320
3321 if name == current:
3321 if name == current:
3322 raise util.Abort(_('cannot delete currently active queue'))
3322 raise util.Abort(_('cannot delete currently active queue'))
3323
3323
3324 fh = repo.opener('patches.queues.new', 'w')
3324 fh = repo.opener('patches.queues.new', 'w')
3325 for queue in existing:
3325 for queue in existing:
3326 if queue == name:
3326 if queue == name:
3327 continue
3327 continue
3328 fh.write('%s\n' % (queue,))
3328 fh.write('%s\n' % (queue,))
3329 fh.close()
3329 fh.close()
3330 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3330 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3331
3331
3332 if not name or opts.get('list') or opts.get('active'):
3332 if not name or opts.get('list') or opts.get('active'):
3333 current = _getcurrent()
3333 current = _getcurrent()
3334 if opts.get('active'):
3334 if opts.get('active'):
3335 ui.write('%s\n' % (current,))
3335 ui.write('%s\n' % (current,))
3336 return
3336 return
3337 for queue in _getqueues():
3337 for queue in _getqueues():
3338 ui.write('%s' % (queue,))
3338 ui.write('%s' % (queue,))
3339 if queue == current and not ui.quiet:
3339 if queue == current and not ui.quiet:
3340 ui.write(_(' (active)\n'))
3340 ui.write(_(' (active)\n'))
3341 else:
3341 else:
3342 ui.write('\n')
3342 ui.write('\n')
3343 return
3343 return
3344
3344
3345 if not _validname(name):
3345 if not _validname(name):
3346 raise util.Abort(
3346 raise util.Abort(
3347 _('invalid queue name, may not contain the characters ":\\/."'))
3347 _('invalid queue name, may not contain the characters ":\\/."'))
3348
3348
3349 existing = _getqueues()
3349 existing = _getqueues()
3350
3350
3351 if opts.get('create'):
3351 if opts.get('create'):
3352 if name in existing:
3352 if name in existing:
3353 raise util.Abort(_('queue "%s" already exists') % name)
3353 raise util.Abort(_('queue "%s" already exists') % name)
3354 if _noqueues():
3354 if _noqueues():
3355 _addqueue(_defaultqueue)
3355 _addqueue(_defaultqueue)
3356 _addqueue(name)
3356 _addqueue(name)
3357 _setactive(name)
3357 _setactive(name)
3358 elif opts.get('rename'):
3358 elif opts.get('rename'):
3359 current = _getcurrent()
3359 current = _getcurrent()
3360 if name == current:
3360 if name == current:
3361 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3361 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3362 if name in existing:
3362 if name in existing:
3363 raise util.Abort(_('queue "%s" already exists') % name)
3363 raise util.Abort(_('queue "%s" already exists') % name)
3364
3364
3365 olddir = _queuedir(current)
3365 olddir = _queuedir(current)
3366 newdir = _queuedir(name)
3366 newdir = _queuedir(name)
3367
3367
3368 if os.path.exists(newdir):
3368 if os.path.exists(newdir):
3369 raise util.Abort(_('non-queue directory "%s" already exists') %
3369 raise util.Abort(_('non-queue directory "%s" already exists') %
3370 newdir)
3370 newdir)
3371
3371
3372 fh = repo.opener('patches.queues.new', 'w')
3372 fh = repo.opener('patches.queues.new', 'w')
3373 for queue in existing:
3373 for queue in existing:
3374 if queue == current:
3374 if queue == current:
3375 fh.write('%s\n' % (name,))
3375 fh.write('%s\n' % (name,))
3376 if os.path.exists(olddir):
3376 if os.path.exists(olddir):
3377 util.rename(olddir, newdir)
3377 util.rename(olddir, newdir)
3378 else:
3378 else:
3379 fh.write('%s\n' % (queue,))
3379 fh.write('%s\n' % (queue,))
3380 fh.close()
3380 fh.close()
3381 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3381 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3382 _setactivenocheck(name)
3382 _setactivenocheck(name)
3383 elif opts.get('delete'):
3383 elif opts.get('delete'):
3384 _delete(name)
3384 _delete(name)
3385 elif opts.get('purge'):
3385 elif opts.get('purge'):
3386 if name in existing:
3386 if name in existing:
3387 _delete(name)
3387 _delete(name)
3388 qdir = _queuedir(name)
3388 qdir = _queuedir(name)
3389 if os.path.exists(qdir):
3389 if os.path.exists(qdir):
3390 shutil.rmtree(qdir)
3390 shutil.rmtree(qdir)
3391 else:
3391 else:
3392 if name not in existing:
3392 if name not in existing:
3393 raise util.Abort(_('use --create to create a new queue'))
3393 raise util.Abort(_('use --create to create a new queue'))
3394 _setactive(name)
3394 _setactive(name)
3395
3395
3396 def mqphasedefaults(repo, roots):
3396 def mqphasedefaults(repo, roots):
3397 """callback used to set mq changeset as secret when no phase data exists"""
3397 """callback used to set mq changeset as secret when no phase data exists"""
3398 if repo.mq.applied:
3398 if repo.mq.applied:
3399 if repo.ui.configbool('mq', 'secret', False):
3399 if repo.ui.configbool('mq', 'secret', False):
3400 mqphase = phases.secret
3400 mqphase = phases.secret
3401 else:
3401 else:
3402 mqphase = phases.draft
3402 mqphase = phases.draft
3403 qbase = repo[repo.mq.applied[0].node]
3403 qbase = repo[repo.mq.applied[0].node]
3404 roots[mqphase].add(qbase.node())
3404 roots[mqphase].add(qbase.node())
3405 return roots
3405 return roots
3406
3406
3407 def reposetup(ui, repo):
3407 def reposetup(ui, repo):
3408 class mqrepo(repo.__class__):
3408 class mqrepo(repo.__class__):
3409 @util.propertycache
3409 @util.propertycache
3410 def mq(self):
3410 def mq(self):
3411 return queue(self.ui, self.path)
3411 return queue(self.ui, self.path)
3412
3412
3413 def abortifwdirpatched(self, errmsg, force=False):
3413 def abortifwdirpatched(self, errmsg, force=False):
3414 if self.mq.applied and not force:
3414 if self.mq.applied and not force:
3415 parents = self.dirstate.parents()
3415 parents = self.dirstate.parents()
3416 patches = [s.node for s in self.mq.applied]
3416 patches = [s.node for s in self.mq.applied]
3417 if parents[0] in patches or parents[1] in patches:
3417 if parents[0] in patches or parents[1] in patches:
3418 raise util.Abort(errmsg)
3418 raise util.Abort(errmsg)
3419
3419
3420 def commit(self, text="", user=None, date=None, match=None,
3420 def commit(self, text="", user=None, date=None, match=None,
3421 force=False, editor=False, extra={}):
3421 force=False, editor=False, extra={}):
3422 self.abortifwdirpatched(
3422 self.abortifwdirpatched(
3423 _('cannot commit over an applied mq patch'),
3423 _('cannot commit over an applied mq patch'),
3424 force)
3424 force)
3425
3425
3426 return super(mqrepo, self).commit(text, user, date, match, force,
3426 return super(mqrepo, self).commit(text, user, date, match, force,
3427 editor, extra)
3427 editor, extra)
3428
3428
3429 def checkpush(self, force, revs):
3429 def checkpush(self, force, revs):
3430 if self.mq.applied and not force:
3430 if self.mq.applied and not force:
3431 outapplied = [e.node for e in self.mq.applied]
3431 outapplied = [e.node for e in self.mq.applied]
3432 if revs:
3432 if revs:
3433 # Assume applied patches have no non-patch descendants and
3433 # Assume applied patches have no non-patch descendants and
3434 # are not on remote already. Filtering any changeset not
3434 # are not on remote already. Filtering any changeset not
3435 # pushed.
3435 # pushed.
3436 heads = set(revs)
3436 heads = set(revs)
3437 for node in reversed(outapplied):
3437 for node in reversed(outapplied):
3438 if node in heads:
3438 if node in heads:
3439 break
3439 break
3440 else:
3440 else:
3441 outapplied.pop()
3441 outapplied.pop()
3442 # looking for pushed and shared changeset
3442 # looking for pushed and shared changeset
3443 for node in outapplied:
3443 for node in outapplied:
3444 if self[node].phase() < phases.secret:
3444 if self[node].phase() < phases.secret:
3445 raise util.Abort(_('source has mq patches applied'))
3445 raise util.Abort(_('source has mq patches applied'))
3446 # no non-secret patches pushed
3446 # no non-secret patches pushed
3447 super(mqrepo, self).checkpush(force, revs)
3447 super(mqrepo, self).checkpush(force, revs)
3448
3448
3449 def _findtags(self):
3449 def _findtags(self):
3450 '''augment tags from base class with patch tags'''
3450 '''augment tags from base class with patch tags'''
3451 result = super(mqrepo, self)._findtags()
3451 result = super(mqrepo, self)._findtags()
3452
3452
3453 q = self.mq
3453 q = self.mq
3454 if not q.applied:
3454 if not q.applied:
3455 return result
3455 return result
3456
3456
3457 mqtags = [(patch.node, patch.name) for patch in q.applied]
3457 mqtags = [(patch.node, patch.name) for patch in q.applied]
3458
3458
3459 try:
3459 try:
3460 # for now ignore filtering business
3460 # for now ignore filtering business
3461 self.unfiltered().changelog.rev(mqtags[-1][0])
3461 self.unfiltered().changelog.rev(mqtags[-1][0])
3462 except error.LookupError:
3462 except error.LookupError:
3463 self.ui.warn(_('mq status file refers to unknown node %s\n')
3463 self.ui.warn(_('mq status file refers to unknown node %s\n')
3464 % short(mqtags[-1][0]))
3464 % short(mqtags[-1][0]))
3465 return result
3465 return result
3466
3466
3467 mqtags.append((mqtags[-1][0], 'qtip'))
3467 mqtags.append((mqtags[-1][0], 'qtip'))
3468 mqtags.append((mqtags[0][0], 'qbase'))
3468 mqtags.append((mqtags[0][0], 'qbase'))
3469 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3469 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3470 tags = result[0]
3470 tags = result[0]
3471 for patch in mqtags:
3471 for patch in mqtags:
3472 if patch[1] in tags:
3472 if patch[1] in tags:
3473 self.ui.warn(_('tag %s overrides mq patch of the same '
3473 self.ui.warn(_('tag %s overrides mq patch of the same '
3474 'name\n') % patch[1])
3474 'name\n') % patch[1])
3475 else:
3475 else:
3476 tags[patch[1]] = patch[0]
3476 tags[patch[1]] = patch[0]
3477
3477
3478 return result
3478 return result
3479
3479
3480 def _branchtags(self, partial, lrev):
3480 def _cacheabletip(self):
3481 q = self.mq
3481 q = self.mq
3482 cl = self.changelog
3482 cl = self.changelog
3483 qbase = None
3483 qbase = None
3484 if not q.applied:
3484 if not q.applied:
3485 if getattr(self, '_committingpatch', False):
3485 if getattr(self, '_committingpatch', False):
3486 # Committing a new patch, must be tip
3486 # Committing a new patch, must be tip
3487 qbase = len(cl) - 1
3487 qbase = len(cl) - 1
3488 else:
3488 else:
3489 qbasenode = q.applied[0].node
3489 qbasenode = q.applied[0].node
3490 try:
3490 try:
3491 qbase = self.unfiltered().changelog.rev(qbasenode)
3491 qbase = self.unfiltered().changelog.rev(qbasenode)
3492 except error.LookupError:
3492 except error.LookupError:
3493 self.ui.warn(_('mq status file refers to unknown node %s\n')
3493 self.ui.warn(_('mq status file refers to unknown node %s\n')
3494 % short(qbasenode))
3494 % short(qbasenode))
3495 if qbase is None:
3495 ret = super(mqrepo, self)._cacheabletip()
3496 return super(mqrepo, self)._branchtags(partial, lrev)
3496 if qbase is not None:
3497
3497 ret = min(qbase - 1, ret)
3498 start = lrev + 1
3498 return ret
3499 if start < qbase:
3500 # update the cache (excluding the patches) and save it
3501 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
3502 self._updatebranchcache(partial, ctxgen)
3503 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
3504 start = qbase
3505 # if start = qbase, the cache is as updated as it should be.
3506 # if start > qbase, the cache includes (part of) the patches.
3507 # we might as well use it, but we won't save it.
3508
3509 # update the cache up to the tip
3510 ctxgen = (self[r] for r in xrange(start, len(cl)))
3511 self._updatebranchcache(partial, ctxgen)
3512
3513 return partial
3514
3499
3515 if repo.local():
3500 if repo.local():
3516 repo.__class__ = mqrepo
3501 repo.__class__ = mqrepo
3517
3502
3518 repo._phasedefaults.append(mqphasedefaults)
3503 repo._phasedefaults.append(mqphasedefaults)
3519
3504
3520 def mqimport(orig, ui, repo, *args, **kwargs):
3505 def mqimport(orig, ui, repo, *args, **kwargs):
3521 if (util.safehasattr(repo, 'abortifwdirpatched')
3506 if (util.safehasattr(repo, 'abortifwdirpatched')
3522 and not kwargs.get('no_commit', False)):
3507 and not kwargs.get('no_commit', False)):
3523 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3508 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3524 kwargs.get('force'))
3509 kwargs.get('force'))
3525 return orig(ui, repo, *args, **kwargs)
3510 return orig(ui, repo, *args, **kwargs)
3526
3511
3527 def mqinit(orig, ui, *args, **kwargs):
3512 def mqinit(orig, ui, *args, **kwargs):
3528 mq = kwargs.pop('mq', None)
3513 mq = kwargs.pop('mq', None)
3529
3514
3530 if not mq:
3515 if not mq:
3531 return orig(ui, *args, **kwargs)
3516 return orig(ui, *args, **kwargs)
3532
3517
3533 if args:
3518 if args:
3534 repopath = args[0]
3519 repopath = args[0]
3535 if not hg.islocal(repopath):
3520 if not hg.islocal(repopath):
3536 raise util.Abort(_('only a local queue repository '
3521 raise util.Abort(_('only a local queue repository '
3537 'may be initialized'))
3522 'may be initialized'))
3538 else:
3523 else:
3539 repopath = cmdutil.findrepo(os.getcwd())
3524 repopath = cmdutil.findrepo(os.getcwd())
3540 if not repopath:
3525 if not repopath:
3541 raise util.Abort(_('there is no Mercurial repository here '
3526 raise util.Abort(_('there is no Mercurial repository here '
3542 '(.hg not found)'))
3527 '(.hg not found)'))
3543 repo = hg.repository(ui, repopath)
3528 repo = hg.repository(ui, repopath)
3544 return qinit(ui, repo, True)
3529 return qinit(ui, repo, True)
3545
3530
3546 def mqcommand(orig, ui, repo, *args, **kwargs):
3531 def mqcommand(orig, ui, repo, *args, **kwargs):
3547 """Add --mq option to operate on patch repository instead of main"""
3532 """Add --mq option to operate on patch repository instead of main"""
3548
3533
3549 # some commands do not like getting unknown options
3534 # some commands do not like getting unknown options
3550 mq = kwargs.pop('mq', None)
3535 mq = kwargs.pop('mq', None)
3551
3536
3552 if not mq:
3537 if not mq:
3553 return orig(ui, repo, *args, **kwargs)
3538 return orig(ui, repo, *args, **kwargs)
3554
3539
3555 q = repo.mq
3540 q = repo.mq
3556 r = q.qrepo()
3541 r = q.qrepo()
3557 if not r:
3542 if not r:
3558 raise util.Abort(_('no queue repository'))
3543 raise util.Abort(_('no queue repository'))
3559 return orig(r.ui, r, *args, **kwargs)
3544 return orig(r.ui, r, *args, **kwargs)
3560
3545
3561 def summary(orig, ui, repo, *args, **kwargs):
3546 def summary(orig, ui, repo, *args, **kwargs):
3562 r = orig(ui, repo, *args, **kwargs)
3547 r = orig(ui, repo, *args, **kwargs)
3563 q = repo.mq
3548 q = repo.mq
3564 m = []
3549 m = []
3565 a, u = len(q.applied), len(q.unapplied(repo))
3550 a, u = len(q.applied), len(q.unapplied(repo))
3566 if a:
3551 if a:
3567 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3552 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3568 if u:
3553 if u:
3569 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3554 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3570 if m:
3555 if m:
3571 # i18n: column positioning for "hg summary"
3556 # i18n: column positioning for "hg summary"
3572 ui.write(_("mq: %s\n") % ', '.join(m))
3557 ui.write(_("mq: %s\n") % ', '.join(m))
3573 else:
3558 else:
3574 # i18n: column positioning for "hg summary"
3559 # i18n: column positioning for "hg summary"
3575 ui.note(_("mq: (empty queue)\n"))
3560 ui.note(_("mq: (empty queue)\n"))
3576 return r
3561 return r
3577
3562
3578 def revsetmq(repo, subset, x):
3563 def revsetmq(repo, subset, x):
3579 """``mq()``
3564 """``mq()``
3580 Changesets managed by MQ.
3565 Changesets managed by MQ.
3581 """
3566 """
3582 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3567 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3583 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3568 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3584 return [r for r in subset if r in applied]
3569 return [r for r in subset if r in applied]
3585
3570
3586 # tell hggettext to extract docstrings from these functions:
3571 # tell hggettext to extract docstrings from these functions:
3587 i18nfunctions = [revsetmq]
3572 i18nfunctions = [revsetmq]
3588
3573
3589 def extsetup(ui):
3574 def extsetup(ui):
3590 # Ensure mq wrappers are called first, regardless of extension load order by
3575 # Ensure mq wrappers are called first, regardless of extension load order by
3591 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3576 # NOT wrapping in uisetup() and instead deferring to init stage two here.
3592 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3577 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3593
3578
3594 extensions.wrapcommand(commands.table, 'import', mqimport)
3579 extensions.wrapcommand(commands.table, 'import', mqimport)
3595 extensions.wrapcommand(commands.table, 'summary', summary)
3580 extensions.wrapcommand(commands.table, 'summary', summary)
3596
3581
3597 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3582 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3598 entry[1].extend(mqopt)
3583 entry[1].extend(mqopt)
3599
3584
3600 nowrap = set(commands.norepo.split(" "))
3585 nowrap = set(commands.norepo.split(" "))
3601
3586
3602 def dotable(cmdtable):
3587 def dotable(cmdtable):
3603 for cmd in cmdtable.keys():
3588 for cmd in cmdtable.keys():
3604 cmd = cmdutil.parsealiases(cmd)[0]
3589 cmd = cmdutil.parsealiases(cmd)[0]
3605 if cmd in nowrap:
3590 if cmd in nowrap:
3606 continue
3591 continue
3607 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3592 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3608 entry[1].extend(mqopt)
3593 entry[1].extend(mqopt)
3609
3594
3610 dotable(commands.table)
3595 dotable(commands.table)
3611
3596
3612 for extname, extmodule in extensions.extensions():
3597 for extname, extmodule in extensions.extensions():
3613 if extmodule.__file__ != __file__:
3598 if extmodule.__file__ != __file__:
3614 dotable(getattr(extmodule, 'cmdtable', {}))
3599 dotable(getattr(extmodule, 'cmdtable', {}))
3615
3600
3616 revset.symbols['mq'] = revsetmq
3601 revset.symbols['mq'] = revsetmq
3617
3602
3618 colortable = {'qguard.negative': 'red',
3603 colortable = {'qguard.negative': 'red',
3619 'qguard.positive': 'yellow',
3604 'qguard.positive': 'yellow',
3620 'qguard.unguarded': 'green',
3605 'qguard.unguarded': 'green',
3621 'qseries.applied': 'blue bold underline',
3606 'qseries.applied': 'blue bold underline',
3622 'qseries.guarded': 'black bold',
3607 'qseries.guarded': 'black bold',
3623 'qseries.missing': 'red bold',
3608 'qseries.missing': 'red bold',
3624 'qseries.unapplied': 'black bold'}
3609 'qseries.unapplied': 'black bold'}
3625
3610
3626 commands.inferrepo += " qnew qrefresh qdiff qcommit"
3611 commands.inferrepo += " qnew qrefresh qdiff qcommit"
@@ -1,2707 +1,2730 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import bin, hex, nullid, nullrev, short
7 from node import bin, hex, nullid, nullrev, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19 filecache = scmutil.filecache
19 filecache = scmutil.filecache
20
20
21 class repofilecache(filecache):
21 class repofilecache(filecache):
22 """All filecache usage on repo are done for logic that should be unfiltered
22 """All filecache usage on repo are done for logic that should be unfiltered
23 """
23 """
24
24
25 def __get__(self, repo, type=None):
25 def __get__(self, repo, type=None):
26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 def __set__(self, repo, value):
27 def __set__(self, repo, value):
28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 def __delete__(self, repo):
29 def __delete__(self, repo):
30 return super(repofilecache, self).__delete__(repo.unfiltered())
30 return super(repofilecache, self).__delete__(repo.unfiltered())
31
31
32 class storecache(repofilecache):
32 class storecache(repofilecache):
33 """filecache for files in the store"""
33 """filecache for files in the store"""
34 def join(self, obj, fname):
34 def join(self, obj, fname):
35 return obj.sjoin(fname)
35 return obj.sjoin(fname)
36
36
37 class unfilteredpropertycache(propertycache):
37 class unfilteredpropertycache(propertycache):
38 """propertycache that apply to unfiltered repo only"""
38 """propertycache that apply to unfiltered repo only"""
39
39
40 def __get__(self, repo, type=None):
40 def __get__(self, repo, type=None):
41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42
42
43 class filteredpropertycache(propertycache):
43 class filteredpropertycache(propertycache):
44 """propertycache that must take filtering in account"""
44 """propertycache that must take filtering in account"""
45
45
46 def cachevalue(self, obj, value):
46 def cachevalue(self, obj, value):
47 object.__setattr__(obj, self.name, value)
47 object.__setattr__(obj, self.name, value)
48
48
49
49
50 def hasunfilteredcache(repo, name):
50 def hasunfilteredcache(repo, name):
51 """check if an repo and a unfilteredproperty cached value for <name>"""
51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 return name in vars(repo.unfiltered())
52 return name in vars(repo.unfiltered())
53
53
54 def unfilteredmethod(orig):
54 def unfilteredmethod(orig):
55 """decorate method that always need to be run on unfiltered version"""
55 """decorate method that always need to be run on unfiltered version"""
56 def wrapper(repo, *args, **kwargs):
56 def wrapper(repo, *args, **kwargs):
57 return orig(repo.unfiltered(), *args, **kwargs)
57 return orig(repo.unfiltered(), *args, **kwargs)
58 return wrapper
58 return wrapper
59
59
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62
62
63 class localpeer(peer.peerrepository):
63 class localpeer(peer.peerrepository):
64 '''peer for a local repo; reflects only the most recent API'''
64 '''peer for a local repo; reflects only the most recent API'''
65
65
66 def __init__(self, repo, caps=MODERNCAPS):
66 def __init__(self, repo, caps=MODERNCAPS):
67 peer.peerrepository.__init__(self)
67 peer.peerrepository.__init__(self)
68 self._repo = repo
68 self._repo = repo
69 self.ui = repo.ui
69 self.ui = repo.ui
70 self._caps = repo._restrictcapabilities(caps)
70 self._caps = repo._restrictcapabilities(caps)
71 self.requirements = repo.requirements
71 self.requirements = repo.requirements
72 self.supportedformats = repo.supportedformats
72 self.supportedformats = repo.supportedformats
73
73
74 def close(self):
74 def close(self):
75 self._repo.close()
75 self._repo.close()
76
76
77 def _capabilities(self):
77 def _capabilities(self):
78 return self._caps
78 return self._caps
79
79
80 def local(self):
80 def local(self):
81 return self._repo
81 return self._repo
82
82
83 def canpush(self):
83 def canpush(self):
84 return True
84 return True
85
85
86 def url(self):
86 def url(self):
87 return self._repo.url()
87 return self._repo.url()
88
88
89 def lookup(self, key):
89 def lookup(self, key):
90 return self._repo.lookup(key)
90 return self._repo.lookup(key)
91
91
92 def branchmap(self):
92 def branchmap(self):
93 return discovery.visiblebranchmap(self._repo)
93 return discovery.visiblebranchmap(self._repo)
94
94
95 def heads(self):
95 def heads(self):
96 return discovery.visibleheads(self._repo)
96 return discovery.visibleheads(self._repo)
97
97
98 def known(self, nodes):
98 def known(self, nodes):
99 return self._repo.known(nodes)
99 return self._repo.known(nodes)
100
100
101 def getbundle(self, source, heads=None, common=None):
101 def getbundle(self, source, heads=None, common=None):
102 return self._repo.getbundle(source, heads=heads, common=common)
102 return self._repo.getbundle(source, heads=heads, common=common)
103
103
104 # TODO We might want to move the next two calls into legacypeer and add
104 # TODO We might want to move the next two calls into legacypeer and add
105 # unbundle instead.
105 # unbundle instead.
106
106
107 def lock(self):
107 def lock(self):
108 return self._repo.lock()
108 return self._repo.lock()
109
109
110 def addchangegroup(self, cg, source, url):
110 def addchangegroup(self, cg, source, url):
111 return self._repo.addchangegroup(cg, source, url)
111 return self._repo.addchangegroup(cg, source, url)
112
112
113 def pushkey(self, namespace, key, old, new):
113 def pushkey(self, namespace, key, old, new):
114 return self._repo.pushkey(namespace, key, old, new)
114 return self._repo.pushkey(namespace, key, old, new)
115
115
116 def listkeys(self, namespace):
116 def listkeys(self, namespace):
117 return self._repo.listkeys(namespace)
117 return self._repo.listkeys(namespace)
118
118
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 '''used to test argument passing over the wire'''
120 '''used to test argument passing over the wire'''
121 return "%s %s %s %s %s" % (one, two, three, four, five)
121 return "%s %s %s %s %s" % (one, two, three, four, five)
122
122
123 class locallegacypeer(localpeer):
123 class locallegacypeer(localpeer):
124 '''peer extension which implements legacy methods too; used for tests with
124 '''peer extension which implements legacy methods too; used for tests with
125 restricted capabilities'''
125 restricted capabilities'''
126
126
127 def __init__(self, repo):
127 def __init__(self, repo):
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129
129
130 def branches(self, nodes):
130 def branches(self, nodes):
131 return self._repo.branches(nodes)
131 return self._repo.branches(nodes)
132
132
133 def between(self, pairs):
133 def between(self, pairs):
134 return self._repo.between(pairs)
134 return self._repo.between(pairs)
135
135
136 def changegroup(self, basenodes, source):
136 def changegroup(self, basenodes, source):
137 return self._repo.changegroup(basenodes, source)
137 return self._repo.changegroup(basenodes, source)
138
138
139 def changegroupsubset(self, bases, heads, source):
139 def changegroupsubset(self, bases, heads, source):
140 return self._repo.changegroupsubset(bases, heads, source)
140 return self._repo.changegroupsubset(bases, heads, source)
141
141
142 class localrepository(object):
142 class localrepository(object):
143
143
144 supportedformats = set(('revlogv1', 'generaldelta'))
144 supportedformats = set(('revlogv1', 'generaldelta'))
145 supported = supportedformats | set(('store', 'fncache', 'shared',
145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 'dotencode'))
146 'dotencode'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
147 openerreqs = set(('revlogv1', 'generaldelta'))
148 requirements = ['revlogv1']
148 requirements = ['revlogv1']
149
149
150 def _baserequirements(self, create):
150 def _baserequirements(self, create):
151 return self.requirements[:]
151 return self.requirements[:]
152
152
153 def __init__(self, baseui, path=None, create=False):
153 def __init__(self, baseui, path=None, create=False):
154 self.wvfs = scmutil.vfs(path, expand=True)
154 self.wvfs = scmutil.vfs(path, expand=True)
155 self.wopener = self.wvfs
155 self.wopener = self.wvfs
156 self.root = self.wvfs.base
156 self.root = self.wvfs.base
157 self.path = self.wvfs.join(".hg")
157 self.path = self.wvfs.join(".hg")
158 self.origroot = path
158 self.origroot = path
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 self.vfs = scmutil.vfs(self.path)
160 self.vfs = scmutil.vfs(self.path)
161 self.opener = self.vfs
161 self.opener = self.vfs
162 self.baseui = baseui
162 self.baseui = baseui
163 self.ui = baseui.copy()
163 self.ui = baseui.copy()
164 # A list of callback to shape the phase if no data were found.
164 # A list of callback to shape the phase if no data were found.
165 # Callback are in the form: func(repo, roots) --> processed root.
165 # Callback are in the form: func(repo, roots) --> processed root.
166 # This list it to be filled by extension during repo setup
166 # This list it to be filled by extension during repo setup
167 self._phasedefaults = []
167 self._phasedefaults = []
168 try:
168 try:
169 self.ui.readconfig(self.join("hgrc"), self.root)
169 self.ui.readconfig(self.join("hgrc"), self.root)
170 extensions.loadall(self.ui)
170 extensions.loadall(self.ui)
171 except IOError:
171 except IOError:
172 pass
172 pass
173
173
174 if not self.vfs.isdir():
174 if not self.vfs.isdir():
175 if create:
175 if create:
176 if not self.wvfs.exists():
176 if not self.wvfs.exists():
177 self.wvfs.makedirs()
177 self.wvfs.makedirs()
178 self.vfs.makedir(notindexed=True)
178 self.vfs.makedir(notindexed=True)
179 requirements = self._baserequirements(create)
179 requirements = self._baserequirements(create)
180 if self.ui.configbool('format', 'usestore', True):
180 if self.ui.configbool('format', 'usestore', True):
181 self.vfs.mkdir("store")
181 self.vfs.mkdir("store")
182 requirements.append("store")
182 requirements.append("store")
183 if self.ui.configbool('format', 'usefncache', True):
183 if self.ui.configbool('format', 'usefncache', True):
184 requirements.append("fncache")
184 requirements.append("fncache")
185 if self.ui.configbool('format', 'dotencode', True):
185 if self.ui.configbool('format', 'dotencode', True):
186 requirements.append('dotencode')
186 requirements.append('dotencode')
187 # create an invalid changelog
187 # create an invalid changelog
188 self.vfs.append(
188 self.vfs.append(
189 "00changelog.i",
189 "00changelog.i",
190 '\0\0\0\2' # represents revlogv2
190 '\0\0\0\2' # represents revlogv2
191 ' dummy changelog to prevent using the old repo layout'
191 ' dummy changelog to prevent using the old repo layout'
192 )
192 )
193 if self.ui.configbool('format', 'generaldelta', False):
193 if self.ui.configbool('format', 'generaldelta', False):
194 requirements.append("generaldelta")
194 requirements.append("generaldelta")
195 requirements = set(requirements)
195 requirements = set(requirements)
196 else:
196 else:
197 raise error.RepoError(_("repository %s not found") % path)
197 raise error.RepoError(_("repository %s not found") % path)
198 elif create:
198 elif create:
199 raise error.RepoError(_("repository %s already exists") % path)
199 raise error.RepoError(_("repository %s already exists") % path)
200 else:
200 else:
201 try:
201 try:
202 requirements = scmutil.readrequires(self.vfs, self.supported)
202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 except IOError, inst:
203 except IOError, inst:
204 if inst.errno != errno.ENOENT:
204 if inst.errno != errno.ENOENT:
205 raise
205 raise
206 requirements = set()
206 requirements = set()
207
207
208 self.sharedpath = self.path
208 self.sharedpath = self.path
209 try:
209 try:
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 if not os.path.exists(s):
211 if not os.path.exists(s):
212 raise error.RepoError(
212 raise error.RepoError(
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 self.sharedpath = s
214 self.sharedpath = s
215 except IOError, inst:
215 except IOError, inst:
216 if inst.errno != errno.ENOENT:
216 if inst.errno != errno.ENOENT:
217 raise
217 raise
218
218
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 self.spath = self.store.path
220 self.spath = self.store.path
221 self.svfs = self.store.vfs
221 self.svfs = self.store.vfs
222 self.sopener = self.svfs
222 self.sopener = self.svfs
223 self.sjoin = self.store.join
223 self.sjoin = self.store.join
224 self.vfs.createmode = self.store.createmode
224 self.vfs.createmode = self.store.createmode
225 self._applyrequirements(requirements)
225 self._applyrequirements(requirements)
226 if create:
226 if create:
227 self._writerequirements()
227 self._writerequirements()
228
228
229
229
230 self._branchcache = None
230 self._branchcache = None
231 self._branchcachetip = None
231 self._branchcachetip = None
232 self.filterpats = {}
232 self.filterpats = {}
233 self._datafilters = {}
233 self._datafilters = {}
234 self._transref = self._lockref = self._wlockref = None
234 self._transref = self._lockref = self._wlockref = None
235
235
236 # A cache for various files under .hg/ that tracks file changes,
236 # A cache for various files under .hg/ that tracks file changes,
237 # (used by the filecache decorator)
237 # (used by the filecache decorator)
238 #
238 #
239 # Maps a property name to its util.filecacheentry
239 # Maps a property name to its util.filecacheentry
240 self._filecache = {}
240 self._filecache = {}
241
241
242 # hold sets of revision to be filtered
242 # hold sets of revision to be filtered
243 # should be cleared when something might have changed the filter value:
243 # should be cleared when something might have changed the filter value:
244 # - new changesets,
244 # - new changesets,
245 # - phase change,
245 # - phase change,
246 # - new obsolescence marker,
246 # - new obsolescence marker,
247 # - working directory parent change,
247 # - working directory parent change,
248 # - bookmark changes
248 # - bookmark changes
249 self.filteredrevcache = {}
249 self.filteredrevcache = {}
250
250
251 def close(self):
251 def close(self):
252 pass
252 pass
253
253
254 def _restrictcapabilities(self, caps):
254 def _restrictcapabilities(self, caps):
255 return caps
255 return caps
256
256
257 def _applyrequirements(self, requirements):
257 def _applyrequirements(self, requirements):
258 self.requirements = requirements
258 self.requirements = requirements
259 self.sopener.options = dict((r, 1) for r in requirements
259 self.sopener.options = dict((r, 1) for r in requirements
260 if r in self.openerreqs)
260 if r in self.openerreqs)
261
261
262 def _writerequirements(self):
262 def _writerequirements(self):
263 reqfile = self.opener("requires", "w")
263 reqfile = self.opener("requires", "w")
264 for r in self.requirements:
264 for r in self.requirements:
265 reqfile.write("%s\n" % r)
265 reqfile.write("%s\n" % r)
266 reqfile.close()
266 reqfile.close()
267
267
268 def _checknested(self, path):
268 def _checknested(self, path):
269 """Determine if path is a legal nested repository."""
269 """Determine if path is a legal nested repository."""
270 if not path.startswith(self.root):
270 if not path.startswith(self.root):
271 return False
271 return False
272 subpath = path[len(self.root) + 1:]
272 subpath = path[len(self.root) + 1:]
273 normsubpath = util.pconvert(subpath)
273 normsubpath = util.pconvert(subpath)
274
274
275 # XXX: Checking against the current working copy is wrong in
275 # XXX: Checking against the current working copy is wrong in
276 # the sense that it can reject things like
276 # the sense that it can reject things like
277 #
277 #
278 # $ hg cat -r 10 sub/x.txt
278 # $ hg cat -r 10 sub/x.txt
279 #
279 #
280 # if sub/ is no longer a subrepository in the working copy
280 # if sub/ is no longer a subrepository in the working copy
281 # parent revision.
281 # parent revision.
282 #
282 #
283 # However, it can of course also allow things that would have
283 # However, it can of course also allow things that would have
284 # been rejected before, such as the above cat command if sub/
284 # been rejected before, such as the above cat command if sub/
285 # is a subrepository now, but was a normal directory before.
285 # is a subrepository now, but was a normal directory before.
286 # The old path auditor would have rejected by mistake since it
286 # The old path auditor would have rejected by mistake since it
287 # panics when it sees sub/.hg/.
287 # panics when it sees sub/.hg/.
288 #
288 #
289 # All in all, checking against the working copy seems sensible
289 # All in all, checking against the working copy seems sensible
290 # since we want to prevent access to nested repositories on
290 # since we want to prevent access to nested repositories on
291 # the filesystem *now*.
291 # the filesystem *now*.
292 ctx = self[None]
292 ctx = self[None]
293 parts = util.splitpath(subpath)
293 parts = util.splitpath(subpath)
294 while parts:
294 while parts:
295 prefix = '/'.join(parts)
295 prefix = '/'.join(parts)
296 if prefix in ctx.substate:
296 if prefix in ctx.substate:
297 if prefix == normsubpath:
297 if prefix == normsubpath:
298 return True
298 return True
299 else:
299 else:
300 sub = ctx.sub(prefix)
300 sub = ctx.sub(prefix)
301 return sub.checknested(subpath[len(prefix) + 1:])
301 return sub.checknested(subpath[len(prefix) + 1:])
302 else:
302 else:
303 parts.pop()
303 parts.pop()
304 return False
304 return False
305
305
306 def peer(self):
306 def peer(self):
307 return localpeer(self) # not cached to avoid reference cycle
307 return localpeer(self) # not cached to avoid reference cycle
308
308
309 def unfiltered(self):
309 def unfiltered(self):
310 """Return unfiltered version of the repository
310 """Return unfiltered version of the repository
311
311
312 Intended to be ovewritten by filtered repo."""
312 Intended to be ovewritten by filtered repo."""
313 return self
313 return self
314
314
315 def filtered(self, name):
315 def filtered(self, name):
316 """Return a filtered version of a repository"""
316 """Return a filtered version of a repository"""
317 # build a new class with the mixin and the current class
317 # build a new class with the mixin and the current class
318 # (possibily subclass of the repo)
318 # (possibily subclass of the repo)
319 class proxycls(repoview.repoview, self.unfiltered().__class__):
319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 pass
320 pass
321 return proxycls(self, name)
321 return proxycls(self, name)
322
322
323 @repofilecache('bookmarks')
323 @repofilecache('bookmarks')
324 def _bookmarks(self):
324 def _bookmarks(self):
325 return bookmarks.bmstore(self)
325 return bookmarks.bmstore(self)
326
326
327 @repofilecache('bookmarks.current')
327 @repofilecache('bookmarks.current')
328 def _bookmarkcurrent(self):
328 def _bookmarkcurrent(self):
329 return bookmarks.readcurrent(self)
329 return bookmarks.readcurrent(self)
330
330
331 def bookmarkheads(self, bookmark):
331 def bookmarkheads(self, bookmark):
332 name = bookmark.split('@', 1)[0]
332 name = bookmark.split('@', 1)[0]
333 heads = []
333 heads = []
334 for mark, n in self._bookmarks.iteritems():
334 for mark, n in self._bookmarks.iteritems():
335 if mark.split('@', 1)[0] == name:
335 if mark.split('@', 1)[0] == name:
336 heads.append(n)
336 heads.append(n)
337 return heads
337 return heads
338
338
339 @storecache('phaseroots')
339 @storecache('phaseroots')
340 def _phasecache(self):
340 def _phasecache(self):
341 return phases.phasecache(self, self._phasedefaults)
341 return phases.phasecache(self, self._phasedefaults)
342
342
343 @storecache('obsstore')
343 @storecache('obsstore')
344 def obsstore(self):
344 def obsstore(self):
345 store = obsolete.obsstore(self.sopener)
345 store = obsolete.obsstore(self.sopener)
346 if store and not obsolete._enabled:
346 if store and not obsolete._enabled:
347 # message is rare enough to not be translated
347 # message is rare enough to not be translated
348 msg = 'obsolete feature not enabled but %i markers found!\n'
348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 self.ui.warn(msg % len(list(store)))
349 self.ui.warn(msg % len(list(store)))
350 return store
350 return store
351
351
352 @unfilteredpropertycache
352 @unfilteredpropertycache
353 def hiddenrevs(self):
353 def hiddenrevs(self):
354 """hiddenrevs: revs that should be hidden by command and tools
354 """hiddenrevs: revs that should be hidden by command and tools
355
355
356 This set is carried on the repo to ease initialization and lazy
356 This set is carried on the repo to ease initialization and lazy
357 loading; it'll probably move back to changelog for efficiency and
357 loading; it'll probably move back to changelog for efficiency and
358 consistency reasons.
358 consistency reasons.
359
359
360 Note that the hiddenrevs will needs invalidations when
360 Note that the hiddenrevs will needs invalidations when
361 - a new changesets is added (possible unstable above extinct)
361 - a new changesets is added (possible unstable above extinct)
362 - a new obsolete marker is added (possible new extinct changeset)
362 - a new obsolete marker is added (possible new extinct changeset)
363
363
364 hidden changesets cannot have non-hidden descendants
364 hidden changesets cannot have non-hidden descendants
365 """
365 """
366 hidden = set()
366 hidden = set()
367 if self.obsstore:
367 if self.obsstore:
368 ### hide extinct changeset that are not accessible by any mean
368 ### hide extinct changeset that are not accessible by any mean
369 hiddenquery = 'extinct() - ::(. + bookmark())'
369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hidden.update(self.revs(hiddenquery))
370 hidden.update(self.revs(hiddenquery))
371 return hidden
371 return hidden
372
372
373 @storecache('00changelog.i')
373 @storecache('00changelog.i')
374 def changelog(self):
374 def changelog(self):
375 c = changelog.changelog(self.sopener)
375 c = changelog.changelog(self.sopener)
376 if 'HG_PENDING' in os.environ:
376 if 'HG_PENDING' in os.environ:
377 p = os.environ['HG_PENDING']
377 p = os.environ['HG_PENDING']
378 if p.startswith(self.root):
378 if p.startswith(self.root):
379 c.readpending('00changelog.i.a')
379 c.readpending('00changelog.i.a')
380 return c
380 return c
381
381
382 @storecache('00manifest.i')
382 @storecache('00manifest.i')
383 def manifest(self):
383 def manifest(self):
384 return manifest.manifest(self.sopener)
384 return manifest.manifest(self.sopener)
385
385
386 @repofilecache('dirstate')
386 @repofilecache('dirstate')
387 def dirstate(self):
387 def dirstate(self):
388 warned = [0]
388 warned = [0]
389 def validate(node):
389 def validate(node):
390 try:
390 try:
391 self.changelog.rev(node)
391 self.changelog.rev(node)
392 return node
392 return node
393 except error.LookupError:
393 except error.LookupError:
394 if not warned[0]:
394 if not warned[0]:
395 warned[0] = True
395 warned[0] = True
396 self.ui.warn(_("warning: ignoring unknown"
396 self.ui.warn(_("warning: ignoring unknown"
397 " working parent %s!\n") % short(node))
397 " working parent %s!\n") % short(node))
398 return nullid
398 return nullid
399
399
400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401
401
402 def __getitem__(self, changeid):
402 def __getitem__(self, changeid):
403 if changeid is None:
403 if changeid is None:
404 return context.workingctx(self)
404 return context.workingctx(self)
405 return context.changectx(self, changeid)
405 return context.changectx(self, changeid)
406
406
407 def __contains__(self, changeid):
407 def __contains__(self, changeid):
408 try:
408 try:
409 return bool(self.lookup(changeid))
409 return bool(self.lookup(changeid))
410 except error.RepoLookupError:
410 except error.RepoLookupError:
411 return False
411 return False
412
412
413 def __nonzero__(self):
413 def __nonzero__(self):
414 return True
414 return True
415
415
416 def __len__(self):
416 def __len__(self):
417 return len(self.changelog)
417 return len(self.changelog)
418
418
419 def __iter__(self):
419 def __iter__(self):
420 return iter(self.changelog)
420 return iter(self.changelog)
421
421
422 def revs(self, expr, *args):
422 def revs(self, expr, *args):
423 '''Return a list of revisions matching the given revset'''
423 '''Return a list of revisions matching the given revset'''
424 expr = revset.formatspec(expr, *args)
424 expr = revset.formatspec(expr, *args)
425 m = revset.match(None, expr)
425 m = revset.match(None, expr)
426 return [r for r in m(self, list(self))]
426 return [r for r in m(self, list(self))]
427
427
428 def set(self, expr, *args):
428 def set(self, expr, *args):
429 '''
429 '''
430 Yield a context for each matching revision, after doing arg
430 Yield a context for each matching revision, after doing arg
431 replacement via revset.formatspec
431 replacement via revset.formatspec
432 '''
432 '''
433 for r in self.revs(expr, *args):
433 for r in self.revs(expr, *args):
434 yield self[r]
434 yield self[r]
435
435
436 def url(self):
436 def url(self):
437 return 'file:' + self.root
437 return 'file:' + self.root
438
438
439 def hook(self, name, throw=False, **args):
439 def hook(self, name, throw=False, **args):
440 return hook.hook(self.ui, self, name, throw, **args)
440 return hook.hook(self.ui, self, name, throw, **args)
441
441
442 @unfilteredmethod
442 @unfilteredmethod
443 def _tag(self, names, node, message, local, user, date, extra={}):
443 def _tag(self, names, node, message, local, user, date, extra={}):
444 if isinstance(names, str):
444 if isinstance(names, str):
445 names = (names,)
445 names = (names,)
446
446
447 branches = self.branchmap()
447 branches = self.branchmap()
448 for name in names:
448 for name in names:
449 self.hook('pretag', throw=True, node=hex(node), tag=name,
449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 local=local)
450 local=local)
451 if name in branches:
451 if name in branches:
452 self.ui.warn(_("warning: tag %s conflicts with existing"
452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 " branch name\n") % name)
453 " branch name\n") % name)
454
454
455 def writetags(fp, names, munge, prevtags):
455 def writetags(fp, names, munge, prevtags):
456 fp.seek(0, 2)
456 fp.seek(0, 2)
457 if prevtags and prevtags[-1] != '\n':
457 if prevtags and prevtags[-1] != '\n':
458 fp.write('\n')
458 fp.write('\n')
459 for name in names:
459 for name in names:
460 m = munge and munge(name) or name
460 m = munge and munge(name) or name
461 if (self._tagscache.tagtypes and
461 if (self._tagscache.tagtypes and
462 name in self._tagscache.tagtypes):
462 name in self._tagscache.tagtypes):
463 old = self.tags().get(name, nullid)
463 old = self.tags().get(name, nullid)
464 fp.write('%s %s\n' % (hex(old), m))
464 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(node), m))
465 fp.write('%s %s\n' % (hex(node), m))
466 fp.close()
466 fp.close()
467
467
468 prevtags = ''
468 prevtags = ''
469 if local:
469 if local:
470 try:
470 try:
471 fp = self.opener('localtags', 'r+')
471 fp = self.opener('localtags', 'r+')
472 except IOError:
472 except IOError:
473 fp = self.opener('localtags', 'a')
473 fp = self.opener('localtags', 'a')
474 else:
474 else:
475 prevtags = fp.read()
475 prevtags = fp.read()
476
476
477 # local tags are stored in the current charset
477 # local tags are stored in the current charset
478 writetags(fp, names, None, prevtags)
478 writetags(fp, names, None, prevtags)
479 for name in names:
479 for name in names:
480 self.hook('tag', node=hex(node), tag=name, local=local)
480 self.hook('tag', node=hex(node), tag=name, local=local)
481 return
481 return
482
482
483 try:
483 try:
484 fp = self.wfile('.hgtags', 'rb+')
484 fp = self.wfile('.hgtags', 'rb+')
485 except IOError, e:
485 except IOError, e:
486 if e.errno != errno.ENOENT:
486 if e.errno != errno.ENOENT:
487 raise
487 raise
488 fp = self.wfile('.hgtags', 'ab')
488 fp = self.wfile('.hgtags', 'ab')
489 else:
489 else:
490 prevtags = fp.read()
490 prevtags = fp.read()
491
491
492 # committed tags are stored in UTF-8
492 # committed tags are stored in UTF-8
493 writetags(fp, names, encoding.fromlocal, prevtags)
493 writetags(fp, names, encoding.fromlocal, prevtags)
494
494
495 fp.close()
495 fp.close()
496
496
497 self.invalidatecaches()
497 self.invalidatecaches()
498
498
499 if '.hgtags' not in self.dirstate:
499 if '.hgtags' not in self.dirstate:
500 self[None].add(['.hgtags'])
500 self[None].add(['.hgtags'])
501
501
502 m = matchmod.exact(self.root, '', ['.hgtags'])
502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 tagnode = self.commit(message, user, date, extra=extra, match=m)
503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504
504
505 for name in names:
505 for name in names:
506 self.hook('tag', node=hex(node), tag=name, local=local)
506 self.hook('tag', node=hex(node), tag=name, local=local)
507
507
508 return tagnode
508 return tagnode
509
509
510 def tag(self, names, node, message, local, user, date):
510 def tag(self, names, node, message, local, user, date):
511 '''tag a revision with one or more symbolic names.
511 '''tag a revision with one or more symbolic names.
512
512
513 names is a list of strings or, when adding a single tag, names may be a
513 names is a list of strings or, when adding a single tag, names may be a
514 string.
514 string.
515
515
516 if local is True, the tags are stored in a per-repository file.
516 if local is True, the tags are stored in a per-repository file.
517 otherwise, they are stored in the .hgtags file, and a new
517 otherwise, they are stored in the .hgtags file, and a new
518 changeset is committed with the change.
518 changeset is committed with the change.
519
519
520 keyword arguments:
520 keyword arguments:
521
521
522 local: whether to store tags in non-version-controlled file
522 local: whether to store tags in non-version-controlled file
523 (default False)
523 (default False)
524
524
525 message: commit message to use if committing
525 message: commit message to use if committing
526
526
527 user: name of user to use if committing
527 user: name of user to use if committing
528
528
529 date: date tuple to use if committing'''
529 date: date tuple to use if committing'''
530
530
531 if not local:
531 if not local:
532 for x in self.status()[:5]:
532 for x in self.status()[:5]:
533 if '.hgtags' in x:
533 if '.hgtags' in x:
534 raise util.Abort(_('working copy of .hgtags is changed '
534 raise util.Abort(_('working copy of .hgtags is changed '
535 '(please commit .hgtags manually)'))
535 '(please commit .hgtags manually)'))
536
536
537 self.tags() # instantiate the cache
537 self.tags() # instantiate the cache
538 self._tag(names, node, message, local, user, date)
538 self._tag(names, node, message, local, user, date)
539
539
540 @filteredpropertycache
540 @filteredpropertycache
541 def _tagscache(self):
541 def _tagscache(self):
542 '''Returns a tagscache object that contains various tags related
542 '''Returns a tagscache object that contains various tags related
543 caches.'''
543 caches.'''
544
544
545 # This simplifies its cache management by having one decorated
545 # This simplifies its cache management by having one decorated
546 # function (this one) and the rest simply fetch things from it.
546 # function (this one) and the rest simply fetch things from it.
547 class tagscache(object):
547 class tagscache(object):
548 def __init__(self):
548 def __init__(self):
549 # These two define the set of tags for this repository. tags
549 # These two define the set of tags for this repository. tags
550 # maps tag name to node; tagtypes maps tag name to 'global' or
550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 # 'local'. (Global tags are defined by .hgtags across all
551 # 'local'. (Global tags are defined by .hgtags across all
552 # heads, and local tags are defined in .hg/localtags.)
552 # heads, and local tags are defined in .hg/localtags.)
553 # They constitute the in-memory cache of tags.
553 # They constitute the in-memory cache of tags.
554 self.tags = self.tagtypes = None
554 self.tags = self.tagtypes = None
555
555
556 self.nodetagscache = self.tagslist = None
556 self.nodetagscache = self.tagslist = None
557
557
558 cache = tagscache()
558 cache = tagscache()
559 cache.tags, cache.tagtypes = self._findtags()
559 cache.tags, cache.tagtypes = self._findtags()
560
560
561 return cache
561 return cache
562
562
563 def tags(self):
563 def tags(self):
564 '''return a mapping of tag to node'''
564 '''return a mapping of tag to node'''
565 t = {}
565 t = {}
566 if self.changelog.filteredrevs:
566 if self.changelog.filteredrevs:
567 tags, tt = self._findtags()
567 tags, tt = self._findtags()
568 else:
568 else:
569 tags = self._tagscache.tags
569 tags = self._tagscache.tags
570 for k, v in tags.iteritems():
570 for k, v in tags.iteritems():
571 try:
571 try:
572 # ignore tags to unknown nodes
572 # ignore tags to unknown nodes
573 self.changelog.rev(v)
573 self.changelog.rev(v)
574 t[k] = v
574 t[k] = v
575 except (error.LookupError, ValueError):
575 except (error.LookupError, ValueError):
576 pass
576 pass
577 return t
577 return t
578
578
579 def _findtags(self):
579 def _findtags(self):
580 '''Do the hard work of finding tags. Return a pair of dicts
580 '''Do the hard work of finding tags. Return a pair of dicts
581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 maps tag name to a string like \'global\' or \'local\'.
582 maps tag name to a string like \'global\' or \'local\'.
583 Subclasses or extensions are free to add their own tags, but
583 Subclasses or extensions are free to add their own tags, but
584 should be aware that the returned dicts will be retained for the
584 should be aware that the returned dicts will be retained for the
585 duration of the localrepo object.'''
585 duration of the localrepo object.'''
586
586
587 # XXX what tagtype should subclasses/extensions use? Currently
587 # XXX what tagtype should subclasses/extensions use? Currently
588 # mq and bookmarks add tags, but do not set the tagtype at all.
588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 # Should each extension invent its own tag type? Should there
589 # Should each extension invent its own tag type? Should there
590 # be one tagtype for all such "virtual" tags? Or is the status
590 # be one tagtype for all such "virtual" tags? Or is the status
591 # quo fine?
591 # quo fine?
592
592
593 alltags = {} # map tag name to (node, hist)
593 alltags = {} # map tag name to (node, hist)
594 tagtypes = {}
594 tagtypes = {}
595
595
596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598
598
599 # Build the return dicts. Have to re-encode tag names because
599 # Build the return dicts. Have to re-encode tag names because
600 # the tags module always uses UTF-8 (in order not to lose info
600 # the tags module always uses UTF-8 (in order not to lose info
601 # writing to the cache), but the rest of Mercurial wants them in
601 # writing to the cache), but the rest of Mercurial wants them in
602 # local encoding.
602 # local encoding.
603 tags = {}
603 tags = {}
604 for (name, (node, hist)) in alltags.iteritems():
604 for (name, (node, hist)) in alltags.iteritems():
605 if node != nullid:
605 if node != nullid:
606 tags[encoding.tolocal(name)] = node
606 tags[encoding.tolocal(name)] = node
607 tags['tip'] = self.changelog.tip()
607 tags['tip'] = self.changelog.tip()
608 tagtypes = dict([(encoding.tolocal(name), value)
608 tagtypes = dict([(encoding.tolocal(name), value)
609 for (name, value) in tagtypes.iteritems()])
609 for (name, value) in tagtypes.iteritems()])
610 return (tags, tagtypes)
610 return (tags, tagtypes)
611
611
612 def tagtype(self, tagname):
612 def tagtype(self, tagname):
613 '''
613 '''
614 return the type of the given tag. result can be:
614 return the type of the given tag. result can be:
615
615
616 'local' : a local tag
616 'local' : a local tag
617 'global' : a global tag
617 'global' : a global tag
618 None : tag does not exist
618 None : tag does not exist
619 '''
619 '''
620
620
621 return self._tagscache.tagtypes.get(tagname)
621 return self._tagscache.tagtypes.get(tagname)
622
622
623 def tagslist(self):
623 def tagslist(self):
624 '''return a list of tags ordered by revision'''
624 '''return a list of tags ordered by revision'''
625 if not self._tagscache.tagslist:
625 if not self._tagscache.tagslist:
626 l = []
626 l = []
627 for t, n in self.tags().iteritems():
627 for t, n in self.tags().iteritems():
628 r = self.changelog.rev(n)
628 r = self.changelog.rev(n)
629 l.append((r, t, n))
629 l.append((r, t, n))
630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631
631
632 return self._tagscache.tagslist
632 return self._tagscache.tagslist
633
633
634 def nodetags(self, node):
634 def nodetags(self, node):
635 '''return the tags associated with a node'''
635 '''return the tags associated with a node'''
636 if not self._tagscache.nodetagscache:
636 if not self._tagscache.nodetagscache:
637 nodetagscache = {}
637 nodetagscache = {}
638 for t, n in self._tagscache.tags.iteritems():
638 for t, n in self._tagscache.tags.iteritems():
639 nodetagscache.setdefault(n, []).append(t)
639 nodetagscache.setdefault(n, []).append(t)
640 for tags in nodetagscache.itervalues():
640 for tags in nodetagscache.itervalues():
641 tags.sort()
641 tags.sort()
642 self._tagscache.nodetagscache = nodetagscache
642 self._tagscache.nodetagscache = nodetagscache
643 return self._tagscache.nodetagscache.get(node, [])
643 return self._tagscache.nodetagscache.get(node, [])
644
644
645 def nodebookmarks(self, node):
645 def nodebookmarks(self, node):
646 marks = []
646 marks = []
647 for bookmark, n in self._bookmarks.iteritems():
647 for bookmark, n in self._bookmarks.iteritems():
648 if n == node:
648 if n == node:
649 marks.append(bookmark)
649 marks.append(bookmark)
650 return sorted(marks)
650 return sorted(marks)
651
651
652 def _cacheabletip(self):
653 """tip-most revision stable enought to used in persistent cache
654
655 This function is overwritten by MQ to ensure we do not write cache for
656 a part of the history that will likely change.
657
658 Efficient handling of filtered revision in branchcache should offer a
659 better alternative. But we are using this approach until it is ready.
660 """
661 cl = self.changelog
662 return cl.rev(cl.tip())
663
652 def _branchtags(self, partial, lrev):
664 def _branchtags(self, partial, lrev):
653 # TODO: rename this function?
665 # TODO: rename this function?
666 cl = self.changelog
667 catip = self._cacheabletip()
668 # if lrev == catip: cache is already up to date
669 # if lrev > catip: we have uncachable element in `partial` can't write
670 # on disk
671 if lrev < catip:
672 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
673 self._updatebranchcache(partial, ctxgen)
674 self._writebranchcache(partial, cl.node(catip), catip)
675 lrev = catip
676 # If cacheable tip were lower than actual tip, we need to update the
677 # cache up to tip. This update (from cacheable to actual tip) is not
678 # written to disk since it's not cacheable.
654 tiprev = len(self) - 1
679 tiprev = len(self) - 1
655 if lrev != tiprev:
680 if lrev < tiprev:
656 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
681 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
657 self._updatebranchcache(partial, ctxgen)
682 self._updatebranchcache(partial, ctxgen)
658 self._writebranchcache(partial, self.changelog.tip(), tiprev)
659
660 return partial
683 return partial
661
684
662 @unfilteredmethod # Until we get a smarter cache management
685 @unfilteredmethod # Until we get a smarter cache management
663 def updatebranchcache(self):
686 def updatebranchcache(self):
664 tip = self.changelog.tip()
687 tip = self.changelog.tip()
665 if self._branchcache is not None and self._branchcachetip == tip:
688 if self._branchcache is not None and self._branchcachetip == tip:
666 return
689 return
667
690
668 oldtip = self._branchcachetip
691 oldtip = self._branchcachetip
669 self._branchcachetip = tip
692 self._branchcachetip = tip
670 if oldtip is None or oldtip not in self.changelog.nodemap:
693 if oldtip is None or oldtip not in self.changelog.nodemap:
671 partial, last, lrev = self._readbranchcache()
694 partial, last, lrev = self._readbranchcache()
672 else:
695 else:
673 lrev = self.changelog.rev(oldtip)
696 lrev = self.changelog.rev(oldtip)
674 partial = self._branchcache
697 partial = self._branchcache
675
698
676 self._branchtags(partial, lrev)
699 self._branchtags(partial, lrev)
677 # this private cache holds all heads (not just the branch tips)
700 # this private cache holds all heads (not just the branch tips)
678 self._branchcache = partial
701 self._branchcache = partial
679
702
680 def branchmap(self):
703 def branchmap(self):
681 '''returns a dictionary {branch: [branchheads]}'''
704 '''returns a dictionary {branch: [branchheads]}'''
682 if self.changelog.filteredrevs:
705 if self.changelog.filteredrevs:
683 # some changeset are excluded we can't use the cache
706 # some changeset are excluded we can't use the cache
684 branchmap = {}
707 branchmap = {}
685 self._updatebranchcache(branchmap, (self[r] for r in self))
708 self._updatebranchcache(branchmap, (self[r] for r in self))
686 return branchmap
709 return branchmap
687 else:
710 else:
688 self.updatebranchcache()
711 self.updatebranchcache()
689 return self._branchcache
712 return self._branchcache
690
713
691
714
692 def _branchtip(self, heads):
715 def _branchtip(self, heads):
693 '''return the tipmost branch head in heads'''
716 '''return the tipmost branch head in heads'''
694 tip = heads[-1]
717 tip = heads[-1]
695 for h in reversed(heads):
718 for h in reversed(heads):
696 if not self[h].closesbranch():
719 if not self[h].closesbranch():
697 tip = h
720 tip = h
698 break
721 break
699 return tip
722 return tip
700
723
701 def branchtip(self, branch):
724 def branchtip(self, branch):
702 '''return the tip node for a given branch'''
725 '''return the tip node for a given branch'''
703 if branch not in self.branchmap():
726 if branch not in self.branchmap():
704 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
727 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
705 return self._branchtip(self.branchmap()[branch])
728 return self._branchtip(self.branchmap()[branch])
706
729
707 def branchtags(self):
730 def branchtags(self):
708 '''return a dict where branch names map to the tipmost head of
731 '''return a dict where branch names map to the tipmost head of
709 the branch, open heads come before closed'''
732 the branch, open heads come before closed'''
710 bt = {}
733 bt = {}
711 for bn, heads in self.branchmap().iteritems():
734 for bn, heads in self.branchmap().iteritems():
712 bt[bn] = self._branchtip(heads)
735 bt[bn] = self._branchtip(heads)
713 return bt
736 return bt
714
737
715 @unfilteredmethod # Until we get a smarter cache management
738 @unfilteredmethod # Until we get a smarter cache management
716 def _readbranchcache(self):
739 def _readbranchcache(self):
717 partial = {}
740 partial = {}
718 try:
741 try:
719 f = self.opener("cache/branchheads")
742 f = self.opener("cache/branchheads")
720 lines = f.read().split('\n')
743 lines = f.read().split('\n')
721 f.close()
744 f.close()
722 except (IOError, OSError):
745 except (IOError, OSError):
723 return {}, nullid, nullrev
746 return {}, nullid, nullrev
724
747
725 try:
748 try:
726 last, lrev = lines.pop(0).split(" ", 1)
749 last, lrev = lines.pop(0).split(" ", 1)
727 last, lrev = bin(last), int(lrev)
750 last, lrev = bin(last), int(lrev)
728 if lrev >= len(self) or self[lrev].node() != last:
751 if lrev >= len(self) or self[lrev].node() != last:
729 # invalidate the cache
752 # invalidate the cache
730 raise ValueError('invalidating branch cache (tip differs)')
753 raise ValueError('invalidating branch cache (tip differs)')
731 for l in lines:
754 for l in lines:
732 if not l:
755 if not l:
733 continue
756 continue
734 node, label = l.split(" ", 1)
757 node, label = l.split(" ", 1)
735 label = encoding.tolocal(label.strip())
758 label = encoding.tolocal(label.strip())
736 if not node in self:
759 if not node in self:
737 raise ValueError('invalidating branch cache because node '+
760 raise ValueError('invalidating branch cache because node '+
738 '%s does not exist' % node)
761 '%s does not exist' % node)
739 partial.setdefault(label, []).append(bin(node))
762 partial.setdefault(label, []).append(bin(node))
740 except KeyboardInterrupt:
763 except KeyboardInterrupt:
741 raise
764 raise
742 except Exception, inst:
765 except Exception, inst:
743 if self.ui.debugflag:
766 if self.ui.debugflag:
744 self.ui.warn(str(inst), '\n')
767 self.ui.warn(str(inst), '\n')
745 partial, last, lrev = {}, nullid, nullrev
768 partial, last, lrev = {}, nullid, nullrev
746 return partial, last, lrev
769 return partial, last, lrev
747
770
748 @unfilteredmethod # Until we get a smarter cache management
771 @unfilteredmethod # Until we get a smarter cache management
749 def _writebranchcache(self, branches, tip, tiprev):
772 def _writebranchcache(self, branches, tip, tiprev):
750 try:
773 try:
751 f = self.opener("cache/branchheads", "w", atomictemp=True)
774 f = self.opener("cache/branchheads", "w", atomictemp=True)
752 f.write("%s %s\n" % (hex(tip), tiprev))
775 f.write("%s %s\n" % (hex(tip), tiprev))
753 for label, nodes in branches.iteritems():
776 for label, nodes in branches.iteritems():
754 for node in nodes:
777 for node in nodes:
755 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
778 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
756 f.close()
779 f.close()
757 except (IOError, OSError):
780 except (IOError, OSError):
758 pass
781 pass
759
782
760 @unfilteredmethod # Until we get a smarter cache management
783 @unfilteredmethod # Until we get a smarter cache management
761 def _updatebranchcache(self, partial, ctxgen):
784 def _updatebranchcache(self, partial, ctxgen):
762 """Given a branchhead cache, partial, that may have extra nodes or be
785 """Given a branchhead cache, partial, that may have extra nodes or be
763 missing heads, and a generator of nodes that are at least a superset of
786 missing heads, and a generator of nodes that are at least a superset of
764 heads missing, this function updates partial to be correct.
787 heads missing, this function updates partial to be correct.
765 """
788 """
766 # collect new branch entries
789 # collect new branch entries
767 newbranches = {}
790 newbranches = {}
768 for c in ctxgen:
791 for c in ctxgen:
769 newbranches.setdefault(c.branch(), []).append(c.node())
792 newbranches.setdefault(c.branch(), []).append(c.node())
770 # if older branchheads are reachable from new ones, they aren't
793 # if older branchheads are reachable from new ones, they aren't
771 # really branchheads. Note checking parents is insufficient:
794 # really branchheads. Note checking parents is insufficient:
772 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
795 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
773 for branch, newnodes in newbranches.iteritems():
796 for branch, newnodes in newbranches.iteritems():
774 bheads = partial.setdefault(branch, [])
797 bheads = partial.setdefault(branch, [])
775 # Remove candidate heads that no longer are in the repo (e.g., as
798 # Remove candidate heads that no longer are in the repo (e.g., as
776 # the result of a strip that just happened). Avoid using 'node in
799 # the result of a strip that just happened). Avoid using 'node in
777 # self' here because that dives down into branchcache code somewhat
800 # self' here because that dives down into branchcache code somewhat
778 # recursively.
801 # recursively.
779 bheadrevs = [self.changelog.rev(node) for node in bheads
802 bheadrevs = [self.changelog.rev(node) for node in bheads
780 if self.changelog.hasnode(node)]
803 if self.changelog.hasnode(node)]
781 newheadrevs = [self.changelog.rev(node) for node in newnodes
804 newheadrevs = [self.changelog.rev(node) for node in newnodes
782 if self.changelog.hasnode(node)]
805 if self.changelog.hasnode(node)]
783 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
806 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
784 # Remove duplicates - nodes that are in newheadrevs and are already
807 # Remove duplicates - nodes that are in newheadrevs and are already
785 # in bheadrevs. This can happen if you strip a node whose parent
808 # in bheadrevs. This can happen if you strip a node whose parent
786 # was already a head (because they're on different branches).
809 # was already a head (because they're on different branches).
787 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
810 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
788
811
789 # Starting from tip means fewer passes over reachable. If we know
812 # Starting from tip means fewer passes over reachable. If we know
790 # the new candidates are not ancestors of existing heads, we don't
813 # the new candidates are not ancestors of existing heads, we don't
791 # have to examine ancestors of existing heads
814 # have to examine ancestors of existing heads
792 if ctxisnew:
815 if ctxisnew:
793 iterrevs = sorted(newheadrevs)
816 iterrevs = sorted(newheadrevs)
794 else:
817 else:
795 iterrevs = list(bheadrevs)
818 iterrevs = list(bheadrevs)
796
819
797 # This loop prunes out two kinds of heads - heads that are
820 # This loop prunes out two kinds of heads - heads that are
798 # superseded by a head in newheadrevs, and newheadrevs that are not
821 # superseded by a head in newheadrevs, and newheadrevs that are not
799 # heads because an existing head is their descendant.
822 # heads because an existing head is their descendant.
800 while iterrevs:
823 while iterrevs:
801 latest = iterrevs.pop()
824 latest = iterrevs.pop()
802 if latest not in bheadrevs:
825 if latest not in bheadrevs:
803 continue
826 continue
804 ancestors = set(self.changelog.ancestors([latest],
827 ancestors = set(self.changelog.ancestors([latest],
805 bheadrevs[0]))
828 bheadrevs[0]))
806 if ancestors:
829 if ancestors:
807 bheadrevs = [b for b in bheadrevs if b not in ancestors]
830 bheadrevs = [b for b in bheadrevs if b not in ancestors]
808 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
831 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
809
832
810 # There may be branches that cease to exist when the last commit in the
833 # There may be branches that cease to exist when the last commit in the
811 # branch was stripped. This code filters them out. Note that the
834 # branch was stripped. This code filters them out. Note that the
812 # branch that ceased to exist may not be in newbranches because
835 # branch that ceased to exist may not be in newbranches because
813 # newbranches is the set of candidate heads, which when you strip the
836 # newbranches is the set of candidate heads, which when you strip the
814 # last commit in a branch will be the parent branch.
837 # last commit in a branch will be the parent branch.
815 for branch in partial.keys():
838 for branch in partial.keys():
816 nodes = [head for head in partial[branch]
839 nodes = [head for head in partial[branch]
817 if self.changelog.hasnode(head)]
840 if self.changelog.hasnode(head)]
818 if not nodes:
841 if not nodes:
819 del partial[branch]
842 del partial[branch]
820
843
821 def lookup(self, key):
844 def lookup(self, key):
822 return self[key].node()
845 return self[key].node()
823
846
824 def lookupbranch(self, key, remote=None):
847 def lookupbranch(self, key, remote=None):
825 repo = remote or self
848 repo = remote or self
826 if key in repo.branchmap():
849 if key in repo.branchmap():
827 return key
850 return key
828
851
829 repo = (remote and remote.local()) and remote or self
852 repo = (remote and remote.local()) and remote or self
830 return repo[key].branch()
853 return repo[key].branch()
831
854
832 def known(self, nodes):
855 def known(self, nodes):
833 nm = self.changelog.nodemap
856 nm = self.changelog.nodemap
834 pc = self._phasecache
857 pc = self._phasecache
835 result = []
858 result = []
836 for n in nodes:
859 for n in nodes:
837 r = nm.get(n)
860 r = nm.get(n)
838 resp = not (r is None or pc.phase(self, r) >= phases.secret)
861 resp = not (r is None or pc.phase(self, r) >= phases.secret)
839 result.append(resp)
862 result.append(resp)
840 return result
863 return result
841
864
842 def local(self):
865 def local(self):
843 return self
866 return self
844
867
845 def cancopy(self):
868 def cancopy(self):
846 return self.local() # so statichttprepo's override of local() works
869 return self.local() # so statichttprepo's override of local() works
847
870
848 def join(self, f):
871 def join(self, f):
849 return os.path.join(self.path, f)
872 return os.path.join(self.path, f)
850
873
851 def wjoin(self, f):
874 def wjoin(self, f):
852 return os.path.join(self.root, f)
875 return os.path.join(self.root, f)
853
876
854 def file(self, f):
877 def file(self, f):
855 if f[0] == '/':
878 if f[0] == '/':
856 f = f[1:]
879 f = f[1:]
857 return filelog.filelog(self.sopener, f)
880 return filelog.filelog(self.sopener, f)
858
881
859 def changectx(self, changeid):
882 def changectx(self, changeid):
860 return self[changeid]
883 return self[changeid]
861
884
862 def parents(self, changeid=None):
885 def parents(self, changeid=None):
863 '''get list of changectxs for parents of changeid'''
886 '''get list of changectxs for parents of changeid'''
864 return self[changeid].parents()
887 return self[changeid].parents()
865
888
866 def setparents(self, p1, p2=nullid):
889 def setparents(self, p1, p2=nullid):
867 copies = self.dirstate.setparents(p1, p2)
890 copies = self.dirstate.setparents(p1, p2)
868 if copies:
891 if copies:
869 # Adjust copy records, the dirstate cannot do it, it
892 # Adjust copy records, the dirstate cannot do it, it
870 # requires access to parents manifests. Preserve them
893 # requires access to parents manifests. Preserve them
871 # only for entries added to first parent.
894 # only for entries added to first parent.
872 pctx = self[p1]
895 pctx = self[p1]
873 for f in copies:
896 for f in copies:
874 if f not in pctx and copies[f] in pctx:
897 if f not in pctx and copies[f] in pctx:
875 self.dirstate.copy(copies[f], f)
898 self.dirstate.copy(copies[f], f)
876
899
877 def filectx(self, path, changeid=None, fileid=None):
900 def filectx(self, path, changeid=None, fileid=None):
878 """changeid can be a changeset revision, node, or tag.
901 """changeid can be a changeset revision, node, or tag.
879 fileid can be a file revision or node."""
902 fileid can be a file revision or node."""
880 return context.filectx(self, path, changeid, fileid)
903 return context.filectx(self, path, changeid, fileid)
881
904
882 def getcwd(self):
905 def getcwd(self):
883 return self.dirstate.getcwd()
906 return self.dirstate.getcwd()
884
907
885 def pathto(self, f, cwd=None):
908 def pathto(self, f, cwd=None):
886 return self.dirstate.pathto(f, cwd)
909 return self.dirstate.pathto(f, cwd)
887
910
888 def wfile(self, f, mode='r'):
911 def wfile(self, f, mode='r'):
889 return self.wopener(f, mode)
912 return self.wopener(f, mode)
890
913
891 def _link(self, f):
914 def _link(self, f):
892 return os.path.islink(self.wjoin(f))
915 return os.path.islink(self.wjoin(f))
893
916
894 def _loadfilter(self, filter):
917 def _loadfilter(self, filter):
895 if filter not in self.filterpats:
918 if filter not in self.filterpats:
896 l = []
919 l = []
897 for pat, cmd in self.ui.configitems(filter):
920 for pat, cmd in self.ui.configitems(filter):
898 if cmd == '!':
921 if cmd == '!':
899 continue
922 continue
900 mf = matchmod.match(self.root, '', [pat])
923 mf = matchmod.match(self.root, '', [pat])
901 fn = None
924 fn = None
902 params = cmd
925 params = cmd
903 for name, filterfn in self._datafilters.iteritems():
926 for name, filterfn in self._datafilters.iteritems():
904 if cmd.startswith(name):
927 if cmd.startswith(name):
905 fn = filterfn
928 fn = filterfn
906 params = cmd[len(name):].lstrip()
929 params = cmd[len(name):].lstrip()
907 break
930 break
908 if not fn:
931 if not fn:
909 fn = lambda s, c, **kwargs: util.filter(s, c)
932 fn = lambda s, c, **kwargs: util.filter(s, c)
910 # Wrap old filters not supporting keyword arguments
933 # Wrap old filters not supporting keyword arguments
911 if not inspect.getargspec(fn)[2]:
934 if not inspect.getargspec(fn)[2]:
912 oldfn = fn
935 oldfn = fn
913 fn = lambda s, c, **kwargs: oldfn(s, c)
936 fn = lambda s, c, **kwargs: oldfn(s, c)
914 l.append((mf, fn, params))
937 l.append((mf, fn, params))
915 self.filterpats[filter] = l
938 self.filterpats[filter] = l
916 return self.filterpats[filter]
939 return self.filterpats[filter]
917
940
918 def _filter(self, filterpats, filename, data):
941 def _filter(self, filterpats, filename, data):
919 for mf, fn, cmd in filterpats:
942 for mf, fn, cmd in filterpats:
920 if mf(filename):
943 if mf(filename):
921 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
944 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
922 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
945 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
923 break
946 break
924
947
925 return data
948 return data
926
949
927 @unfilteredpropertycache
950 @unfilteredpropertycache
928 def _encodefilterpats(self):
951 def _encodefilterpats(self):
929 return self._loadfilter('encode')
952 return self._loadfilter('encode')
930
953
931 @unfilteredpropertycache
954 @unfilteredpropertycache
932 def _decodefilterpats(self):
955 def _decodefilterpats(self):
933 return self._loadfilter('decode')
956 return self._loadfilter('decode')
934
957
935 def adddatafilter(self, name, filter):
958 def adddatafilter(self, name, filter):
936 self._datafilters[name] = filter
959 self._datafilters[name] = filter
937
960
938 def wread(self, filename):
961 def wread(self, filename):
939 if self._link(filename):
962 if self._link(filename):
940 data = os.readlink(self.wjoin(filename))
963 data = os.readlink(self.wjoin(filename))
941 else:
964 else:
942 data = self.wopener.read(filename)
965 data = self.wopener.read(filename)
943 return self._filter(self._encodefilterpats, filename, data)
966 return self._filter(self._encodefilterpats, filename, data)
944
967
945 def wwrite(self, filename, data, flags):
968 def wwrite(self, filename, data, flags):
946 data = self._filter(self._decodefilterpats, filename, data)
969 data = self._filter(self._decodefilterpats, filename, data)
947 if 'l' in flags:
970 if 'l' in flags:
948 self.wopener.symlink(data, filename)
971 self.wopener.symlink(data, filename)
949 else:
972 else:
950 self.wopener.write(filename, data)
973 self.wopener.write(filename, data)
951 if 'x' in flags:
974 if 'x' in flags:
952 util.setflags(self.wjoin(filename), False, True)
975 util.setflags(self.wjoin(filename), False, True)
953
976
954 def wwritedata(self, filename, data):
977 def wwritedata(self, filename, data):
955 return self._filter(self._decodefilterpats, filename, data)
978 return self._filter(self._decodefilterpats, filename, data)
956
979
957 def transaction(self, desc):
980 def transaction(self, desc):
958 tr = self._transref and self._transref() or None
981 tr = self._transref and self._transref() or None
959 if tr and tr.running():
982 if tr and tr.running():
960 return tr.nest()
983 return tr.nest()
961
984
962 # abort here if the journal already exists
985 # abort here if the journal already exists
963 if os.path.exists(self.sjoin("journal")):
986 if os.path.exists(self.sjoin("journal")):
964 raise error.RepoError(
987 raise error.RepoError(
965 _("abandoned transaction found - run hg recover"))
988 _("abandoned transaction found - run hg recover"))
966
989
967 self._writejournal(desc)
990 self._writejournal(desc)
968 renames = [(x, undoname(x)) for x in self._journalfiles()]
991 renames = [(x, undoname(x)) for x in self._journalfiles()]
969
992
970 tr = transaction.transaction(self.ui.warn, self.sopener,
993 tr = transaction.transaction(self.ui.warn, self.sopener,
971 self.sjoin("journal"),
994 self.sjoin("journal"),
972 aftertrans(renames),
995 aftertrans(renames),
973 self.store.createmode)
996 self.store.createmode)
974 self._transref = weakref.ref(tr)
997 self._transref = weakref.ref(tr)
975 return tr
998 return tr
976
999
977 def _journalfiles(self):
1000 def _journalfiles(self):
978 return (self.sjoin('journal'), self.join('journal.dirstate'),
1001 return (self.sjoin('journal'), self.join('journal.dirstate'),
979 self.join('journal.branch'), self.join('journal.desc'),
1002 self.join('journal.branch'), self.join('journal.desc'),
980 self.join('journal.bookmarks'),
1003 self.join('journal.bookmarks'),
981 self.sjoin('journal.phaseroots'))
1004 self.sjoin('journal.phaseroots'))
982
1005
983 def undofiles(self):
1006 def undofiles(self):
984 return [undoname(x) for x in self._journalfiles()]
1007 return [undoname(x) for x in self._journalfiles()]
985
1008
986 def _writejournal(self, desc):
1009 def _writejournal(self, desc):
987 self.opener.write("journal.dirstate",
1010 self.opener.write("journal.dirstate",
988 self.opener.tryread("dirstate"))
1011 self.opener.tryread("dirstate"))
989 self.opener.write("journal.branch",
1012 self.opener.write("journal.branch",
990 encoding.fromlocal(self.dirstate.branch()))
1013 encoding.fromlocal(self.dirstate.branch()))
991 self.opener.write("journal.desc",
1014 self.opener.write("journal.desc",
992 "%d\n%s\n" % (len(self), desc))
1015 "%d\n%s\n" % (len(self), desc))
993 self.opener.write("journal.bookmarks",
1016 self.opener.write("journal.bookmarks",
994 self.opener.tryread("bookmarks"))
1017 self.opener.tryread("bookmarks"))
995 self.sopener.write("journal.phaseroots",
1018 self.sopener.write("journal.phaseroots",
996 self.sopener.tryread("phaseroots"))
1019 self.sopener.tryread("phaseroots"))
997
1020
998 def recover(self):
1021 def recover(self):
999 lock = self.lock()
1022 lock = self.lock()
1000 try:
1023 try:
1001 if os.path.exists(self.sjoin("journal")):
1024 if os.path.exists(self.sjoin("journal")):
1002 self.ui.status(_("rolling back interrupted transaction\n"))
1025 self.ui.status(_("rolling back interrupted transaction\n"))
1003 transaction.rollback(self.sopener, self.sjoin("journal"),
1026 transaction.rollback(self.sopener, self.sjoin("journal"),
1004 self.ui.warn)
1027 self.ui.warn)
1005 self.invalidate()
1028 self.invalidate()
1006 return True
1029 return True
1007 else:
1030 else:
1008 self.ui.warn(_("no interrupted transaction available\n"))
1031 self.ui.warn(_("no interrupted transaction available\n"))
1009 return False
1032 return False
1010 finally:
1033 finally:
1011 lock.release()
1034 lock.release()
1012
1035
1013 def rollback(self, dryrun=False, force=False):
1036 def rollback(self, dryrun=False, force=False):
1014 wlock = lock = None
1037 wlock = lock = None
1015 try:
1038 try:
1016 wlock = self.wlock()
1039 wlock = self.wlock()
1017 lock = self.lock()
1040 lock = self.lock()
1018 if os.path.exists(self.sjoin("undo")):
1041 if os.path.exists(self.sjoin("undo")):
1019 return self._rollback(dryrun, force)
1042 return self._rollback(dryrun, force)
1020 else:
1043 else:
1021 self.ui.warn(_("no rollback information available\n"))
1044 self.ui.warn(_("no rollback information available\n"))
1022 return 1
1045 return 1
1023 finally:
1046 finally:
1024 release(lock, wlock)
1047 release(lock, wlock)
1025
1048
1026 @unfilteredmethod # Until we get smarter cache management
1049 @unfilteredmethod # Until we get smarter cache management
1027 def _rollback(self, dryrun, force):
1050 def _rollback(self, dryrun, force):
1028 ui = self.ui
1051 ui = self.ui
1029 try:
1052 try:
1030 args = self.opener.read('undo.desc').splitlines()
1053 args = self.opener.read('undo.desc').splitlines()
1031 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1054 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1032 if len(args) >= 3:
1055 if len(args) >= 3:
1033 detail = args[2]
1056 detail = args[2]
1034 oldtip = oldlen - 1
1057 oldtip = oldlen - 1
1035
1058
1036 if detail and ui.verbose:
1059 if detail and ui.verbose:
1037 msg = (_('repository tip rolled back to revision %s'
1060 msg = (_('repository tip rolled back to revision %s'
1038 ' (undo %s: %s)\n')
1061 ' (undo %s: %s)\n')
1039 % (oldtip, desc, detail))
1062 % (oldtip, desc, detail))
1040 else:
1063 else:
1041 msg = (_('repository tip rolled back to revision %s'
1064 msg = (_('repository tip rolled back to revision %s'
1042 ' (undo %s)\n')
1065 ' (undo %s)\n')
1043 % (oldtip, desc))
1066 % (oldtip, desc))
1044 except IOError:
1067 except IOError:
1045 msg = _('rolling back unknown transaction\n')
1068 msg = _('rolling back unknown transaction\n')
1046 desc = None
1069 desc = None
1047
1070
1048 if not force and self['.'] != self['tip'] and desc == 'commit':
1071 if not force and self['.'] != self['tip'] and desc == 'commit':
1049 raise util.Abort(
1072 raise util.Abort(
1050 _('rollback of last commit while not checked out '
1073 _('rollback of last commit while not checked out '
1051 'may lose data'), hint=_('use -f to force'))
1074 'may lose data'), hint=_('use -f to force'))
1052
1075
1053 ui.status(msg)
1076 ui.status(msg)
1054 if dryrun:
1077 if dryrun:
1055 return 0
1078 return 0
1056
1079
1057 parents = self.dirstate.parents()
1080 parents = self.dirstate.parents()
1058 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1081 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1059 if os.path.exists(self.join('undo.bookmarks')):
1082 if os.path.exists(self.join('undo.bookmarks')):
1060 util.rename(self.join('undo.bookmarks'),
1083 util.rename(self.join('undo.bookmarks'),
1061 self.join('bookmarks'))
1084 self.join('bookmarks'))
1062 if os.path.exists(self.sjoin('undo.phaseroots')):
1085 if os.path.exists(self.sjoin('undo.phaseroots')):
1063 util.rename(self.sjoin('undo.phaseroots'),
1086 util.rename(self.sjoin('undo.phaseroots'),
1064 self.sjoin('phaseroots'))
1087 self.sjoin('phaseroots'))
1065 self.invalidate()
1088 self.invalidate()
1066
1089
1067 # Discard all cache entries to force reloading everything.
1090 # Discard all cache entries to force reloading everything.
1068 self._filecache.clear()
1091 self._filecache.clear()
1069
1092
1070 parentgone = (parents[0] not in self.changelog.nodemap or
1093 parentgone = (parents[0] not in self.changelog.nodemap or
1071 parents[1] not in self.changelog.nodemap)
1094 parents[1] not in self.changelog.nodemap)
1072 if parentgone:
1095 if parentgone:
1073 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1096 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1074 try:
1097 try:
1075 branch = self.opener.read('undo.branch')
1098 branch = self.opener.read('undo.branch')
1076 self.dirstate.setbranch(encoding.tolocal(branch))
1099 self.dirstate.setbranch(encoding.tolocal(branch))
1077 except IOError:
1100 except IOError:
1078 ui.warn(_('named branch could not be reset: '
1101 ui.warn(_('named branch could not be reset: '
1079 'current branch is still \'%s\'\n')
1102 'current branch is still \'%s\'\n')
1080 % self.dirstate.branch())
1103 % self.dirstate.branch())
1081
1104
1082 self.dirstate.invalidate()
1105 self.dirstate.invalidate()
1083 parents = tuple([p.rev() for p in self.parents()])
1106 parents = tuple([p.rev() for p in self.parents()])
1084 if len(parents) > 1:
1107 if len(parents) > 1:
1085 ui.status(_('working directory now based on '
1108 ui.status(_('working directory now based on '
1086 'revisions %d and %d\n') % parents)
1109 'revisions %d and %d\n') % parents)
1087 else:
1110 else:
1088 ui.status(_('working directory now based on '
1111 ui.status(_('working directory now based on '
1089 'revision %d\n') % parents)
1112 'revision %d\n') % parents)
1090 # TODO: if we know which new heads may result from this rollback, pass
1113 # TODO: if we know which new heads may result from this rollback, pass
1091 # them to destroy(), which will prevent the branchhead cache from being
1114 # them to destroy(), which will prevent the branchhead cache from being
1092 # invalidated.
1115 # invalidated.
1093 self.destroyed()
1116 self.destroyed()
1094 return 0
1117 return 0
1095
1118
1096 def invalidatecaches(self):
1119 def invalidatecaches(self):
1097
1120
1098 if '_tagscache' in vars(self):
1121 if '_tagscache' in vars(self):
1099 # can't use delattr on proxy
1122 # can't use delattr on proxy
1100 del self.__dict__['_tagscache']
1123 del self.__dict__['_tagscache']
1101
1124
1102 self.unfiltered()._branchcache = None # in UTF-8
1125 self.unfiltered()._branchcache = None # in UTF-8
1103 self.unfiltered()._branchcachetip = None
1126 self.unfiltered()._branchcachetip = None
1104 self.invalidatevolatilesets()
1127 self.invalidatevolatilesets()
1105
1128
1106 def invalidatevolatilesets(self):
1129 def invalidatevolatilesets(self):
1107 self.filteredrevcache.clear()
1130 self.filteredrevcache.clear()
1108 obsolete.clearobscaches(self)
1131 obsolete.clearobscaches(self)
1109 if 'hiddenrevs' in vars(self):
1132 if 'hiddenrevs' in vars(self):
1110 del self.hiddenrevs
1133 del self.hiddenrevs
1111
1134
1112 def invalidatedirstate(self):
1135 def invalidatedirstate(self):
1113 '''Invalidates the dirstate, causing the next call to dirstate
1136 '''Invalidates the dirstate, causing the next call to dirstate
1114 to check if it was modified since the last time it was read,
1137 to check if it was modified since the last time it was read,
1115 rereading it if it has.
1138 rereading it if it has.
1116
1139
1117 This is different to dirstate.invalidate() that it doesn't always
1140 This is different to dirstate.invalidate() that it doesn't always
1118 rereads the dirstate. Use dirstate.invalidate() if you want to
1141 rereads the dirstate. Use dirstate.invalidate() if you want to
1119 explicitly read the dirstate again (i.e. restoring it to a previous
1142 explicitly read the dirstate again (i.e. restoring it to a previous
1120 known good state).'''
1143 known good state).'''
1121 if hasunfilteredcache(self, 'dirstate'):
1144 if hasunfilteredcache(self, 'dirstate'):
1122 for k in self.dirstate._filecache:
1145 for k in self.dirstate._filecache:
1123 try:
1146 try:
1124 delattr(self.dirstate, k)
1147 delattr(self.dirstate, k)
1125 except AttributeError:
1148 except AttributeError:
1126 pass
1149 pass
1127 delattr(self.unfiltered(), 'dirstate')
1150 delattr(self.unfiltered(), 'dirstate')
1128
1151
1129 def invalidate(self):
1152 def invalidate(self):
1130 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1153 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1131 for k in self._filecache:
1154 for k in self._filecache:
1132 # dirstate is invalidated separately in invalidatedirstate()
1155 # dirstate is invalidated separately in invalidatedirstate()
1133 if k == 'dirstate':
1156 if k == 'dirstate':
1134 continue
1157 continue
1135
1158
1136 try:
1159 try:
1137 delattr(unfiltered, k)
1160 delattr(unfiltered, k)
1138 except AttributeError:
1161 except AttributeError:
1139 pass
1162 pass
1140 self.invalidatecaches()
1163 self.invalidatecaches()
1141
1164
1142 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1165 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1143 try:
1166 try:
1144 l = lock.lock(lockname, 0, releasefn, desc=desc)
1167 l = lock.lock(lockname, 0, releasefn, desc=desc)
1145 except error.LockHeld, inst:
1168 except error.LockHeld, inst:
1146 if not wait:
1169 if not wait:
1147 raise
1170 raise
1148 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1171 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1149 (desc, inst.locker))
1172 (desc, inst.locker))
1150 # default to 600 seconds timeout
1173 # default to 600 seconds timeout
1151 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1174 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1152 releasefn, desc=desc)
1175 releasefn, desc=desc)
1153 if acquirefn:
1176 if acquirefn:
1154 acquirefn()
1177 acquirefn()
1155 return l
1178 return l
1156
1179
1157 def _afterlock(self, callback):
1180 def _afterlock(self, callback):
1158 """add a callback to the current repository lock.
1181 """add a callback to the current repository lock.
1159
1182
1160 The callback will be executed on lock release."""
1183 The callback will be executed on lock release."""
1161 l = self._lockref and self._lockref()
1184 l = self._lockref and self._lockref()
1162 if l:
1185 if l:
1163 l.postrelease.append(callback)
1186 l.postrelease.append(callback)
1164 else:
1187 else:
1165 callback()
1188 callback()
1166
1189
1167 def lock(self, wait=True):
1190 def lock(self, wait=True):
1168 '''Lock the repository store (.hg/store) and return a weak reference
1191 '''Lock the repository store (.hg/store) and return a weak reference
1169 to the lock. Use this before modifying the store (e.g. committing or
1192 to the lock. Use this before modifying the store (e.g. committing or
1170 stripping). If you are opening a transaction, get a lock as well.)'''
1193 stripping). If you are opening a transaction, get a lock as well.)'''
1171 l = self._lockref and self._lockref()
1194 l = self._lockref and self._lockref()
1172 if l is not None and l.held:
1195 if l is not None and l.held:
1173 l.lock()
1196 l.lock()
1174 return l
1197 return l
1175
1198
1176 def unlock():
1199 def unlock():
1177 self.store.write()
1200 self.store.write()
1178 if hasunfilteredcache(self, '_phasecache'):
1201 if hasunfilteredcache(self, '_phasecache'):
1179 self._phasecache.write()
1202 self._phasecache.write()
1180 for k, ce in self._filecache.items():
1203 for k, ce in self._filecache.items():
1181 if k == 'dirstate':
1204 if k == 'dirstate':
1182 continue
1205 continue
1183 ce.refresh()
1206 ce.refresh()
1184
1207
1185 l = self._lock(self.sjoin("lock"), wait, unlock,
1208 l = self._lock(self.sjoin("lock"), wait, unlock,
1186 self.invalidate, _('repository %s') % self.origroot)
1209 self.invalidate, _('repository %s') % self.origroot)
1187 self._lockref = weakref.ref(l)
1210 self._lockref = weakref.ref(l)
1188 return l
1211 return l
1189
1212
1190 def wlock(self, wait=True):
1213 def wlock(self, wait=True):
1191 '''Lock the non-store parts of the repository (everything under
1214 '''Lock the non-store parts of the repository (everything under
1192 .hg except .hg/store) and return a weak reference to the lock.
1215 .hg except .hg/store) and return a weak reference to the lock.
1193 Use this before modifying files in .hg.'''
1216 Use this before modifying files in .hg.'''
1194 l = self._wlockref and self._wlockref()
1217 l = self._wlockref and self._wlockref()
1195 if l is not None and l.held:
1218 if l is not None and l.held:
1196 l.lock()
1219 l.lock()
1197 return l
1220 return l
1198
1221
1199 def unlock():
1222 def unlock():
1200 self.dirstate.write()
1223 self.dirstate.write()
1201 ce = self._filecache.get('dirstate')
1224 ce = self._filecache.get('dirstate')
1202 if ce:
1225 if ce:
1203 ce.refresh()
1226 ce.refresh()
1204
1227
1205 l = self._lock(self.join("wlock"), wait, unlock,
1228 l = self._lock(self.join("wlock"), wait, unlock,
1206 self.invalidatedirstate, _('working directory of %s') %
1229 self.invalidatedirstate, _('working directory of %s') %
1207 self.origroot)
1230 self.origroot)
1208 self._wlockref = weakref.ref(l)
1231 self._wlockref = weakref.ref(l)
1209 return l
1232 return l
1210
1233
1211 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1234 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1212 """
1235 """
1213 commit an individual file as part of a larger transaction
1236 commit an individual file as part of a larger transaction
1214 """
1237 """
1215
1238
1216 fname = fctx.path()
1239 fname = fctx.path()
1217 text = fctx.data()
1240 text = fctx.data()
1218 flog = self.file(fname)
1241 flog = self.file(fname)
1219 fparent1 = manifest1.get(fname, nullid)
1242 fparent1 = manifest1.get(fname, nullid)
1220 fparent2 = fparent2o = manifest2.get(fname, nullid)
1243 fparent2 = fparent2o = manifest2.get(fname, nullid)
1221
1244
1222 meta = {}
1245 meta = {}
1223 copy = fctx.renamed()
1246 copy = fctx.renamed()
1224 if copy and copy[0] != fname:
1247 if copy and copy[0] != fname:
1225 # Mark the new revision of this file as a copy of another
1248 # Mark the new revision of this file as a copy of another
1226 # file. This copy data will effectively act as a parent
1249 # file. This copy data will effectively act as a parent
1227 # of this new revision. If this is a merge, the first
1250 # of this new revision. If this is a merge, the first
1228 # parent will be the nullid (meaning "look up the copy data")
1251 # parent will be the nullid (meaning "look up the copy data")
1229 # and the second one will be the other parent. For example:
1252 # and the second one will be the other parent. For example:
1230 #
1253 #
1231 # 0 --- 1 --- 3 rev1 changes file foo
1254 # 0 --- 1 --- 3 rev1 changes file foo
1232 # \ / rev2 renames foo to bar and changes it
1255 # \ / rev2 renames foo to bar and changes it
1233 # \- 2 -/ rev3 should have bar with all changes and
1256 # \- 2 -/ rev3 should have bar with all changes and
1234 # should record that bar descends from
1257 # should record that bar descends from
1235 # bar in rev2 and foo in rev1
1258 # bar in rev2 and foo in rev1
1236 #
1259 #
1237 # this allows this merge to succeed:
1260 # this allows this merge to succeed:
1238 #
1261 #
1239 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1262 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1240 # \ / merging rev3 and rev4 should use bar@rev2
1263 # \ / merging rev3 and rev4 should use bar@rev2
1241 # \- 2 --- 4 as the merge base
1264 # \- 2 --- 4 as the merge base
1242 #
1265 #
1243
1266
1244 cfname = copy[0]
1267 cfname = copy[0]
1245 crev = manifest1.get(cfname)
1268 crev = manifest1.get(cfname)
1246 newfparent = fparent2
1269 newfparent = fparent2
1247
1270
1248 if manifest2: # branch merge
1271 if manifest2: # branch merge
1249 if fparent2 == nullid or crev is None: # copied on remote side
1272 if fparent2 == nullid or crev is None: # copied on remote side
1250 if cfname in manifest2:
1273 if cfname in manifest2:
1251 crev = manifest2[cfname]
1274 crev = manifest2[cfname]
1252 newfparent = fparent1
1275 newfparent = fparent1
1253
1276
1254 # find source in nearest ancestor if we've lost track
1277 # find source in nearest ancestor if we've lost track
1255 if not crev:
1278 if not crev:
1256 self.ui.debug(" %s: searching for copy revision for %s\n" %
1279 self.ui.debug(" %s: searching for copy revision for %s\n" %
1257 (fname, cfname))
1280 (fname, cfname))
1258 for ancestor in self[None].ancestors():
1281 for ancestor in self[None].ancestors():
1259 if cfname in ancestor:
1282 if cfname in ancestor:
1260 crev = ancestor[cfname].filenode()
1283 crev = ancestor[cfname].filenode()
1261 break
1284 break
1262
1285
1263 if crev:
1286 if crev:
1264 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1287 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1265 meta["copy"] = cfname
1288 meta["copy"] = cfname
1266 meta["copyrev"] = hex(crev)
1289 meta["copyrev"] = hex(crev)
1267 fparent1, fparent2 = nullid, newfparent
1290 fparent1, fparent2 = nullid, newfparent
1268 else:
1291 else:
1269 self.ui.warn(_("warning: can't find ancestor for '%s' "
1292 self.ui.warn(_("warning: can't find ancestor for '%s' "
1270 "copied from '%s'!\n") % (fname, cfname))
1293 "copied from '%s'!\n") % (fname, cfname))
1271
1294
1272 elif fparent2 != nullid:
1295 elif fparent2 != nullid:
1273 # is one parent an ancestor of the other?
1296 # is one parent an ancestor of the other?
1274 fparentancestor = flog.ancestor(fparent1, fparent2)
1297 fparentancestor = flog.ancestor(fparent1, fparent2)
1275 if fparentancestor == fparent1:
1298 if fparentancestor == fparent1:
1276 fparent1, fparent2 = fparent2, nullid
1299 fparent1, fparent2 = fparent2, nullid
1277 elif fparentancestor == fparent2:
1300 elif fparentancestor == fparent2:
1278 fparent2 = nullid
1301 fparent2 = nullid
1279
1302
1280 # is the file changed?
1303 # is the file changed?
1281 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1304 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1282 changelist.append(fname)
1305 changelist.append(fname)
1283 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1306 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1284
1307
1285 # are just the flags changed during merge?
1308 # are just the flags changed during merge?
1286 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1309 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1287 changelist.append(fname)
1310 changelist.append(fname)
1288
1311
1289 return fparent1
1312 return fparent1
1290
1313
1291 @unfilteredmethod
1314 @unfilteredmethod
1292 def commit(self, text="", user=None, date=None, match=None, force=False,
1315 def commit(self, text="", user=None, date=None, match=None, force=False,
1293 editor=False, extra={}):
1316 editor=False, extra={}):
1294 """Add a new revision to current repository.
1317 """Add a new revision to current repository.
1295
1318
1296 Revision information is gathered from the working directory,
1319 Revision information is gathered from the working directory,
1297 match can be used to filter the committed files. If editor is
1320 match can be used to filter the committed files. If editor is
1298 supplied, it is called to get a commit message.
1321 supplied, it is called to get a commit message.
1299 """
1322 """
1300
1323
1301 def fail(f, msg):
1324 def fail(f, msg):
1302 raise util.Abort('%s: %s' % (f, msg))
1325 raise util.Abort('%s: %s' % (f, msg))
1303
1326
1304 if not match:
1327 if not match:
1305 match = matchmod.always(self.root, '')
1328 match = matchmod.always(self.root, '')
1306
1329
1307 if not force:
1330 if not force:
1308 vdirs = []
1331 vdirs = []
1309 match.dir = vdirs.append
1332 match.dir = vdirs.append
1310 match.bad = fail
1333 match.bad = fail
1311
1334
1312 wlock = self.wlock()
1335 wlock = self.wlock()
1313 try:
1336 try:
1314 wctx = self[None]
1337 wctx = self[None]
1315 merge = len(wctx.parents()) > 1
1338 merge = len(wctx.parents()) > 1
1316
1339
1317 if (not force and merge and match and
1340 if (not force and merge and match and
1318 (match.files() or match.anypats())):
1341 (match.files() or match.anypats())):
1319 raise util.Abort(_('cannot partially commit a merge '
1342 raise util.Abort(_('cannot partially commit a merge '
1320 '(do not specify files or patterns)'))
1343 '(do not specify files or patterns)'))
1321
1344
1322 changes = self.status(match=match, clean=force)
1345 changes = self.status(match=match, clean=force)
1323 if force:
1346 if force:
1324 changes[0].extend(changes[6]) # mq may commit unchanged files
1347 changes[0].extend(changes[6]) # mq may commit unchanged files
1325
1348
1326 # check subrepos
1349 # check subrepos
1327 subs = []
1350 subs = []
1328 commitsubs = set()
1351 commitsubs = set()
1329 newstate = wctx.substate.copy()
1352 newstate = wctx.substate.copy()
1330 # only manage subrepos and .hgsubstate if .hgsub is present
1353 # only manage subrepos and .hgsubstate if .hgsub is present
1331 if '.hgsub' in wctx:
1354 if '.hgsub' in wctx:
1332 # we'll decide whether to track this ourselves, thanks
1355 # we'll decide whether to track this ourselves, thanks
1333 if '.hgsubstate' in changes[0]:
1356 if '.hgsubstate' in changes[0]:
1334 changes[0].remove('.hgsubstate')
1357 changes[0].remove('.hgsubstate')
1335 if '.hgsubstate' in changes[2]:
1358 if '.hgsubstate' in changes[2]:
1336 changes[2].remove('.hgsubstate')
1359 changes[2].remove('.hgsubstate')
1337
1360
1338 # compare current state to last committed state
1361 # compare current state to last committed state
1339 # build new substate based on last committed state
1362 # build new substate based on last committed state
1340 oldstate = wctx.p1().substate
1363 oldstate = wctx.p1().substate
1341 for s in sorted(newstate.keys()):
1364 for s in sorted(newstate.keys()):
1342 if not match(s):
1365 if not match(s):
1343 # ignore working copy, use old state if present
1366 # ignore working copy, use old state if present
1344 if s in oldstate:
1367 if s in oldstate:
1345 newstate[s] = oldstate[s]
1368 newstate[s] = oldstate[s]
1346 continue
1369 continue
1347 if not force:
1370 if not force:
1348 raise util.Abort(
1371 raise util.Abort(
1349 _("commit with new subrepo %s excluded") % s)
1372 _("commit with new subrepo %s excluded") % s)
1350 if wctx.sub(s).dirty(True):
1373 if wctx.sub(s).dirty(True):
1351 if not self.ui.configbool('ui', 'commitsubrepos'):
1374 if not self.ui.configbool('ui', 'commitsubrepos'):
1352 raise util.Abort(
1375 raise util.Abort(
1353 _("uncommitted changes in subrepo %s") % s,
1376 _("uncommitted changes in subrepo %s") % s,
1354 hint=_("use --subrepos for recursive commit"))
1377 hint=_("use --subrepos for recursive commit"))
1355 subs.append(s)
1378 subs.append(s)
1356 commitsubs.add(s)
1379 commitsubs.add(s)
1357 else:
1380 else:
1358 bs = wctx.sub(s).basestate()
1381 bs = wctx.sub(s).basestate()
1359 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1382 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1360 if oldstate.get(s, (None, None, None))[1] != bs:
1383 if oldstate.get(s, (None, None, None))[1] != bs:
1361 subs.append(s)
1384 subs.append(s)
1362
1385
1363 # check for removed subrepos
1386 # check for removed subrepos
1364 for p in wctx.parents():
1387 for p in wctx.parents():
1365 r = [s for s in p.substate if s not in newstate]
1388 r = [s for s in p.substate if s not in newstate]
1366 subs += [s for s in r if match(s)]
1389 subs += [s for s in r if match(s)]
1367 if subs:
1390 if subs:
1368 if (not match('.hgsub') and
1391 if (not match('.hgsub') and
1369 '.hgsub' in (wctx.modified() + wctx.added())):
1392 '.hgsub' in (wctx.modified() + wctx.added())):
1370 raise util.Abort(
1393 raise util.Abort(
1371 _("can't commit subrepos without .hgsub"))
1394 _("can't commit subrepos without .hgsub"))
1372 changes[0].insert(0, '.hgsubstate')
1395 changes[0].insert(0, '.hgsubstate')
1373
1396
1374 elif '.hgsub' in changes[2]:
1397 elif '.hgsub' in changes[2]:
1375 # clean up .hgsubstate when .hgsub is removed
1398 # clean up .hgsubstate when .hgsub is removed
1376 if ('.hgsubstate' in wctx and
1399 if ('.hgsubstate' in wctx and
1377 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1400 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1378 changes[2].insert(0, '.hgsubstate')
1401 changes[2].insert(0, '.hgsubstate')
1379
1402
1380 # make sure all explicit patterns are matched
1403 # make sure all explicit patterns are matched
1381 if not force and match.files():
1404 if not force and match.files():
1382 matched = set(changes[0] + changes[1] + changes[2])
1405 matched = set(changes[0] + changes[1] + changes[2])
1383
1406
1384 for f in match.files():
1407 for f in match.files():
1385 f = self.dirstate.normalize(f)
1408 f = self.dirstate.normalize(f)
1386 if f == '.' or f in matched or f in wctx.substate:
1409 if f == '.' or f in matched or f in wctx.substate:
1387 continue
1410 continue
1388 if f in changes[3]: # missing
1411 if f in changes[3]: # missing
1389 fail(f, _('file not found!'))
1412 fail(f, _('file not found!'))
1390 if f in vdirs: # visited directory
1413 if f in vdirs: # visited directory
1391 d = f + '/'
1414 d = f + '/'
1392 for mf in matched:
1415 for mf in matched:
1393 if mf.startswith(d):
1416 if mf.startswith(d):
1394 break
1417 break
1395 else:
1418 else:
1396 fail(f, _("no match under directory!"))
1419 fail(f, _("no match under directory!"))
1397 elif f not in self.dirstate:
1420 elif f not in self.dirstate:
1398 fail(f, _("file not tracked!"))
1421 fail(f, _("file not tracked!"))
1399
1422
1400 if (not force and not extra.get("close") and not merge
1423 if (not force and not extra.get("close") and not merge
1401 and not (changes[0] or changes[1] or changes[2])
1424 and not (changes[0] or changes[1] or changes[2])
1402 and wctx.branch() == wctx.p1().branch()):
1425 and wctx.branch() == wctx.p1().branch()):
1403 return None
1426 return None
1404
1427
1405 if merge and changes[3]:
1428 if merge and changes[3]:
1406 raise util.Abort(_("cannot commit merge with missing files"))
1429 raise util.Abort(_("cannot commit merge with missing files"))
1407
1430
1408 ms = mergemod.mergestate(self)
1431 ms = mergemod.mergestate(self)
1409 for f in changes[0]:
1432 for f in changes[0]:
1410 if f in ms and ms[f] == 'u':
1433 if f in ms and ms[f] == 'u':
1411 raise util.Abort(_("unresolved merge conflicts "
1434 raise util.Abort(_("unresolved merge conflicts "
1412 "(see hg help resolve)"))
1435 "(see hg help resolve)"))
1413
1436
1414 cctx = context.workingctx(self, text, user, date, extra, changes)
1437 cctx = context.workingctx(self, text, user, date, extra, changes)
1415 if editor:
1438 if editor:
1416 cctx._text = editor(self, cctx, subs)
1439 cctx._text = editor(self, cctx, subs)
1417 edited = (text != cctx._text)
1440 edited = (text != cctx._text)
1418
1441
1419 # commit subs and write new state
1442 # commit subs and write new state
1420 if subs:
1443 if subs:
1421 for s in sorted(commitsubs):
1444 for s in sorted(commitsubs):
1422 sub = wctx.sub(s)
1445 sub = wctx.sub(s)
1423 self.ui.status(_('committing subrepository %s\n') %
1446 self.ui.status(_('committing subrepository %s\n') %
1424 subrepo.subrelpath(sub))
1447 subrepo.subrelpath(sub))
1425 sr = sub.commit(cctx._text, user, date)
1448 sr = sub.commit(cctx._text, user, date)
1426 newstate[s] = (newstate[s][0], sr)
1449 newstate[s] = (newstate[s][0], sr)
1427 subrepo.writestate(self, newstate)
1450 subrepo.writestate(self, newstate)
1428
1451
1429 # Save commit message in case this transaction gets rolled back
1452 # Save commit message in case this transaction gets rolled back
1430 # (e.g. by a pretxncommit hook). Leave the content alone on
1453 # (e.g. by a pretxncommit hook). Leave the content alone on
1431 # the assumption that the user will use the same editor again.
1454 # the assumption that the user will use the same editor again.
1432 msgfn = self.savecommitmessage(cctx._text)
1455 msgfn = self.savecommitmessage(cctx._text)
1433
1456
1434 p1, p2 = self.dirstate.parents()
1457 p1, p2 = self.dirstate.parents()
1435 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1458 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1436 try:
1459 try:
1437 self.hook("precommit", throw=True, parent1=hookp1,
1460 self.hook("precommit", throw=True, parent1=hookp1,
1438 parent2=hookp2)
1461 parent2=hookp2)
1439 ret = self.commitctx(cctx, True)
1462 ret = self.commitctx(cctx, True)
1440 except: # re-raises
1463 except: # re-raises
1441 if edited:
1464 if edited:
1442 self.ui.write(
1465 self.ui.write(
1443 _('note: commit message saved in %s\n') % msgfn)
1466 _('note: commit message saved in %s\n') % msgfn)
1444 raise
1467 raise
1445
1468
1446 # update bookmarks, dirstate and mergestate
1469 # update bookmarks, dirstate and mergestate
1447 bookmarks.update(self, [p1, p2], ret)
1470 bookmarks.update(self, [p1, p2], ret)
1448 for f in changes[0] + changes[1]:
1471 for f in changes[0] + changes[1]:
1449 self.dirstate.normal(f)
1472 self.dirstate.normal(f)
1450 for f in changes[2]:
1473 for f in changes[2]:
1451 self.dirstate.drop(f)
1474 self.dirstate.drop(f)
1452 self.dirstate.setparents(ret)
1475 self.dirstate.setparents(ret)
1453 ms.reset()
1476 ms.reset()
1454 finally:
1477 finally:
1455 wlock.release()
1478 wlock.release()
1456
1479
1457 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1480 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1458 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1481 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1459 self._afterlock(commithook)
1482 self._afterlock(commithook)
1460 return ret
1483 return ret
1461
1484
1462 @unfilteredmethod
1485 @unfilteredmethod
1463 def commitctx(self, ctx, error=False):
1486 def commitctx(self, ctx, error=False):
1464 """Add a new revision to current repository.
1487 """Add a new revision to current repository.
1465 Revision information is passed via the context argument.
1488 Revision information is passed via the context argument.
1466 """
1489 """
1467
1490
1468 tr = lock = None
1491 tr = lock = None
1469 removed = list(ctx.removed())
1492 removed = list(ctx.removed())
1470 p1, p2 = ctx.p1(), ctx.p2()
1493 p1, p2 = ctx.p1(), ctx.p2()
1471 user = ctx.user()
1494 user = ctx.user()
1472
1495
1473 lock = self.lock()
1496 lock = self.lock()
1474 try:
1497 try:
1475 tr = self.transaction("commit")
1498 tr = self.transaction("commit")
1476 trp = weakref.proxy(tr)
1499 trp = weakref.proxy(tr)
1477
1500
1478 if ctx.files():
1501 if ctx.files():
1479 m1 = p1.manifest().copy()
1502 m1 = p1.manifest().copy()
1480 m2 = p2.manifest()
1503 m2 = p2.manifest()
1481
1504
1482 # check in files
1505 # check in files
1483 new = {}
1506 new = {}
1484 changed = []
1507 changed = []
1485 linkrev = len(self)
1508 linkrev = len(self)
1486 for f in sorted(ctx.modified() + ctx.added()):
1509 for f in sorted(ctx.modified() + ctx.added()):
1487 self.ui.note(f + "\n")
1510 self.ui.note(f + "\n")
1488 try:
1511 try:
1489 fctx = ctx[f]
1512 fctx = ctx[f]
1490 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1513 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1491 changed)
1514 changed)
1492 m1.set(f, fctx.flags())
1515 m1.set(f, fctx.flags())
1493 except OSError, inst:
1516 except OSError, inst:
1494 self.ui.warn(_("trouble committing %s!\n") % f)
1517 self.ui.warn(_("trouble committing %s!\n") % f)
1495 raise
1518 raise
1496 except IOError, inst:
1519 except IOError, inst:
1497 errcode = getattr(inst, 'errno', errno.ENOENT)
1520 errcode = getattr(inst, 'errno', errno.ENOENT)
1498 if error or errcode and errcode != errno.ENOENT:
1521 if error or errcode and errcode != errno.ENOENT:
1499 self.ui.warn(_("trouble committing %s!\n") % f)
1522 self.ui.warn(_("trouble committing %s!\n") % f)
1500 raise
1523 raise
1501 else:
1524 else:
1502 removed.append(f)
1525 removed.append(f)
1503
1526
1504 # update manifest
1527 # update manifest
1505 m1.update(new)
1528 m1.update(new)
1506 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1529 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1507 drop = [f for f in removed if f in m1]
1530 drop = [f for f in removed if f in m1]
1508 for f in drop:
1531 for f in drop:
1509 del m1[f]
1532 del m1[f]
1510 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1533 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1511 p2.manifestnode(), (new, drop))
1534 p2.manifestnode(), (new, drop))
1512 files = changed + removed
1535 files = changed + removed
1513 else:
1536 else:
1514 mn = p1.manifestnode()
1537 mn = p1.manifestnode()
1515 files = []
1538 files = []
1516
1539
1517 # update changelog
1540 # update changelog
1518 self.changelog.delayupdate()
1541 self.changelog.delayupdate()
1519 n = self.changelog.add(mn, files, ctx.description(),
1542 n = self.changelog.add(mn, files, ctx.description(),
1520 trp, p1.node(), p2.node(),
1543 trp, p1.node(), p2.node(),
1521 user, ctx.date(), ctx.extra().copy())
1544 user, ctx.date(), ctx.extra().copy())
1522 p = lambda: self.changelog.writepending() and self.root or ""
1545 p = lambda: self.changelog.writepending() and self.root or ""
1523 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1546 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1524 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1547 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1525 parent2=xp2, pending=p)
1548 parent2=xp2, pending=p)
1526 self.changelog.finalize(trp)
1549 self.changelog.finalize(trp)
1527 # set the new commit is proper phase
1550 # set the new commit is proper phase
1528 targetphase = phases.newcommitphase(self.ui)
1551 targetphase = phases.newcommitphase(self.ui)
1529 if targetphase:
1552 if targetphase:
1530 # retract boundary do not alter parent changeset.
1553 # retract boundary do not alter parent changeset.
1531 # if a parent have higher the resulting phase will
1554 # if a parent have higher the resulting phase will
1532 # be compliant anyway
1555 # be compliant anyway
1533 #
1556 #
1534 # if minimal phase was 0 we don't need to retract anything
1557 # if minimal phase was 0 we don't need to retract anything
1535 phases.retractboundary(self, targetphase, [n])
1558 phases.retractboundary(self, targetphase, [n])
1536 tr.close()
1559 tr.close()
1537 self.updatebranchcache()
1560 self.updatebranchcache()
1538 return n
1561 return n
1539 finally:
1562 finally:
1540 if tr:
1563 if tr:
1541 tr.release()
1564 tr.release()
1542 lock.release()
1565 lock.release()
1543
1566
1544 @unfilteredmethod
1567 @unfilteredmethod
1545 def destroyed(self, newheadnodes=None):
1568 def destroyed(self, newheadnodes=None):
1546 '''Inform the repository that nodes have been destroyed.
1569 '''Inform the repository that nodes have been destroyed.
1547 Intended for use by strip and rollback, so there's a common
1570 Intended for use by strip and rollback, so there's a common
1548 place for anything that has to be done after destroying history.
1571 place for anything that has to be done after destroying history.
1549
1572
1550 If you know the branchheadcache was uptodate before nodes were removed
1573 If you know the branchheadcache was uptodate before nodes were removed
1551 and you also know the set of candidate new heads that may have resulted
1574 and you also know the set of candidate new heads that may have resulted
1552 from the destruction, you can set newheadnodes. This will enable the
1575 from the destruction, you can set newheadnodes. This will enable the
1553 code to update the branchheads cache, rather than having future code
1576 code to update the branchheads cache, rather than having future code
1554 decide it's invalid and regenerating it from scratch.
1577 decide it's invalid and regenerating it from scratch.
1555 '''
1578 '''
1556 # If we have info, newheadnodes, on how to update the branch cache, do
1579 # If we have info, newheadnodes, on how to update the branch cache, do
1557 # it, Otherwise, since nodes were destroyed, the cache is stale and this
1580 # it, Otherwise, since nodes were destroyed, the cache is stale and this
1558 # will be caught the next time it is read.
1581 # will be caught the next time it is read.
1559 if newheadnodes:
1582 if newheadnodes:
1560 tiprev = len(self) - 1
1583 tiprev = len(self) - 1
1561 ctxgen = (self[node] for node in newheadnodes
1584 ctxgen = (self[node] for node in newheadnodes
1562 if self.changelog.hasnode(node))
1585 if self.changelog.hasnode(node))
1563 self._updatebranchcache(self._branchcache, ctxgen)
1586 self._updatebranchcache(self._branchcache, ctxgen)
1564 self._writebranchcache(self._branchcache, self.changelog.tip(),
1587 self._writebranchcache(self._branchcache, self.changelog.tip(),
1565 tiprev)
1588 tiprev)
1566
1589
1567 # Ensure the persistent tag cache is updated. Doing it now
1590 # Ensure the persistent tag cache is updated. Doing it now
1568 # means that the tag cache only has to worry about destroyed
1591 # means that the tag cache only has to worry about destroyed
1569 # heads immediately after a strip/rollback. That in turn
1592 # heads immediately after a strip/rollback. That in turn
1570 # guarantees that "cachetip == currenttip" (comparing both rev
1593 # guarantees that "cachetip == currenttip" (comparing both rev
1571 # and node) always means no nodes have been added or destroyed.
1594 # and node) always means no nodes have been added or destroyed.
1572
1595
1573 # XXX this is suboptimal when qrefresh'ing: we strip the current
1596 # XXX this is suboptimal when qrefresh'ing: we strip the current
1574 # head, refresh the tag cache, then immediately add a new head.
1597 # head, refresh the tag cache, then immediately add a new head.
1575 # But I think doing it this way is necessary for the "instant
1598 # But I think doing it this way is necessary for the "instant
1576 # tag cache retrieval" case to work.
1599 # tag cache retrieval" case to work.
1577 self.invalidatecaches()
1600 self.invalidatecaches()
1578
1601
1579 # Discard all cache entries to force reloading everything.
1602 # Discard all cache entries to force reloading everything.
1580 self._filecache.clear()
1603 self._filecache.clear()
1581
1604
1582 def walk(self, match, node=None):
1605 def walk(self, match, node=None):
1583 '''
1606 '''
1584 walk recursively through the directory tree or a given
1607 walk recursively through the directory tree or a given
1585 changeset, finding all files matched by the match
1608 changeset, finding all files matched by the match
1586 function
1609 function
1587 '''
1610 '''
1588 return self[node].walk(match)
1611 return self[node].walk(match)
1589
1612
1590 def status(self, node1='.', node2=None, match=None,
1613 def status(self, node1='.', node2=None, match=None,
1591 ignored=False, clean=False, unknown=False,
1614 ignored=False, clean=False, unknown=False,
1592 listsubrepos=False):
1615 listsubrepos=False):
1593 """return status of files between two nodes or node and working
1616 """return status of files between two nodes or node and working
1594 directory.
1617 directory.
1595
1618
1596 If node1 is None, use the first dirstate parent instead.
1619 If node1 is None, use the first dirstate parent instead.
1597 If node2 is None, compare node1 with working directory.
1620 If node2 is None, compare node1 with working directory.
1598 """
1621 """
1599
1622
1600 def mfmatches(ctx):
1623 def mfmatches(ctx):
1601 mf = ctx.manifest().copy()
1624 mf = ctx.manifest().copy()
1602 if match.always():
1625 if match.always():
1603 return mf
1626 return mf
1604 for fn in mf.keys():
1627 for fn in mf.keys():
1605 if not match(fn):
1628 if not match(fn):
1606 del mf[fn]
1629 del mf[fn]
1607 return mf
1630 return mf
1608
1631
1609 if isinstance(node1, context.changectx):
1632 if isinstance(node1, context.changectx):
1610 ctx1 = node1
1633 ctx1 = node1
1611 else:
1634 else:
1612 ctx1 = self[node1]
1635 ctx1 = self[node1]
1613 if isinstance(node2, context.changectx):
1636 if isinstance(node2, context.changectx):
1614 ctx2 = node2
1637 ctx2 = node2
1615 else:
1638 else:
1616 ctx2 = self[node2]
1639 ctx2 = self[node2]
1617
1640
1618 working = ctx2.rev() is None
1641 working = ctx2.rev() is None
1619 parentworking = working and ctx1 == self['.']
1642 parentworking = working and ctx1 == self['.']
1620 match = match or matchmod.always(self.root, self.getcwd())
1643 match = match or matchmod.always(self.root, self.getcwd())
1621 listignored, listclean, listunknown = ignored, clean, unknown
1644 listignored, listclean, listunknown = ignored, clean, unknown
1622
1645
1623 # load earliest manifest first for caching reasons
1646 # load earliest manifest first for caching reasons
1624 if not working and ctx2.rev() < ctx1.rev():
1647 if not working and ctx2.rev() < ctx1.rev():
1625 ctx2.manifest()
1648 ctx2.manifest()
1626
1649
1627 if not parentworking:
1650 if not parentworking:
1628 def bad(f, msg):
1651 def bad(f, msg):
1629 # 'f' may be a directory pattern from 'match.files()',
1652 # 'f' may be a directory pattern from 'match.files()',
1630 # so 'f not in ctx1' is not enough
1653 # so 'f not in ctx1' is not enough
1631 if f not in ctx1 and f not in ctx1.dirs():
1654 if f not in ctx1 and f not in ctx1.dirs():
1632 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1655 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1633 match.bad = bad
1656 match.bad = bad
1634
1657
1635 if working: # we need to scan the working dir
1658 if working: # we need to scan the working dir
1636 subrepos = []
1659 subrepos = []
1637 if '.hgsub' in self.dirstate:
1660 if '.hgsub' in self.dirstate:
1638 subrepos = ctx2.substate.keys()
1661 subrepos = ctx2.substate.keys()
1639 s = self.dirstate.status(match, subrepos, listignored,
1662 s = self.dirstate.status(match, subrepos, listignored,
1640 listclean, listunknown)
1663 listclean, listunknown)
1641 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1664 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1642
1665
1643 # check for any possibly clean files
1666 # check for any possibly clean files
1644 if parentworking and cmp:
1667 if parentworking and cmp:
1645 fixup = []
1668 fixup = []
1646 # do a full compare of any files that might have changed
1669 # do a full compare of any files that might have changed
1647 for f in sorted(cmp):
1670 for f in sorted(cmp):
1648 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1671 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1649 or ctx1[f].cmp(ctx2[f])):
1672 or ctx1[f].cmp(ctx2[f])):
1650 modified.append(f)
1673 modified.append(f)
1651 else:
1674 else:
1652 fixup.append(f)
1675 fixup.append(f)
1653
1676
1654 # update dirstate for files that are actually clean
1677 # update dirstate for files that are actually clean
1655 if fixup:
1678 if fixup:
1656 if listclean:
1679 if listclean:
1657 clean += fixup
1680 clean += fixup
1658
1681
1659 try:
1682 try:
1660 # updating the dirstate is optional
1683 # updating the dirstate is optional
1661 # so we don't wait on the lock
1684 # so we don't wait on the lock
1662 wlock = self.wlock(False)
1685 wlock = self.wlock(False)
1663 try:
1686 try:
1664 for f in fixup:
1687 for f in fixup:
1665 self.dirstate.normal(f)
1688 self.dirstate.normal(f)
1666 finally:
1689 finally:
1667 wlock.release()
1690 wlock.release()
1668 except error.LockError:
1691 except error.LockError:
1669 pass
1692 pass
1670
1693
1671 if not parentworking:
1694 if not parentworking:
1672 mf1 = mfmatches(ctx1)
1695 mf1 = mfmatches(ctx1)
1673 if working:
1696 if working:
1674 # we are comparing working dir against non-parent
1697 # we are comparing working dir against non-parent
1675 # generate a pseudo-manifest for the working dir
1698 # generate a pseudo-manifest for the working dir
1676 mf2 = mfmatches(self['.'])
1699 mf2 = mfmatches(self['.'])
1677 for f in cmp + modified + added:
1700 for f in cmp + modified + added:
1678 mf2[f] = None
1701 mf2[f] = None
1679 mf2.set(f, ctx2.flags(f))
1702 mf2.set(f, ctx2.flags(f))
1680 for f in removed:
1703 for f in removed:
1681 if f in mf2:
1704 if f in mf2:
1682 del mf2[f]
1705 del mf2[f]
1683 else:
1706 else:
1684 # we are comparing two revisions
1707 # we are comparing two revisions
1685 deleted, unknown, ignored = [], [], []
1708 deleted, unknown, ignored = [], [], []
1686 mf2 = mfmatches(ctx2)
1709 mf2 = mfmatches(ctx2)
1687
1710
1688 modified, added, clean = [], [], []
1711 modified, added, clean = [], [], []
1689 withflags = mf1.withflags() | mf2.withflags()
1712 withflags = mf1.withflags() | mf2.withflags()
1690 for fn in mf2:
1713 for fn in mf2:
1691 if fn in mf1:
1714 if fn in mf1:
1692 if (fn not in deleted and
1715 if (fn not in deleted and
1693 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1716 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1694 (mf1[fn] != mf2[fn] and
1717 (mf1[fn] != mf2[fn] and
1695 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1718 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1696 modified.append(fn)
1719 modified.append(fn)
1697 elif listclean:
1720 elif listclean:
1698 clean.append(fn)
1721 clean.append(fn)
1699 del mf1[fn]
1722 del mf1[fn]
1700 elif fn not in deleted:
1723 elif fn not in deleted:
1701 added.append(fn)
1724 added.append(fn)
1702 removed = mf1.keys()
1725 removed = mf1.keys()
1703
1726
1704 if working and modified and not self.dirstate._checklink:
1727 if working and modified and not self.dirstate._checklink:
1705 # Symlink placeholders may get non-symlink-like contents
1728 # Symlink placeholders may get non-symlink-like contents
1706 # via user error or dereferencing by NFS or Samba servers,
1729 # via user error or dereferencing by NFS or Samba servers,
1707 # so we filter out any placeholders that don't look like a
1730 # so we filter out any placeholders that don't look like a
1708 # symlink
1731 # symlink
1709 sane = []
1732 sane = []
1710 for f in modified:
1733 for f in modified:
1711 if ctx2.flags(f) == 'l':
1734 if ctx2.flags(f) == 'l':
1712 d = ctx2[f].data()
1735 d = ctx2[f].data()
1713 if len(d) >= 1024 or '\n' in d or util.binary(d):
1736 if len(d) >= 1024 or '\n' in d or util.binary(d):
1714 self.ui.debug('ignoring suspect symlink placeholder'
1737 self.ui.debug('ignoring suspect symlink placeholder'
1715 ' "%s"\n' % f)
1738 ' "%s"\n' % f)
1716 continue
1739 continue
1717 sane.append(f)
1740 sane.append(f)
1718 modified = sane
1741 modified = sane
1719
1742
1720 r = modified, added, removed, deleted, unknown, ignored, clean
1743 r = modified, added, removed, deleted, unknown, ignored, clean
1721
1744
1722 if listsubrepos:
1745 if listsubrepos:
1723 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1746 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1724 if working:
1747 if working:
1725 rev2 = None
1748 rev2 = None
1726 else:
1749 else:
1727 rev2 = ctx2.substate[subpath][1]
1750 rev2 = ctx2.substate[subpath][1]
1728 try:
1751 try:
1729 submatch = matchmod.narrowmatcher(subpath, match)
1752 submatch = matchmod.narrowmatcher(subpath, match)
1730 s = sub.status(rev2, match=submatch, ignored=listignored,
1753 s = sub.status(rev2, match=submatch, ignored=listignored,
1731 clean=listclean, unknown=listunknown,
1754 clean=listclean, unknown=listunknown,
1732 listsubrepos=True)
1755 listsubrepos=True)
1733 for rfiles, sfiles in zip(r, s):
1756 for rfiles, sfiles in zip(r, s):
1734 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1757 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1735 except error.LookupError:
1758 except error.LookupError:
1736 self.ui.status(_("skipping missing subrepository: %s\n")
1759 self.ui.status(_("skipping missing subrepository: %s\n")
1737 % subpath)
1760 % subpath)
1738
1761
1739 for l in r:
1762 for l in r:
1740 l.sort()
1763 l.sort()
1741 return r
1764 return r
1742
1765
1743 def heads(self, start=None):
1766 def heads(self, start=None):
1744 heads = self.changelog.heads(start)
1767 heads = self.changelog.heads(start)
1745 # sort the output in rev descending order
1768 # sort the output in rev descending order
1746 return sorted(heads, key=self.changelog.rev, reverse=True)
1769 return sorted(heads, key=self.changelog.rev, reverse=True)
1747
1770
1748 def branchheads(self, branch=None, start=None, closed=False):
1771 def branchheads(self, branch=None, start=None, closed=False):
1749 '''return a (possibly filtered) list of heads for the given branch
1772 '''return a (possibly filtered) list of heads for the given branch
1750
1773
1751 Heads are returned in topological order, from newest to oldest.
1774 Heads are returned in topological order, from newest to oldest.
1752 If branch is None, use the dirstate branch.
1775 If branch is None, use the dirstate branch.
1753 If start is not None, return only heads reachable from start.
1776 If start is not None, return only heads reachable from start.
1754 If closed is True, return heads that are marked as closed as well.
1777 If closed is True, return heads that are marked as closed as well.
1755 '''
1778 '''
1756 if branch is None:
1779 if branch is None:
1757 branch = self[None].branch()
1780 branch = self[None].branch()
1758 branches = self.branchmap()
1781 branches = self.branchmap()
1759 if branch not in branches:
1782 if branch not in branches:
1760 return []
1783 return []
1761 # the cache returns heads ordered lowest to highest
1784 # the cache returns heads ordered lowest to highest
1762 bheads = list(reversed(branches[branch]))
1785 bheads = list(reversed(branches[branch]))
1763 if start is not None:
1786 if start is not None:
1764 # filter out the heads that cannot be reached from startrev
1787 # filter out the heads that cannot be reached from startrev
1765 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1788 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1766 bheads = [h for h in bheads if h in fbheads]
1789 bheads = [h for h in bheads if h in fbheads]
1767 if not closed:
1790 if not closed:
1768 bheads = [h for h in bheads if not self[h].closesbranch()]
1791 bheads = [h for h in bheads if not self[h].closesbranch()]
1769 return bheads
1792 return bheads
1770
1793
1771 def branches(self, nodes):
1794 def branches(self, nodes):
1772 if not nodes:
1795 if not nodes:
1773 nodes = [self.changelog.tip()]
1796 nodes = [self.changelog.tip()]
1774 b = []
1797 b = []
1775 for n in nodes:
1798 for n in nodes:
1776 t = n
1799 t = n
1777 while True:
1800 while True:
1778 p = self.changelog.parents(n)
1801 p = self.changelog.parents(n)
1779 if p[1] != nullid or p[0] == nullid:
1802 if p[1] != nullid or p[0] == nullid:
1780 b.append((t, n, p[0], p[1]))
1803 b.append((t, n, p[0], p[1]))
1781 break
1804 break
1782 n = p[0]
1805 n = p[0]
1783 return b
1806 return b
1784
1807
1785 def between(self, pairs):
1808 def between(self, pairs):
1786 r = []
1809 r = []
1787
1810
1788 for top, bottom in pairs:
1811 for top, bottom in pairs:
1789 n, l, i = top, [], 0
1812 n, l, i = top, [], 0
1790 f = 1
1813 f = 1
1791
1814
1792 while n != bottom and n != nullid:
1815 while n != bottom and n != nullid:
1793 p = self.changelog.parents(n)[0]
1816 p = self.changelog.parents(n)[0]
1794 if i == f:
1817 if i == f:
1795 l.append(n)
1818 l.append(n)
1796 f = f * 2
1819 f = f * 2
1797 n = p
1820 n = p
1798 i += 1
1821 i += 1
1799
1822
1800 r.append(l)
1823 r.append(l)
1801
1824
1802 return r
1825 return r
1803
1826
1804 def pull(self, remote, heads=None, force=False):
1827 def pull(self, remote, heads=None, force=False):
1805 # don't open transaction for nothing or you break future useful
1828 # don't open transaction for nothing or you break future useful
1806 # rollback call
1829 # rollback call
1807 tr = None
1830 tr = None
1808 trname = 'pull\n' + util.hidepassword(remote.url())
1831 trname = 'pull\n' + util.hidepassword(remote.url())
1809 lock = self.lock()
1832 lock = self.lock()
1810 try:
1833 try:
1811 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1834 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1812 force=force)
1835 force=force)
1813 common, fetch, rheads = tmp
1836 common, fetch, rheads = tmp
1814 if not fetch:
1837 if not fetch:
1815 self.ui.status(_("no changes found\n"))
1838 self.ui.status(_("no changes found\n"))
1816 added = []
1839 added = []
1817 result = 0
1840 result = 0
1818 else:
1841 else:
1819 tr = self.transaction(trname)
1842 tr = self.transaction(trname)
1820 if heads is None and list(common) == [nullid]:
1843 if heads is None and list(common) == [nullid]:
1821 self.ui.status(_("requesting all changes\n"))
1844 self.ui.status(_("requesting all changes\n"))
1822 elif heads is None and remote.capable('changegroupsubset'):
1845 elif heads is None and remote.capable('changegroupsubset'):
1823 # issue1320, avoid a race if remote changed after discovery
1846 # issue1320, avoid a race if remote changed after discovery
1824 heads = rheads
1847 heads = rheads
1825
1848
1826 if remote.capable('getbundle'):
1849 if remote.capable('getbundle'):
1827 cg = remote.getbundle('pull', common=common,
1850 cg = remote.getbundle('pull', common=common,
1828 heads=heads or rheads)
1851 heads=heads or rheads)
1829 elif heads is None:
1852 elif heads is None:
1830 cg = remote.changegroup(fetch, 'pull')
1853 cg = remote.changegroup(fetch, 'pull')
1831 elif not remote.capable('changegroupsubset'):
1854 elif not remote.capable('changegroupsubset'):
1832 raise util.Abort(_("partial pull cannot be done because "
1855 raise util.Abort(_("partial pull cannot be done because "
1833 "other repository doesn't support "
1856 "other repository doesn't support "
1834 "changegroupsubset."))
1857 "changegroupsubset."))
1835 else:
1858 else:
1836 cg = remote.changegroupsubset(fetch, heads, 'pull')
1859 cg = remote.changegroupsubset(fetch, heads, 'pull')
1837 clstart = len(self.changelog)
1860 clstart = len(self.changelog)
1838 result = self.addchangegroup(cg, 'pull', remote.url())
1861 result = self.addchangegroup(cg, 'pull', remote.url())
1839 clend = len(self.changelog)
1862 clend = len(self.changelog)
1840 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1863 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1841
1864
1842 # compute target subset
1865 # compute target subset
1843 if heads is None:
1866 if heads is None:
1844 # We pulled every thing possible
1867 # We pulled every thing possible
1845 # sync on everything common
1868 # sync on everything common
1846 subset = common + added
1869 subset = common + added
1847 else:
1870 else:
1848 # We pulled a specific subset
1871 # We pulled a specific subset
1849 # sync on this subset
1872 # sync on this subset
1850 subset = heads
1873 subset = heads
1851
1874
1852 # Get remote phases data from remote
1875 # Get remote phases data from remote
1853 remotephases = remote.listkeys('phases')
1876 remotephases = remote.listkeys('phases')
1854 publishing = bool(remotephases.get('publishing', False))
1877 publishing = bool(remotephases.get('publishing', False))
1855 if remotephases and not publishing:
1878 if remotephases and not publishing:
1856 # remote is new and unpublishing
1879 # remote is new and unpublishing
1857 pheads, _dr = phases.analyzeremotephases(self, subset,
1880 pheads, _dr = phases.analyzeremotephases(self, subset,
1858 remotephases)
1881 remotephases)
1859 phases.advanceboundary(self, phases.public, pheads)
1882 phases.advanceboundary(self, phases.public, pheads)
1860 phases.advanceboundary(self, phases.draft, subset)
1883 phases.advanceboundary(self, phases.draft, subset)
1861 else:
1884 else:
1862 # Remote is old or publishing all common changesets
1885 # Remote is old or publishing all common changesets
1863 # should be seen as public
1886 # should be seen as public
1864 phases.advanceboundary(self, phases.public, subset)
1887 phases.advanceboundary(self, phases.public, subset)
1865
1888
1866 if obsolete._enabled:
1889 if obsolete._enabled:
1867 self.ui.debug('fetching remote obsolete markers\n')
1890 self.ui.debug('fetching remote obsolete markers\n')
1868 remoteobs = remote.listkeys('obsolete')
1891 remoteobs = remote.listkeys('obsolete')
1869 if 'dump0' in remoteobs:
1892 if 'dump0' in remoteobs:
1870 if tr is None:
1893 if tr is None:
1871 tr = self.transaction(trname)
1894 tr = self.transaction(trname)
1872 for key in sorted(remoteobs, reverse=True):
1895 for key in sorted(remoteobs, reverse=True):
1873 if key.startswith('dump'):
1896 if key.startswith('dump'):
1874 data = base85.b85decode(remoteobs[key])
1897 data = base85.b85decode(remoteobs[key])
1875 self.obsstore.mergemarkers(tr, data)
1898 self.obsstore.mergemarkers(tr, data)
1876 self.invalidatevolatilesets()
1899 self.invalidatevolatilesets()
1877 if tr is not None:
1900 if tr is not None:
1878 tr.close()
1901 tr.close()
1879 finally:
1902 finally:
1880 if tr is not None:
1903 if tr is not None:
1881 tr.release()
1904 tr.release()
1882 lock.release()
1905 lock.release()
1883
1906
1884 return result
1907 return result
1885
1908
def checkpush(self, force, revs):
    """Hook point run just before a push is attempted.

    The default implementation performs no checks.  Extensions may
    override this function (or call it when overriding the push
    command) to perform additional validation before pushing.

    force - boolean, whether the push was requested with --force
    revs  - the revisions selected for pushing, or None for all
    """
    pass
1892
1915
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) to remote.

    Returns an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    # Two transports exist for pushing:
    #
    # * addchangegroup assumes the local user can lock the remote
    #   repo (local filesystem, old ssh servers).
    # * unbundle assumes the local user cannot lock the remote repo
    #   (new ssh servers, http servers).

    if not remote.canpush():
        raise util.Abort(_("destination does not support push"))
    unfi = self.unfiltered()
    # get local lock as we might write phase data
    locallock = self.lock()
    try:
        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # discovery
            commoninc = discovery.findcommonincoming(unfi, remote,
                                                     force=force)
            common, inc, remoteheads = commoninc
            outgoing = discovery.findcommonoutgoing(unfi, remote,
                                                    onlyheads=revs,
                                                    commoninc=commoninc,
                                                    force=force)

            if not outgoing.missing:
                # nothing to push
                scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                ret = None
            else:
                # something to push
                if not force:
                    # if self.obsstore == False --> no obsolete
                    # then, save the iteration
                    if unfi.obsstore:
                        # messages kept short for the 80 char limit
                        mso = _("push includes obsolete changeset: %s!")
                        msu = _("push includes unstable changeset: %s!")
                        msb = _("push includes bumped changeset: %s!")
                        msd = _("push includes divergent changeset: %s!")
                        # If there is at least one obsolete or unstable
                        # changeset in missing, at least one of the
                        # missing heads is obsolete or unstable, so
                        # checking heads only is enough.
                        for node in outgoing.missingheads:
                            ctx = unfi[node]
                            if ctx.obsolete():
                                raise util.Abort(mso % ctx)
                            elif ctx.unstable():
                                raise util.Abort(msu % ctx)
                            elif ctx.bumped():
                                raise util.Abort(msb % ctx)
                            elif ctx.divergent():
                                raise util.Abort(msd % ctx)
                    discovery.checkheads(unfi, remote, outgoing,
                                         remoteheads, newbranch,
                                         bool(inc))

                # create a changegroup from local
                if revs is None and not outgoing.excluded:
                    # push everything:
                    # use the fast path, no race possible on push
                    cg = self._changegroup(outgoing.missing, 'push')
                else:
                    cg = self.getlocalbundle('push', outgoing)

                # apply changegroup to remote
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remoteheads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remoteheads, 'push')
                else:
                    # we return an integer indicating remote head count
                    # change
                    ret = remote.addchangegroup(cg, 'push', self.url())

            if ret:
                # push succeeded, synchronize on the pushed set
                cheads = outgoing.missingheads
            elif revs is None:
                # Full push failed.  Synchronize on everything common.
                cheads = outgoing.commonheads
            else:
                # We want cheads = heads(::missingheads and ::commonheads)
                # (missingheads is revs with secret changesets filtered
                # out).
                #
                # This can be expressed as:
                #     cheads = ( (missingheads and ::commonheads)
                #              + (commonheads and ::missingheads))"
                #              )
                #
                # while trying to push we already computed the following:
                #     common = (::commonheads)
                #     missing = ((commonheads::missingheads) - commonheads)
                #
                # We can pick:
                # * missingheads part of common (::commonheads)
                common = set(outgoing.common)
                cheads = [node for node in revs if node in common]
                # and
                # * commonheads parents on missing
                revset = unfi.set('%ln and parents(roots(%ln))',
                                  outgoing.commonheads,
                                  outgoing.missing)
                cheads.extend(c.node() for c in revset)
            # even when we don't push, exchanging phase data is useful
            remotephases = remote.listkeys('phases')
            if not remotephases: # old server or public only repo
                phases.advanceboundary(self, phases.public, cheads)
                # don't push any phase data as there is nothing to push
            else:
                pheads, droots = phases.analyzeremotephases(self, cheads,
                                                            remotephases)
                ### Apply remote phase on local
                if remotephases.get('publishing', False):
                    phases.advanceboundary(self, phases.public, cheads)
                else: # publish = False
                    phases.advanceboundary(self, phases.public, pheads)
                    phases.advanceboundary(self, phases.draft, cheads)
                ### Apply local phase on remote

                # Get the list of all revs draft on remote but public
                # here.
                # XXX Beware that revset break if droots is not strictly
                # XXX root we may want to ensure it is but it is costly
                outdated = unfi.set('heads((%ln::%ln) and public())',
                                    droots, cheads)
                for newremotehead in outdated:
                    r = remote.pushkey('phases',
                                       newremotehead.hex(),
                                       str(phases.draft),
                                       str(phases.public))
                    if not r:
                        self.ui.warn(_('updating %s to public failed!\n')
                                     % newremotehead)
            self.ui.debug('try to push obsolete markers to remote\n')
            if (obsolete._enabled and self.obsstore and
                'obsolete' in remote.listkeys('namespaces')):
                rslts = []
                remotedata = self.listkeys('obsolete')
                for key in sorted(remotedata, reverse=True):
                    # reverse sort to ensure we end with dump0
                    data = remotedata[key]
                    rslts.append(remote.pushkey('obsolete', key, '', data))
                if [r for r in rslts if not r]:
                    msg = _('failed to push some obsolete markers!\n')
                    self.ui.warn(msg)
        finally:
            if lock is not None:
                lock.release()
    finally:
        locallock.release()

    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in unfi._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in unfi:
                cr = unfi[nr]
                cl = unfi[nl]
                if bookmarks.validdest(unfi, cr, cl):
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
2081
2104
def changegroupinfo(self, nodes, source):
    """Report how many changesets a changegroup contains.

    A count is printed when verbose or when generating a bundle; with
    --debug every changeset node is listed as well.
    """
    if self.ui.verbose or source == 'bundle':
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug("list of changesets:\n")
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
2089
2112
def changegroupsubset(self, bases, heads, source):
    """Build a changegroup of every node that is a descendant of any
    of ``bases`` and an ancestor of any of ``heads``.

    Returns a chunkbuffer object whose read() method yields
    successive changegroup chunks.

    This is fairly complex: deciding which filenodes and manifest
    nodes must be included for the changesets to be complete is
    non-trivial, as is mapping each filenode or manifestnode back to
    the changeset it belongs to.
    """
    cl = self.changelog
    if not bases:
        bases = [nullid]
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # We assume that all ancestors of bases are known
    common = cl.ancestors([cl.rev(n) for n in bases])
    return self._changegroupsubset(common, csets, heads, source)
2110
2133
def getlocalbundle(self, source, outgoing):
    """Like getbundle, but taking a discovery.outgoing as an argument.

    This is only implemented for local repos and reuses potentially
    precomputed sets in ``outgoing``.  Returns None when there is
    nothing to bundle.
    """
    if not outgoing.missing:
        return None
    return self._changegroupsubset(outgoing.common,
                                   outgoing.missing,
                                   outgoing.missingheads,
                                   source)
2122
2145
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but return the set difference between
    the ancestors of ``heads`` and the ancestors of ``common``.

    If heads is None, use the local heads.  If common is None, use
    [nullid].

    The nodes in common might not all be known locally due to the way
    the current discovery protocol works; unknown ones are filtered
    out before use.
    """
    cl = self.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return self.getlocalbundle(source,
                               discovery.outgoing(cl, common, heads))
2142
2165
@unfilteredmethod
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Internal changegroup builder for an arbitrary subset.

    commonrevs - revs known on both sides; nodes linked to them are
                 pruned from the group
    csets      - changeset nodes to ship
    heads      - heads of the requested subset
    source     - operation tag passed to hooks and progress output
    """
    cl = self.changelog
    mf = self.manifest
    mfs = {} # needed manifests
    fnodes = {} # needed file nodes
    changedfiles = set()
    fstate = ['', {}]
    count = [0, 0]

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing
                if rl(rr(n)) not in commonrevs]

    progress = self.ui.progress
    _bundling = _('bundling')
    _changesets = _('changesets')
    _manifests = _('manifests')
    _files = _('files')

    def lookup(revlog, x):
        # Map each revlog entry back to its owning changeset node,
        # collecting the manifests/filenodes we will need on the way.
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_changesets, total=count[1])
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f, n in mdata.iteritems():
                if f in changedfiles:
                    fnodes[f].setdefault(n, clnode)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_manifests, total=count[1])
            return clnode
        else:
            progress(_bundling, count[0], item=fstate[0],
                     unit=_files, total=count[1])
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        # Create a changenode group generator that will call our
        # functions back to look up the owning changenode and collect
        # information.
        count[:] = [0, len(csets)]
        for chunk in cl.group(csets, bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        # Create a generator for the manifestnodes that calls our
        # lookup and data collection functions back.
        for f in changedfiles:
            fnodes[f] = {}
        count[:] = [0, len(mfs)]
        for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        mfs.clear()

        # Go through all our files in order sorted by name.
        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})

            nodelist = prune(filerevlog, fstate[1])
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk

        # Signal that no more groups are left.
        yield bundler.close()
        progress(_bundling, None)

    if csets:
        self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2250
2273
def changegroup(self, basenodes, source):
    """Return a changegroup rooted at ``basenodes`` up to all local
    heads.

    Delegates to changegroupsubset() to avoid a race (issue1320).
    """
    return self.changegroupsubset(basenodes, self.heads(), source)
2254
2277
@unfilteredmethod
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a
    recipient doesn't.  Return a chunkbuffer object whose read()
    method yields successive changegroup chunks.

    This is much simpler than _changegroupsubset because we may
    assume the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send
    """
    cl = self.changelog
    mf = self.manifest
    mfs = {}
    changedfiles = set()
    fstate = ['']
    count = [0, 0]

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    revset = set([cl.rev(n) for n in nodes])

    def gennodelst(log):
        # nodes of this revlog whose linkrev falls in the sent set
        ln, llr = log.node, log.linkrev
        return [ln(r) for r in log if llr(r) in revset]

    progress = self.ui.progress
    _bundling = _('bundling')
    _changesets = _('changesets')
    _manifests = _('manifests')
    _files = _('files')

    def lookup(revlog, x):
        # Map each revlog entry back to its owning changeset node.
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_changesets, total=count[1])
            return x
        elif revlog == mf:
            count[0] += 1
            progress(_bundling, count[0],
                     unit=_manifests, total=count[1])
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            progress(_bundling, count[0], item=fstate[0],
                     total=count[1], unit=_files)
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # changesets first; lookup() collects the changed files as a
        # side effect
        count[:] = [0, len(nodes)]
        for chunk in cl.group(nodes, bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        count[:] = [0, len(mfs)]
        for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
            yield chunk
        progress(_bundling, None)

        count[:] = [0, len(changedfiles)]
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s")
                                 % fname)
            fstate[0] = fname
            nodelist = gennodelst(filerevlog)
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk
        yield bundler.close()
        progress(_bundling, None)

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2348
2371
@unfilteredmethod
def addchangegroup(self, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # linkrev for an incoming changeset: the next changelog index
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        # map a changelog node to its local revision number
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # chunk-level progress callback handed to the bundle source;
            # step/count/total are mutated between the three phases below
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        # rebind: efiles becomes the number of distinct files touched,
        # used below as the file-phase progress total
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                # empty header terminates the file sub-groups
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # tick off the validated file nodes we just received
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # anything still listed in needfiles was promised by a manifest
        # but never delivered: the incoming data is incomplete
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        # compute the head-count delta; heads closing a branch do not
        # count as added heads
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and self[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        self.invalidatevolatilesets()

        if changesets > 0:
            # let pretxnchangegroup hooks see the pending changelog
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = self.ui.configbool('phases', 'publish', True)
        if srctype == 'push':
            # Old server can not push the boundary themself.
            # New server won't push the boundary if changeset already
            # existed locally as secrete
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(self, phases.public, srccontent)
            else:
                phases.advanceboundary(self, phases.draft, srccontent)
                phases.retractboundary(self, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(self, phases.draft, added)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()

        if changesets > 0:
            self.updatebranchcache()
            def runhooks():
                # forcefully update the on-disk branch cache
                self.ui.debug("updating the branch cache\n")
                self.hook("changegroup", node=hex(cl.node(clstart)),
                          source=srctype, url=url)

                for n in added:
                    self.hook("incoming", node=hex(n), source=srctype,
                              url=url)
            # hooks run only once the repo lock is released
            self._afterlock(runhooks)

    finally:
        tr.release()
    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
2541
2564
def stream_in(self, remote, requirements):
    """Populate this repository from *remote* via the streaming protocol.

    Raw store files are copied wholesale instead of exchanging
    changegroups.  *requirements* is the set of format requirements to
    adopt; it is merged with our non-format requirements and written out.
    Returns len(self.heads()) + 1 (always >= 1, like addchangegroup).
    Raises util.Abort or error.ResponseError on protocol errors.
    """
    lock = self.lock()
    try:
        # Save remote branchmap. We will use it later
        # to speed up branchcache creation
        rbranchmap = None
        if remote.capable("branchmap"):
            rbranchmap = remote.branchmap()

        fp = remote.stream_out()
        # first line: integer status code
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        # second line: "<total_files> <total_bytes>"
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        handled_bytes = 0
        self.ui.progress(_('clone'), 0, total=total_bytes)
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            # per-file header line: "<name>\0<size>"
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if self.ui.debugflag:
                self.ui.debug('adding %s (%s)\n' %
                              (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                handled_bytes += len(chunk)
                self.ui.progress(_('clone'), handled_bytes,
                                 total=total_bytes)
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard the rate division below against a zero interval
            elapsed = 0.001
        self.ui.progress(_('clone'), None)
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements +
        #                    new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        if rbranchmap:
            # seed the local branch cache from the remote branchmap
            # saved above, and persist it keyed at its tip revision
            rbheads = []
            for bheads in rbranchmap.itervalues():
                rbheads.extend(bheads)

            self.branchcache = rbranchmap
            if rbheads:
                rtiprev = max((int(self.changelog.rev(node))
                               for node in rbheads))
                self._writebranchcache(self.branchcache,
                                       self[rtiprev].node(), rtiprev)
        # drop caches that predate the streamed-in store
        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
2626
2649
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # Every client able to request uncompressed clones can read any
    # repo format that streaming servers serve.  Should the revlog
    # format ever change, clients will have to inspect the version and
    # format flags on the "stream" capability and stream only when
    # compatible.

    # honor a server that explicitly prefers streaming (fast LANs);
    # only consulted when the caller did not already ask for it
    wantstream = stream or remote.capable('stream-preferred')

    if wantstream and not heads:
        # a plain 'stream' capability means the remote is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise 'streamreqs' carries the remote's format requirements
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            formats = set(streamreqs.split(','))
            # stream in (and adopt the requirements) only if we
            # support every format the remote demands
            if not formats - self.supportedformats:
                return self.stream_in(remote, formats)
    return self.pull(remote, heads)
2658
2681
def pushkey(self, namespace, key, old, new):
    """Update *key* in *namespace* from *old* to *new*.

    The 'prepushkey' hook runs first and may veto the update
    (throw=True); the 'pushkey' hook is notified afterwards with the
    outcome.  Returns the result of pushkey.push().
    """
    self.hook('prepushkey', throw=True, namespace=namespace, key=key,
              old=old, new=new)
    self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
    result = pushkey.push(self, namespace, key, old, new)
    self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
              ret=result)
    return result
2667
2690
def listkeys(self, namespace):
    """Return the pushkey mapping for *namespace*.

    The 'prelistkeys' hook runs first and may veto the listing
    (throw=True); the 'listkeys' hook is notified with the values.
    """
    self.hook('prelistkeys', throw=True, namespace=namespace)
    self.ui.debug('listing keys for "%s"\n' % namespace)
    mapping = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=mapping)
    return mapping
2674
2697
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    # echo all five slots back, space separated, via %s formatting
    args = (one, two, three, four, five)
    return '%s %s %s %s %s' % args
2678
2701
def savecommitmessage(self, text):
    """Store *text* in .hg/last-message.txt.

    Returns the path of the written file relative to the repo root,
    formatted via self.pathto().
    """
    msgfile = self.opener('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    # strip "<root>/" off the absolute file name
    relpath = msgfile.name[len(self.root) + 1:]
    return self.pathto(relpath)
2686
2709
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a closure renaming every (src, dest) pair in *files*.

    The pair list is snapshotted up front, so the caller may reuse or
    mutate *files* afterwards.  A rename failing with OSError (the
    journal file does not exist yet) is silently ignored.
    """
    pending = [tuple(item) for item in files]

    def renameall():
        for src, dest in pending:
            try:
                util.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return renameall
2697
2720
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, basename = os.path.split(fn)
    assert basename.startswith('journal')
    # swap the 'journal' prefix for 'undo', keeping any suffix
    undobase = 'undo' + basename[len('journal'):]
    return os.path.join(directory, undobase)
2702
2725
def instance(ui, path, create):
    """Instantiate a localrepository for *path* (URL or local path)."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2705
2728
def islocal(path):
    """This repository type always lives on local disk."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now