##// END OF EJS Templates
i18n: mark more strings for translation
Benoit Boissinot -
r10510:f77f3383 stable
parent child Browse files
Show More
@@ -1,327 +1,327 b''
1 # Mercurial extension to provide the 'hg bookmark' command
1 # Mercurial extension to provide the 'hg bookmark' command
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''track a line of development with movable markers
8 '''track a line of development with movable markers
9
9
10 Bookmarks are local movable markers to changesets. Every bookmark
10 Bookmarks are local movable markers to changesets. Every bookmark
11 points to a changeset identified by its hash. If you commit a
11 points to a changeset identified by its hash. If you commit a
12 changeset that is based on a changeset that has a bookmark on it, the
12 changeset that is based on a changeset that has a bookmark on it, the
13 bookmark shifts to the new changeset.
13 bookmark shifts to the new changeset.
14
14
15 It is possible to use bookmark names in every revision lookup (e.g. hg
15 It is possible to use bookmark names in every revision lookup (e.g. hg
16 merge, hg update).
16 merge, hg update).
17
17
18 By default, when several bookmarks point to the same changeset, they
18 By default, when several bookmarks point to the same changeset, they
19 will all move forward together. It is possible to obtain a more
19 will all move forward together. It is possible to obtain a more
20 git-like experience by adding the following configuration option to
20 git-like experience by adding the following configuration option to
21 your .hgrc::
21 your .hgrc::
22
22
23 [bookmarks]
23 [bookmarks]
24 track.current = True
24 track.current = True
25
25
26 This will cause Mercurial to track the bookmark that you are currently
26 This will cause Mercurial to track the bookmark that you are currently
27 using, and only update it. This is similar to git's approach to
27 using, and only update it. This is similar to git's approach to
28 branching.
28 branching.
29 '''
29 '''
30
30
31 from mercurial.i18n import _
31 from mercurial.i18n import _
32 from mercurial.node import nullid, nullrev, hex, short
32 from mercurial.node import nullid, nullrev, hex, short
33 from mercurial import util, commands, repair, extensions
33 from mercurial import util, commands, repair, extensions
34 import os
34 import os
35
35
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks
    # save the previous state so rollback() can restore it
    if os.path.exists(repo.join('bookmarks')):
        util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
    # the current bookmark may have been deleted; clear the pointer if so
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # 'fp' instead of 'file' to avoid shadowing the builtin; atomictemp
        # keeps the on-disk file consistent if we are interrupted mid-write
        fp = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            fp.write("%s %s\n" % (hex(node), refspec))
        fp.rename()
    finally:
        wlock.release()
58
58
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    refs = repo._bookmarks

    # do not update if we do update to a rev equal to the current bookmark
    if (mark and mark not in refs and
        current and refs[current] == repo.changectx('.').node()):
        return
    # an unknown mark is recorded as the empty string (no current bookmark)
    if mark not in refs:
        mark = ''
    wlock = repo.wlock()
    try:
        # 'fp' instead of 'file' to avoid shadowing the builtin
        fp = repo.opener('bookmarks.current', 'w', atomictemp=True)
        fp.write(mark)
        fp.rename()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
85
85
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in 'hg merge' and
    'hg update' to merge and update respectively to a given bookmark.

    You can use 'hg bookmark NAME' to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()

    # -m/--rename: move an existing bookmark to a new name
    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        if repo._bookmarkcurrent == rename:
            setcurrent(repo, mark)
        write(repo)
        return

    # -d/--delete: remove a bookmark
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == repo._bookmarkcurrent:
            setcurrent(repo, None)
        del marks[mark]
        write(repo)
        return

    # a NAME was given: create or move the bookmark
    # ('is not None' instead of '!= None' per PEP 8)
    if mark is not None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo)
        return

    # no NAME: list the bookmarks
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if not marks:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                # '*' flags the bookmark the working directory sits on
                if ui.configbool('bookmarks', 'track.current'):
                    current = repo._bookmarkcurrent
                    prefix = (bmark == current and n == cur) and '*' or ' '
                else:
                    prefix = (n == cur) and '*' or ' '

                if ui.quiet:
                    ui.write("%s\n" % bmark)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)))
        return
165
165
166 def _revstostrip(changelog, node):
166 def _revstostrip(changelog, node):
167 srev = changelog.rev(node)
167 srev = changelog.rev(node)
168 tostrip = [srev]
168 tostrip = [srev]
169 saveheads = []
169 saveheads = []
170 for r in xrange(srev, len(changelog)):
170 for r in xrange(srev, len(changelog)):
171 parents = changelog.parentrevs(r)
171 parents = changelog.parentrevs(r)
172 if parents[0] in tostrip or parents[1] in tostrip:
172 if parents[0] in tostrip or parents[1] in tostrip:
173 tostrip.append(r)
173 tostrip.append(r)
174 if parents[1] != nullrev:
174 if parents[1] != nullrev:
175 for p in parents:
175 for p in parents:
176 if p not in tostrip and p > srev:
176 if p not in tostrip and p > srev:
177 saveheads.append(p)
177 saveheads.append(p)
178 return [r for r in tostrip if r not in saveheads]
178 return [r for r in tostrip if r not in saveheads]
179
179
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    revisions = _revstostrip(repo.changelog, node)
    marks = repo._bookmarks
    # collect bookmarks pointing into the region about to be stripped
    update = [mark for mark, n in marks.iteritems()
              if repo.changelog.rev(n) in revisions]
    oldstrip(ui, repo, node, backup)
    if update:
        # re-point orphaned bookmarks at the new working directory parent
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo)
195
195
def reposetup(ui, repo):
    """Dynamically subclass the repository so it is bookmark-aware."""
    if not repo.local():
        return

    class bookmark_repo(repo.__class__):

        @util.propertycache
        def _bookmarks(self):
            '''Parse .hg/bookmarks file and return a dictionary

            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
            in the .hg/bookmarks file. They are read returned as a dictionary
            with name => hash values.
            '''
            try:
                bookmarks = {}
                for line in self.opener('bookmarks'):
                    sha, refspec = line.strip().split(' ', 1)
                    bookmarks[refspec] = super(bookmark_repo, self).lookup(sha)
            except Exception:
                # best-effort: a missing or malformed bookmarks file yields
                # whatever parsed so far (narrowed from a bare 'except:' so
                # KeyboardInterrupt/SystemExit are no longer swallowed)
                pass
            return bookmarks

        @util.propertycache
        def _bookmarkcurrent(self):
            '''Get the current bookmark

            If we use gittishsh branches we have a current bookmark that
            we are on. This function returns the name of the bookmark. It
            is stored in .hg/bookmarks.current
            '''
            mark = None
            if os.path.exists(self.join('bookmarks.current')):
                # 'fp' instead of 'file' to avoid shadowing the builtin
                fp = self.opener('bookmarks.current')
                # No readline() in posixfile_nt, reading everything is cheap
                mark = (fp.readlines() or [''])[0]
                if mark == '':
                    mark = None
                fp.close()
            return mark

        def rollback(self):
            # restore the pre-transaction bookmarks saved by write()
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback()

        def lookup(self, key):
            # bookmark names resolve like any other revision identifier
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def _bookmarksupdate(self, parents, node):
            # advance bookmarks sitting on any of 'parents' to 'node';
            # with track.current enabled only the active bookmark moves
            marks = self._bookmarks
            update = False
            if ui.configbool('bookmarks', 'track.current'):
                mark = self._bookmarkcurrent
                if mark and marks[mark] in parents:
                    marks[mark] = node
                    update = True
            else:
                for mark, n in marks.items():
                    if n in parents:
                        marks[mark] = node
                        update = True
            if update:
                write(self)

        def commitctx(self, ctx, error=False):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commitctx(ctx, error)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                # drop the null second parent for non-merge commits
                if parents[1] == nullid:
                    parents = (parents[0],)

                self._bookmarksupdate(parents, node)
                return node
            finally:
                wlock.release()

        def addchangegroup(self, source, srctype, url, emptyok=False):
            parents = self.dirstate.parents()

            result = super(bookmark_repo, self).addchangegroup(
                source, srctype, url, emptyok)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()

            self._bookmarksupdate(parents, node)
            return result

        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(self._bookmarks)
            return (tags, tagtypes)

    repo.__class__ = bookmark_repo
300
300
def uisetup(ui):
    """Hook repair.strip, and the update command when tracking is enabled."""
    extensions.wrapfunction(repair, "strip", strip)
    tracking = ui.configbool('bookmarks', 'track.current')
    if tracking:
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
305
305
def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark

    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    result = orig(ui, repo, *args, **opts)
    # the update target comes from -r/--rev or the first positional arg
    target = opts['rev']
    if not target and args:
        target = args[0]
    setcurrent(repo, target)
    return result
318
318
# Command table: one 'bookmarks' command handling create/move/list,
# plus delete (-d) and rename (-m).
cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
}
@@ -1,286 +1,286 b''
1 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
1 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 '''commands to sign and verify changesets'''
6 '''commands to sign and verify changesets'''
7
7
8 import os, tempfile, binascii
8 import os, tempfile, binascii
9 from mercurial import util, commands, match
9 from mercurial import util, commands, match
10 from mercurial import node as hgnode
10 from mercurial import node as hgnode
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12
12
class gpg(object):
    """Thin wrapper around the gpg executable for signing and verifying."""

    def __init__(self, path, key=None):
        # path: the gpg binary to invoke; key: optional --local-user selector
        self.path = path
        self.key = (key and " --local-user \"%s\"" % key) or ""

    def sign(self, data):
        """Return a detached signature for 'data'."""
        gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
        return util.filter(data, gpgcmd)

    def verify(self, data, sig):
        """ returns of the good and bad signatures"""
        sigfile = datafile = None
        try:
            # create temporary files
            fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
            fp = os.fdopen(fd, 'wb')
            fp.write(sig)
            fp.close()
            fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
            fp = os.fdopen(fd, 'wb')
            fp.write(data)
            fp.close()
            gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
                      "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
            ret = util.filter("", gpgcmd)
        finally:
            # best-effort cleanup of the temp files; narrowed from a bare
            # 'except:' which also swallowed KeyboardInterrupt
            for f in (sigfile, datafile):
                try:
                    if f:
                        os.unlink(f)
                except OSError:
                    pass
        keys = []
        key, fingerprint = None, None
        err = ""
        for l in ret.splitlines():
            # see DETAILS in the gnupg documentation
            # filter the logger output
            if not l.startswith("[GNUPG:]"):
                continue
            l = l[9:]
            if l.startswith("ERRSIG"):
                err = _("error while verifying signature")
                break
            elif l.startswith("VALIDSIG"):
                # fingerprint of the primary key
                fingerprint = l.split()[10]
            elif (l.startswith("GOODSIG") or
                  l.startswith("EXPSIG") or
                  l.startswith("EXPKEYSIG") or
                  l.startswith("BADSIG")):
                # a new signature record starts; flush the previous one
                if key is not None:
                    keys.append(key + [fingerprint])
                key = l.split(" ", 2)
                fingerprint = None
        if err:
            return err, []
        if key is not None:
            keys.append(key + [fingerprint])
        return err, keys
73
73
def newgpg(ui, **opts):
    """create a new gpg instance"""
    # command-line -k/--key wins; otherwise fall back to [gpg] key config
    gpgpath = ui.config("gpg", "cmd", "gpg")
    gpgkey = opts.get('key') or ui.config("gpg", "key", None)
    return gpg(gpgpath, gpgkey)
81
81
def sigwalk(repo):
    """
    walk over every sigs, yields a couple
    ((node, version, sig), (filename, linenumber))
    """
    def parsefile(lines, context):
        # note: the counter only advances on non-empty lines, so reported
        # line numbers skip blanks (preserved historical behavior)
        ln = 1
        for line in lines:
            if not line:
                continue
            yield (line.split(" ", 2), (context, ln))
            ln += 1

    # read the heads
    sigfl = repo.file(".hgsigs")
    for head in reversed(sigfl.heads()):
        fn = ".hgsigs|%s" % hgnode.short(head)
        for item in parsefile(sigfl.read(head).splitlines(), fn):
            yield item
    try:
        # read local signatures
        fn = "localsigs"
        for item in parsefile(repo.opener(fn), fn):
            yield item
    except IOError:
        pass
108
108
def getkeys(ui, repo, mygpg, sigdata, context):
    """get the keys who signed a data"""
    fn, ln = context
    node, version, sig = sigdata
    prefix = "%s:%d" % (fn, ln)
    node = hgnode.bin(node)

    data = node2txt(repo, node, version)
    sig = binascii.a2b_base64(sig)
    err, keys = mygpg.verify(data, sig)
    if err:
        ui.warn("%s:%d %s\n" % (fn, ln, err))
        return None

    validkeys = []
    # warn for expired key and/or sigs
    for key in keys:
        status = key[0]
        if status == "BADSIG":
            ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
            continue
        if status == "EXPSIG":
            ui.write(_("%s Note: Signature has expired"
                       " (signed by: \"%s\")\n") % (prefix, key[2]))
        elif status == "EXPKEYSIG":
            ui.write(_("%s Note: This key has expired"
                       " (signed by: \"%s\")\n") % (prefix, key[2]))
        validkeys.append((key[1], key[2], key[3]))
    return validkeys
137
137
def sigs(ui, repo):
    """list signed changesets"""
    mygpg = newgpg(ui)
    revs = {}

    # map changelog revision number -> list of signing keys
    for sigdata, context in sigwalk(repo):
        node, version, sig = sigdata
        fn, ln = context
        try:
            n = repo.lookup(node)
        except KeyError:
            ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
            continue
        rev = repo.changelog.rev(n)
        keys = getkeys(ui, repo, mygpg, sigdata, context)
        if not keys:
            continue
        revs.setdefault(rev, []).extend(keys)

    # report newest revisions first
    for rev in sorted(revs, reverse=True):
        for key in revs[rev]:
            label = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
            ui.write("%-30s %s\n" % (keystr(ui, key), label))
161
161
def check(ui, repo, rev):
    """verify all the signatures there may be for a particular revision"""
    mygpg = newgpg(ui)
    rev = repo.lookup(rev)
    hexrev = hgnode.hex(rev)
    keys = []

    # gather every valid key that signed this exact revision
    for sigdata, context in sigwalk(repo):
        node, version, sig = sigdata
        if node != hexrev:
            continue
        found = getkeys(ui, repo, mygpg, sigdata, context)
        if found:
            keys.extend(found)

    if not keys:
        ui.write(_("No valid signature for %s\n") % hgnode.short(rev))
        return

    # print summary
    ui.write("%s is signed by:\n" % hgnode.short(rev))
    for key in keys:
        ui.write(" %s\n" % keystr(ui, key))
184
184
def keystr(ui, key):
    """associate a string to a key (username, comment)"""
    keyid, user, fingerprint = key
    # a [gpg] config entry keyed by the fingerprint supplies an optional comment
    comment = ui.config("gpg", fingerprint, None)
    if not comment:
        return user
    return "%s (%s)" % (user, comment)
193
193
194 def sign(ui, repo, *revs, **opts):
194 def sign(ui, repo, *revs, **opts):
195 """add a signature for the current or given revision
195 """add a signature for the current or given revision
196
196
197 If no revision is given, the parent of the working directory is used,
197 If no revision is given, the parent of the working directory is used,
198 or tip if no revision is checked out.
198 or tip if no revision is checked out.
199
199
200 See 'hg help dates' for a list of formats valid for -d/--date.
200 See 'hg help dates' for a list of formats valid for -d/--date.
201 """
201 """
202
202
203 mygpg = newgpg(ui, **opts)
203 mygpg = newgpg(ui, **opts)
204 sigver = "0"
204 sigver = "0"
205 sigmessage = ""
205 sigmessage = ""
206
206
207 date = opts.get('date')
207 date = opts.get('date')
208 if date:
208 if date:
209 opts['date'] = util.parsedate(date)
209 opts['date'] = util.parsedate(date)
210
210
211 if revs:
211 if revs:
212 nodes = [repo.lookup(n) for n in revs]
212 nodes = [repo.lookup(n) for n in revs]
213 else:
213 else:
214 nodes = [node for node in repo.dirstate.parents()
214 nodes = [node for node in repo.dirstate.parents()
215 if node != hgnode.nullid]
215 if node != hgnode.nullid]
216 if len(nodes) > 1:
216 if len(nodes) > 1:
217 raise util.Abort(_('uncommitted merge - please provide a '
217 raise util.Abort(_('uncommitted merge - please provide a '
218 'specific revision'))
218 'specific revision'))
219 if not nodes:
219 if not nodes:
220 nodes = [repo.changelog.tip()]
220 nodes = [repo.changelog.tip()]
221
221
222 for n in nodes:
222 for n in nodes:
223 hexnode = hgnode.hex(n)
223 hexnode = hgnode.hex(n)
224 ui.write("Signing %d:%s\n" % (repo.changelog.rev(n),
224 ui.write(_("Signing %d:%s\n") % (repo.changelog.rev(n),
225 hgnode.short(n)))
225 hgnode.short(n)))
226 # build data
226 # build data
227 data = node2txt(repo, n, sigver)
227 data = node2txt(repo, n, sigver)
228 sig = mygpg.sign(data)
228 sig = mygpg.sign(data)
229 if not sig:
229 if not sig:
230 raise util.Abort(_("Error while signing"))
230 raise util.Abort(_("Error while signing"))
231 sig = binascii.b2a_base64(sig)
231 sig = binascii.b2a_base64(sig)
232 sig = sig.replace("\n", "")
232 sig = sig.replace("\n", "")
233 sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
233 sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
234
234
235 # write it
235 # write it
236 if opts['local']:
236 if opts['local']:
237 repo.opener("localsigs", "ab").write(sigmessage)
237 repo.opener("localsigs", "ab").write(sigmessage)
238 return
238 return
239
239
240 for x in repo.status(unknown=True)[:5]:
240 for x in repo.status(unknown=True)[:5]:
241 if ".hgsigs" in x and not opts["force"]:
241 if ".hgsigs" in x and not opts["force"]:
242 raise util.Abort(_("working copy of .hgsigs is changed "
242 raise util.Abort(_("working copy of .hgsigs is changed "
243 "(please commit .hgsigs manually "
243 "(please commit .hgsigs manually "
244 "or use --force)"))
244 "or use --force)"))
245
245
246 repo.wfile(".hgsigs", "ab").write(sigmessage)
246 repo.wfile(".hgsigs", "ab").write(sigmessage)
247
247
248 if '.hgsigs' not in repo.dirstate:
248 if '.hgsigs' not in repo.dirstate:
249 repo.add([".hgsigs"])
249 repo.add([".hgsigs"])
250
250
251 if opts["no_commit"]:
251 if opts["no_commit"]:
252 return
252 return
253
253
254 message = opts['message']
254 message = opts['message']
255 if not message:
255 if not message:
256 # we don't translate commit messages
256 # we don't translate commit messages
257 message = "\n".join(["Added signature for changeset %s"
257 message = "\n".join(["Added signature for changeset %s"
258 % hgnode.short(n)
258 % hgnode.short(n)
259 for n in nodes])
259 for n in nodes])
260 try:
260 try:
261 m = match.exact(repo.root, '', ['.hgsigs'])
261 m = match.exact(repo.root, '', ['.hgsigs'])
262 repo.commit(message, opts['user'], opts['date'], match=m)
262 repo.commit(message, opts['user'], opts['date'], match=m)
263 except ValueError, inst:
263 except ValueError, inst:
264 raise util.Abort(str(inst))
264 raise util.Abort(str(inst))
265
265
266 def node2txt(repo, node, ver):
266 def node2txt(repo, node, ver):
267 """map a manifest into some text"""
267 """map a manifest into some text"""
268 if ver == "0":
268 if ver == "0":
269 return "%s\n" % hgnode.hex(node)
269 return "%s\n" % hgnode.hex(node)
270 else:
270 else:
271 raise util.Abort(_("unknown signature version"))
271 raise util.Abort(_("unknown signature version"))
272
272
273 cmdtable = {
273 cmdtable = {
274 "sign":
274 "sign":
275 (sign,
275 (sign,
276 [('l', 'local', None, _('make the signature local')),
276 [('l', 'local', None, _('make the signature local')),
277 ('f', 'force', None, _('sign even if the sigfile is modified')),
277 ('f', 'force', None, _('sign even if the sigfile is modified')),
278 ('', 'no-commit', None, _('do not commit the sigfile after signing')),
278 ('', 'no-commit', None, _('do not commit the sigfile after signing')),
279 ('k', 'key', '', _('the key id to sign with')),
279 ('k', 'key', '', _('the key id to sign with')),
280 ('m', 'message', '', _('commit message')),
280 ('m', 'message', '', _('commit message')),
281 ] + commands.commitopts2,
281 ] + commands.commitopts2,
282 _('hg sign [OPTION]... [REVISION]...')),
282 _('hg sign [OPTION]... [REVISION]...')),
283 "sigcheck": (check, [], _('hg sigcheck REVISION')),
283 "sigcheck": (check, [], _('hg sigcheck REVISION')),
284 "sigs": (sigs, [], _('hg sigs')),
284 "sigs": (sigs, [], _('hg sigs')),
285 }
285 }
286
286
@@ -1,2816 +1,2815 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details)::
17 Common tasks (use "hg help command" for more details)::
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25
25
26 add known patch to applied stack qpush
26 add known patch to applied stack qpush
27 remove patch from applied stack qpop
27 remove patch from applied stack qpop
28 refresh contents of top applied patch qrefresh
28 refresh contents of top applied patch qrefresh
29
29
30 By default, mq will automatically use git patches when required to
30 By default, mq will automatically use git patches when required to
31 avoid losing file mode changes, copy records, binary files or empty
31 avoid losing file mode changes, copy records, binary files or empty
32 files creations or deletions. This behaviour can be configured with::
32 files creations or deletions. This behaviour can be configured with::
33
33
34 [mq]
34 [mq]
35 git = auto/keep/yes/no
35 git = auto/keep/yes/no
36
36
37 If set to 'keep', mq will obey the [diff] section configuration while
37 If set to 'keep', mq will obey the [diff] section configuration while
38 preserving existing git patches upon qrefresh. If set to 'yes' or
38 preserving existing git patches upon qrefresh. If set to 'yes' or
39 'no', mq will override the [diff] section and always generate git or
39 'no', mq will override the [diff] section and always generate git or
40 regular patches, possibly losing data in the second case.
40 regular patches, possibly losing data in the second case.
41 '''
41 '''
42
42
43 from mercurial.i18n import _
43 from mercurial.i18n import _
44 from mercurial.node import bin, hex, short, nullid, nullrev
44 from mercurial.node import bin, hex, short, nullid, nullrev
45 from mercurial.lock import release
45 from mercurial.lock import release
46 from mercurial import commands, cmdutil, hg, patch, util
46 from mercurial import commands, cmdutil, hg, patch, util
47 from mercurial import repair, extensions, url, error
47 from mercurial import repair, extensions, url, error
48 import os, sys, re, errno
48 import os, sys, re, errno
49
49
50 commands.norepo += " qclone"
50 commands.norepo += " qclone"
51
51
52 # Patch names looks like unix-file names.
52 # Patch names looks like unix-file names.
53 # They must be joinable with queue directory and result in the patch path.
53 # They must be joinable with queue directory and result in the patch path.
54 normname = util.normpath
54 normname = util.normpath
55
55
56 class statusentry(object):
56 class statusentry(object):
57 def __init__(self, rev, name=None):
57 def __init__(self, rev, name=None):
58 if not name:
58 if not name:
59 fields = rev.split(':', 1)
59 fields = rev.split(':', 1)
60 if len(fields) == 2:
60 if len(fields) == 2:
61 self.rev, self.name = fields
61 self.rev, self.name = fields
62 else:
62 else:
63 self.rev, self.name = None, None
63 self.rev, self.name = None, None
64 else:
64 else:
65 self.rev, self.name = rev, name
65 self.rev, self.name = rev, name
66
66
67 def __str__(self):
67 def __str__(self):
68 return self.rev + ':' + self.name
68 return self.rev + ':' + self.name
69
69
70 class patchheader(object):
70 class patchheader(object):
71 def __init__(self, pf, plainmode=False):
71 def __init__(self, pf, plainmode=False):
72 def eatdiff(lines):
72 def eatdiff(lines):
73 while lines:
73 while lines:
74 l = lines[-1]
74 l = lines[-1]
75 if (l.startswith("diff -") or
75 if (l.startswith("diff -") or
76 l.startswith("Index:") or
76 l.startswith("Index:") or
77 l.startswith("===========")):
77 l.startswith("===========")):
78 del lines[-1]
78 del lines[-1]
79 else:
79 else:
80 break
80 break
81 def eatempty(lines):
81 def eatempty(lines):
82 while lines:
82 while lines:
83 l = lines[-1]
83 l = lines[-1]
84 if re.match('\s*$', l):
84 if re.match('\s*$', l):
85 del lines[-1]
85 del lines[-1]
86 else:
86 else:
87 break
87 break
88
88
89 message = []
89 message = []
90 comments = []
90 comments = []
91 user = None
91 user = None
92 date = None
92 date = None
93 parent = None
93 parent = None
94 format = None
94 format = None
95 subject = None
95 subject = None
96 diffstart = 0
96 diffstart = 0
97
97
98 for line in file(pf):
98 for line in file(pf):
99 line = line.rstrip()
99 line = line.rstrip()
100 if line.startswith('diff --git'):
100 if line.startswith('diff --git'):
101 diffstart = 2
101 diffstart = 2
102 break
102 break
103 if diffstart:
103 if diffstart:
104 if line.startswith('+++ '):
104 if line.startswith('+++ '):
105 diffstart = 2
105 diffstart = 2
106 break
106 break
107 if line.startswith("--- "):
107 if line.startswith("--- "):
108 diffstart = 1
108 diffstart = 1
109 continue
109 continue
110 elif format == "hgpatch":
110 elif format == "hgpatch":
111 # parse values when importing the result of an hg export
111 # parse values when importing the result of an hg export
112 if line.startswith("# User "):
112 if line.startswith("# User "):
113 user = line[7:]
113 user = line[7:]
114 elif line.startswith("# Date "):
114 elif line.startswith("# Date "):
115 date = line[7:]
115 date = line[7:]
116 elif line.startswith("# Parent "):
116 elif line.startswith("# Parent "):
117 parent = line[9:]
117 parent = line[9:]
118 elif not line.startswith("# ") and line:
118 elif not line.startswith("# ") and line:
119 message.append(line)
119 message.append(line)
120 format = None
120 format = None
121 elif line == '# HG changeset patch':
121 elif line == '# HG changeset patch':
122 message = []
122 message = []
123 format = "hgpatch"
123 format = "hgpatch"
124 elif (format != "tagdone" and (line.startswith("Subject: ") or
124 elif (format != "tagdone" and (line.startswith("Subject: ") or
125 line.startswith("subject: "))):
125 line.startswith("subject: "))):
126 subject = line[9:]
126 subject = line[9:]
127 format = "tag"
127 format = "tag"
128 elif (format != "tagdone" and (line.startswith("From: ") or
128 elif (format != "tagdone" and (line.startswith("From: ") or
129 line.startswith("from: "))):
129 line.startswith("from: "))):
130 user = line[6:]
130 user = line[6:]
131 format = "tag"
131 format = "tag"
132 elif (format != "tagdone" and (line.startswith("Date: ") or
132 elif (format != "tagdone" and (line.startswith("Date: ") or
133 line.startswith("date: "))):
133 line.startswith("date: "))):
134 date = line[6:]
134 date = line[6:]
135 format = "tag"
135 format = "tag"
136 elif format == "tag" and line == "":
136 elif format == "tag" and line == "":
137 # when looking for tags (subject: from: etc) they
137 # when looking for tags (subject: from: etc) they
138 # end once you find a blank line in the source
138 # end once you find a blank line in the source
139 format = "tagdone"
139 format = "tagdone"
140 elif message or line:
140 elif message or line:
141 message.append(line)
141 message.append(line)
142 comments.append(line)
142 comments.append(line)
143
143
144 eatdiff(message)
144 eatdiff(message)
145 eatdiff(comments)
145 eatdiff(comments)
146 eatempty(message)
146 eatempty(message)
147 eatempty(comments)
147 eatempty(comments)
148
148
149 # make sure message isn't empty
149 # make sure message isn't empty
150 if format and format.startswith("tag") and subject:
150 if format and format.startswith("tag") and subject:
151 message.insert(0, "")
151 message.insert(0, "")
152 message.insert(0, subject)
152 message.insert(0, subject)
153
153
154 self.message = message
154 self.message = message
155 self.comments = comments
155 self.comments = comments
156 self.user = user
156 self.user = user
157 self.date = date
157 self.date = date
158 self.parent = parent
158 self.parent = parent
159 self.haspatch = diffstart > 1
159 self.haspatch = diffstart > 1
160 self.plainmode = plainmode
160 self.plainmode = plainmode
161
161
162 def setuser(self, user):
162 def setuser(self, user):
163 if not self.updateheader(['From: ', '# User '], user):
163 if not self.updateheader(['From: ', '# User '], user):
164 try:
164 try:
165 patchheaderat = self.comments.index('# HG changeset patch')
165 patchheaderat = self.comments.index('# HG changeset patch')
166 self.comments.insert(patchheaderat + 1, '# User ' + user)
166 self.comments.insert(patchheaderat + 1, '# User ' + user)
167 except ValueError:
167 except ValueError:
168 if self.plainmode or self._hasheader(['Date: ']):
168 if self.plainmode or self._hasheader(['Date: ']):
169 self.comments = ['From: ' + user] + self.comments
169 self.comments = ['From: ' + user] + self.comments
170 else:
170 else:
171 tmp = ['# HG changeset patch', '# User ' + user, '']
171 tmp = ['# HG changeset patch', '# User ' + user, '']
172 self.comments = tmp + self.comments
172 self.comments = tmp + self.comments
173 self.user = user
173 self.user = user
174
174
175 def setdate(self, date):
175 def setdate(self, date):
176 if not self.updateheader(['Date: ', '# Date '], date):
176 if not self.updateheader(['Date: ', '# Date '], date):
177 try:
177 try:
178 patchheaderat = self.comments.index('# HG changeset patch')
178 patchheaderat = self.comments.index('# HG changeset patch')
179 self.comments.insert(patchheaderat + 1, '# Date ' + date)
179 self.comments.insert(patchheaderat + 1, '# Date ' + date)
180 except ValueError:
180 except ValueError:
181 if self.plainmode or self._hasheader(['From: ']):
181 if self.plainmode or self._hasheader(['From: ']):
182 self.comments = ['Date: ' + date] + self.comments
182 self.comments = ['Date: ' + date] + self.comments
183 else:
183 else:
184 tmp = ['# HG changeset patch', '# Date ' + date, '']
184 tmp = ['# HG changeset patch', '# Date ' + date, '']
185 self.comments = tmp + self.comments
185 self.comments = tmp + self.comments
186 self.date = date
186 self.date = date
187
187
188 def setparent(self, parent):
188 def setparent(self, parent):
189 if not self.updateheader(['# Parent '], parent):
189 if not self.updateheader(['# Parent '], parent):
190 try:
190 try:
191 patchheaderat = self.comments.index('# HG changeset patch')
191 patchheaderat = self.comments.index('# HG changeset patch')
192 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
192 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
193 except ValueError:
193 except ValueError:
194 pass
194 pass
195 self.parent = parent
195 self.parent = parent
196
196
197 def setmessage(self, message):
197 def setmessage(self, message):
198 if self.comments:
198 if self.comments:
199 self._delmsg()
199 self._delmsg()
200 self.message = [message]
200 self.message = [message]
201 self.comments += self.message
201 self.comments += self.message
202
202
203 def updateheader(self, prefixes, new):
203 def updateheader(self, prefixes, new):
204 '''Update all references to a field in the patch header.
204 '''Update all references to a field in the patch header.
205 Return whether the field is present.'''
205 Return whether the field is present.'''
206 res = False
206 res = False
207 for prefix in prefixes:
207 for prefix in prefixes:
208 for i in xrange(len(self.comments)):
208 for i in xrange(len(self.comments)):
209 if self.comments[i].startswith(prefix):
209 if self.comments[i].startswith(prefix):
210 self.comments[i] = prefix + new
210 self.comments[i] = prefix + new
211 res = True
211 res = True
212 break
212 break
213 return res
213 return res
214
214
215 def _hasheader(self, prefixes):
215 def _hasheader(self, prefixes):
216 '''Check if a header starts with any of the given prefixes.'''
216 '''Check if a header starts with any of the given prefixes.'''
217 for prefix in prefixes:
217 for prefix in prefixes:
218 for comment in self.comments:
218 for comment in self.comments:
219 if comment.startswith(prefix):
219 if comment.startswith(prefix):
220 return True
220 return True
221 return False
221 return False
222
222
223 def __str__(self):
223 def __str__(self):
224 if not self.comments:
224 if not self.comments:
225 return ''
225 return ''
226 return '\n'.join(self.comments) + '\n\n'
226 return '\n'.join(self.comments) + '\n\n'
227
227
228 def _delmsg(self):
228 def _delmsg(self):
229 '''Remove existing message, keeping the rest of the comments fields.
229 '''Remove existing message, keeping the rest of the comments fields.
230 If comments contains 'subject: ', message will prepend
230 If comments contains 'subject: ', message will prepend
231 the field and a blank line.'''
231 the field and a blank line.'''
232 if self.message:
232 if self.message:
233 subj = 'subject: ' + self.message[0].lower()
233 subj = 'subject: ' + self.message[0].lower()
234 for i in xrange(len(self.comments)):
234 for i in xrange(len(self.comments)):
235 if subj == self.comments[i].lower():
235 if subj == self.comments[i].lower():
236 del self.comments[i]
236 del self.comments[i]
237 self.message = self.message[2:]
237 self.message = self.message[2:]
238 break
238 break
239 ci = 0
239 ci = 0
240 for mi in self.message:
240 for mi in self.message:
241 while mi != self.comments[ci]:
241 while mi != self.comments[ci]:
242 ci += 1
242 ci += 1
243 del self.comments[ci]
243 del self.comments[ci]
244
244
245 class queue(object):
245 class queue(object):
246 def __init__(self, ui, path, patchdir=None):
246 def __init__(self, ui, path, patchdir=None):
247 self.basepath = path
247 self.basepath = path
248 self.path = patchdir or os.path.join(path, "patches")
248 self.path = patchdir or os.path.join(path, "patches")
249 self.opener = util.opener(self.path)
249 self.opener = util.opener(self.path)
250 self.ui = ui
250 self.ui = ui
251 self.applied_dirty = 0
251 self.applied_dirty = 0
252 self.series_dirty = 0
252 self.series_dirty = 0
253 self.series_path = "series"
253 self.series_path = "series"
254 self.status_path = "status"
254 self.status_path = "status"
255 self.guards_path = "guards"
255 self.guards_path = "guards"
256 self.active_guards = None
256 self.active_guards = None
257 self.guards_dirty = False
257 self.guards_dirty = False
258 # Handle mq.git as a bool with extended values
258 # Handle mq.git as a bool with extended values
259 try:
259 try:
260 gitmode = ui.configbool('mq', 'git', None)
260 gitmode = ui.configbool('mq', 'git', None)
261 if gitmode is None:
261 if gitmode is None:
262 raise error.ConfigError()
262 raise error.ConfigError()
263 self.gitmode = gitmode and 'yes' or 'no'
263 self.gitmode = gitmode and 'yes' or 'no'
264 except error.ConfigError:
264 except error.ConfigError:
265 self.gitmode = ui.config('mq', 'git', 'auto').lower()
265 self.gitmode = ui.config('mq', 'git', 'auto').lower()
266 self.plainmode = ui.configbool('mq', 'plain', False)
266 self.plainmode = ui.configbool('mq', 'plain', False)
267
267
268 @util.propertycache
268 @util.propertycache
269 def applied(self):
269 def applied(self):
270 if os.path.exists(self.join(self.status_path)):
270 if os.path.exists(self.join(self.status_path)):
271 lines = self.opener(self.status_path).read().splitlines()
271 lines = self.opener(self.status_path).read().splitlines()
272 return [statusentry(l) for l in lines]
272 return [statusentry(l) for l in lines]
273 return []
273 return []
274
274
275 @util.propertycache
275 @util.propertycache
276 def full_series(self):
276 def full_series(self):
277 if os.path.exists(self.join(self.series_path)):
277 if os.path.exists(self.join(self.series_path)):
278 return self.opener(self.series_path).read().splitlines()
278 return self.opener(self.series_path).read().splitlines()
279 return []
279 return []
280
280
281 @util.propertycache
281 @util.propertycache
282 def series(self):
282 def series(self):
283 self.parse_series()
283 self.parse_series()
284 return self.series
284 return self.series
285
285
286 @util.propertycache
286 @util.propertycache
287 def series_guards(self):
287 def series_guards(self):
288 self.parse_series()
288 self.parse_series()
289 return self.series_guards
289 return self.series_guards
290
290
291 def invalidate(self):
291 def invalidate(self):
292 for a in 'applied full_series series series_guards'.split():
292 for a in 'applied full_series series series_guards'.split():
293 if a in self.__dict__:
293 if a in self.__dict__:
294 delattr(self, a)
294 delattr(self, a)
295 self.applied_dirty = 0
295 self.applied_dirty = 0
296 self.series_dirty = 0
296 self.series_dirty = 0
297 self.guards_dirty = False
297 self.guards_dirty = False
298 self.active_guards = None
298 self.active_guards = None
299
299
300 def diffopts(self, opts={}, patchfn=None):
300 def diffopts(self, opts={}, patchfn=None):
301 diffopts = patch.diffopts(self.ui, opts)
301 diffopts = patch.diffopts(self.ui, opts)
302 if self.gitmode == 'auto':
302 if self.gitmode == 'auto':
303 diffopts.upgrade = True
303 diffopts.upgrade = True
304 elif self.gitmode == 'keep':
304 elif self.gitmode == 'keep':
305 pass
305 pass
306 elif self.gitmode in ('yes', 'no'):
306 elif self.gitmode in ('yes', 'no'):
307 diffopts.git = self.gitmode == 'yes'
307 diffopts.git = self.gitmode == 'yes'
308 else:
308 else:
309 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
309 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
310 ' got %s') % self.gitmode)
310 ' got %s') % self.gitmode)
311 if patchfn:
311 if patchfn:
312 diffopts = self.patchopts(diffopts, patchfn)
312 diffopts = self.patchopts(diffopts, patchfn)
313 return diffopts
313 return diffopts
314
314
315 def patchopts(self, diffopts, *patches):
315 def patchopts(self, diffopts, *patches):
316 """Return a copy of input diff options with git set to true if
316 """Return a copy of input diff options with git set to true if
317 referenced patch is a git patch and should be preserved as such.
317 referenced patch is a git patch and should be preserved as such.
318 """
318 """
319 diffopts = diffopts.copy()
319 diffopts = diffopts.copy()
320 if not diffopts.git and self.gitmode == 'keep':
320 if not diffopts.git and self.gitmode == 'keep':
321 for patchfn in patches:
321 for patchfn in patches:
322 patchf = self.opener(patchfn, 'r')
322 patchf = self.opener(patchfn, 'r')
323 # if the patch was a git patch, refresh it as a git patch
323 # if the patch was a git patch, refresh it as a git patch
324 for line in patchf:
324 for line in patchf:
325 if line.startswith('diff --git'):
325 if line.startswith('diff --git'):
326 diffopts.git = True
326 diffopts.git = True
327 break
327 break
328 patchf.close()
328 patchf.close()
329 return diffopts
329 return diffopts
330
330
331 def join(self, *p):
331 def join(self, *p):
332 return os.path.join(self.path, *p)
332 return os.path.join(self.path, *p)
333
333
334 def find_series(self, patch):
334 def find_series(self, patch):
335 pre = re.compile("(\s*)([^#]+)")
335 pre = re.compile("(\s*)([^#]+)")
336 index = 0
336 index = 0
337 for l in self.full_series:
337 for l in self.full_series:
338 m = pre.match(l)
338 m = pre.match(l)
339 if m:
339 if m:
340 s = m.group(2)
340 s = m.group(2)
341 s = s.rstrip()
341 s = s.rstrip()
342 if s == patch:
342 if s == patch:
343 return index
343 return index
344 index += 1
344 index += 1
345 return None
345 return None
346
346
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
347 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
348
348
349 def parse_series(self):
349 def parse_series(self):
350 self.series = []
350 self.series = []
351 self.series_guards = []
351 self.series_guards = []
352 for l in self.full_series:
352 for l in self.full_series:
353 h = l.find('#')
353 h = l.find('#')
354 if h == -1:
354 if h == -1:
355 patch = l
355 patch = l
356 comment = ''
356 comment = ''
357 elif h == 0:
357 elif h == 0:
358 continue
358 continue
359 else:
359 else:
360 patch = l[:h]
360 patch = l[:h]
361 comment = l[h:]
361 comment = l[h:]
362 patch = patch.strip()
362 patch = patch.strip()
363 if patch:
363 if patch:
364 if patch in self.series:
364 if patch in self.series:
365 raise util.Abort(_('%s appears more than once in %s') %
365 raise util.Abort(_('%s appears more than once in %s') %
366 (patch, self.join(self.series_path)))
366 (patch, self.join(self.series_path)))
367 self.series.append(patch)
367 self.series.append(patch)
368 self.series_guards.append(self.guard_re.findall(comment))
368 self.series_guards.append(self.guard_re.findall(comment))
369
369
370 def check_guard(self, guard):
370 def check_guard(self, guard):
371 if not guard:
371 if not guard:
372 return _('guard cannot be an empty string')
372 return _('guard cannot be an empty string')
373 bad_chars = '# \t\r\n\f'
373 bad_chars = '# \t\r\n\f'
374 first = guard[0]
374 first = guard[0]
375 if first in '-+':
375 if first in '-+':
376 return (_('guard %r starts with invalid character: %r') %
376 return (_('guard %r starts with invalid character: %r') %
377 (guard, first))
377 (guard, first))
378 for c in bad_chars:
378 for c in bad_chars:
379 if c in guard:
379 if c in guard:
380 return _('invalid character in guard %r: %r') % (guard, c)
380 return _('invalid character in guard %r: %r') % (guard, c)
381
381
382 def set_active(self, guards):
382 def set_active(self, guards):
383 for guard in guards:
383 for guard in guards:
384 bad = self.check_guard(guard)
384 bad = self.check_guard(guard)
385 if bad:
385 if bad:
386 raise util.Abort(bad)
386 raise util.Abort(bad)
387 guards = sorted(set(guards))
387 guards = sorted(set(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
388 self.ui.debug('active guards: %s\n' % ' '.join(guards))
389 self.active_guards = guards
389 self.active_guards = guards
390 self.guards_dirty = True
390 self.guards_dirty = True
391
391
392 def active(self):
392 def active(self):
393 if self.active_guards is None:
393 if self.active_guards is None:
394 self.active_guards = []
394 self.active_guards = []
395 try:
395 try:
396 guards = self.opener(self.guards_path).read().split()
396 guards = self.opener(self.guards_path).read().split()
397 except IOError, err:
397 except IOError, err:
398 if err.errno != errno.ENOENT:
398 if err.errno != errno.ENOENT:
399 raise
399 raise
400 guards = []
400 guards = []
401 for i, guard in enumerate(guards):
401 for i, guard in enumerate(guards):
402 bad = self.check_guard(guard)
402 bad = self.check_guard(guard)
403 if bad:
403 if bad:
404 self.ui.warn('%s:%d: %s\n' %
404 self.ui.warn('%s:%d: %s\n' %
405 (self.join(self.guards_path), i + 1, bad))
405 (self.join(self.guards_path), i + 1, bad))
406 else:
406 else:
407 self.active_guards.append(guard)
407 self.active_guards.append(guard)
408 return self.active_guards
408 return self.active_guards
409
409
410 def set_guards(self, idx, guards):
410 def set_guards(self, idx, guards):
411 for g in guards:
411 for g in guards:
412 if len(g) < 2:
412 if len(g) < 2:
413 raise util.Abort(_('guard %r too short') % g)
413 raise util.Abort(_('guard %r too short') % g)
414 if g[0] not in '-+':
414 if g[0] not in '-+':
415 raise util.Abort(_('guard %r starts with invalid char') % g)
415 raise util.Abort(_('guard %r starts with invalid char') % g)
416 bad = self.check_guard(g[1:])
416 bad = self.check_guard(g[1:])
417 if bad:
417 if bad:
418 raise util.Abort(bad)
418 raise util.Abort(bad)
419 drop = self.guard_re.sub('', self.full_series[idx])
419 drop = self.guard_re.sub('', self.full_series[idx])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
420 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
421 self.parse_series()
421 self.parse_series()
422 self.series_dirty = True
422 self.series_dirty = True
423
423
424 def pushable(self, idx):
424 def pushable(self, idx):
425 if isinstance(idx, str):
425 if isinstance(idx, str):
426 idx = self.series.index(idx)
426 idx = self.series.index(idx)
427 patchguards = self.series_guards[idx]
427 patchguards = self.series_guards[idx]
428 if not patchguards:
428 if not patchguards:
429 return True, None
429 return True, None
430 guards = self.active()
430 guards = self.active()
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
431 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
432 if exactneg:
432 if exactneg:
433 return False, exactneg[0]
433 return False, exactneg[0]
434 pos = [g for g in patchguards if g[0] == '+']
434 pos = [g for g in patchguards if g[0] == '+']
435 exactpos = [g for g in pos if g[1:] in guards]
435 exactpos = [g for g in pos if g[1:] in guards]
436 if pos:
436 if pos:
437 if exactpos:
437 if exactpos:
438 return True, exactpos[0]
438 return True, exactpos[0]
439 return False, pos
439 return False, pos
440 return True, ''
440 return True, ''
441
441
442 def explain_pushable(self, idx, all_patches=False):
442 def explain_pushable(self, idx, all_patches=False):
443 write = all_patches and self.ui.write or self.ui.warn
443 write = all_patches and self.ui.write or self.ui.warn
444 if all_patches or self.ui.verbose:
444 if all_patches or self.ui.verbose:
445 if isinstance(idx, str):
445 if isinstance(idx, str):
446 idx = self.series.index(idx)
446 idx = self.series.index(idx)
447 pushable, why = self.pushable(idx)
447 pushable, why = self.pushable(idx)
448 if all_patches and pushable:
448 if all_patches and pushable:
449 if why is None:
449 if why is None:
450 write(_('allowing %s - no guards in effect\n') %
450 write(_('allowing %s - no guards in effect\n') %
451 self.series[idx])
451 self.series[idx])
452 else:
452 else:
453 if not why:
453 if not why:
454 write(_('allowing %s - no matching negative guards\n') %
454 write(_('allowing %s - no matching negative guards\n') %
455 self.series[idx])
455 self.series[idx])
456 else:
456 else:
457 write(_('allowing %s - guarded by %r\n') %
457 write(_('allowing %s - guarded by %r\n') %
458 (self.series[idx], why))
458 (self.series[idx], why))
459 if not pushable:
459 if not pushable:
460 if why:
460 if why:
461 write(_('skipping %s - guarded by %r\n') %
461 write(_('skipping %s - guarded by %r\n') %
462 (self.series[idx], why))
462 (self.series[idx], why))
463 else:
463 else:
464 write(_('skipping %s - no matching guards\n') %
464 write(_('skipping %s - no matching guards\n') %
465 self.series[idx])
465 self.series[idx])
466
466
467 def save_dirty(self):
467 def save_dirty(self):
468 def write_list(items, path):
468 def write_list(items, path):
469 fp = self.opener(path, 'w')
469 fp = self.opener(path, 'w')
470 for i in items:
470 for i in items:
471 fp.write("%s\n" % i)
471 fp.write("%s\n" % i)
472 fp.close()
472 fp.close()
473 if self.applied_dirty:
473 if self.applied_dirty:
474 write_list(map(str, self.applied), self.status_path)
474 write_list(map(str, self.applied), self.status_path)
475 if self.series_dirty:
475 if self.series_dirty:
476 write_list(self.full_series, self.series_path)
476 write_list(self.full_series, self.series_path)
477 if self.guards_dirty:
477 if self.guards_dirty:
478 write_list(self.active_guards, self.guards_path)
478 write_list(self.active_guards, self.guards_path)
479
479
480 def removeundo(self, repo):
480 def removeundo(self, repo):
481 undo = repo.sjoin('undo')
481 undo = repo.sjoin('undo')
482 if not os.path.exists(undo):
482 if not os.path.exists(undo):
483 return
483 return
484 try:
484 try:
485 os.unlink(undo)
485 os.unlink(undo)
486 except OSError, inst:
486 except OSError, inst:
487 self.ui.warn(_('error removing undo: %s\n') % str(inst))
487 self.ui.warn(_('error removing undo: %s\n') % str(inst))
488
488
489 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
489 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
490 fp=None, changes=None, opts={}):
490 fp=None, changes=None, opts={}):
491 stat = opts.get('stat')
491 stat = opts.get('stat')
492 if stat:
492 if stat:
493 opts['unified'] = '0'
493 opts['unified'] = '0'
494
494
495 m = cmdutil.match(repo, files, opts)
495 m = cmdutil.match(repo, files, opts)
496 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
496 chunks = patch.diff(repo, node1, node2, m, changes, diffopts)
497 write = fp is None and repo.ui.write or fp.write
497 write = fp is None and repo.ui.write or fp.write
498 if stat:
498 if stat:
499 width = self.ui.interactive() and util.termwidth() or 80
499 width = self.ui.interactive() and util.termwidth() or 80
500 write(patch.diffstat(util.iterlines(chunks), width=width,
500 write(patch.diffstat(util.iterlines(chunks), width=width,
501 git=diffopts.git))
501 git=diffopts.git))
502 else:
502 else:
503 for chunk in chunks:
503 for chunk in chunks:
504 write(chunk)
504 write(chunk)
505
505
506 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
506 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
507 # first try just applying the patch
507 # first try just applying the patch
508 (err, n) = self.apply(repo, [patch], update_status=False,
508 (err, n) = self.apply(repo, [patch], update_status=False,
509 strict=True, merge=rev)
509 strict=True, merge=rev)
510
510
511 if err == 0:
511 if err == 0:
512 return (err, n)
512 return (err, n)
513
513
514 if n is None:
514 if n is None:
515 raise util.Abort(_("apply failed for patch %s") % patch)
515 raise util.Abort(_("apply failed for patch %s") % patch)
516
516
517 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
517 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
518
518
519 # apply failed, strip away that rev and merge.
519 # apply failed, strip away that rev and merge.
520 hg.clean(repo, head)
520 hg.clean(repo, head)
521 self.strip(repo, n, update=False, backup='strip')
521 self.strip(repo, n, update=False, backup='strip')
522
522
523 ctx = repo[rev]
523 ctx = repo[rev]
524 ret = hg.merge(repo, rev)
524 ret = hg.merge(repo, rev)
525 if ret:
525 if ret:
526 raise util.Abort(_("update returned %d") % ret)
526 raise util.Abort(_("update returned %d") % ret)
527 n = repo.commit(ctx.description(), ctx.user(), force=True)
527 n = repo.commit(ctx.description(), ctx.user(), force=True)
528 if n is None:
528 if n is None:
529 raise util.Abort(_("repo commit failed"))
529 raise util.Abort(_("repo commit failed"))
530 try:
530 try:
531 ph = patchheader(mergeq.join(patch), self.plainmode)
531 ph = patchheader(mergeq.join(patch), self.plainmode)
532 except:
532 except:
533 raise util.Abort(_("unable to read %s") % patch)
533 raise util.Abort(_("unable to read %s") % patch)
534
534
535 diffopts = self.patchopts(diffopts, patch)
535 diffopts = self.patchopts(diffopts, patch)
536 patchf = self.opener(patch, "w")
536 patchf = self.opener(patch, "w")
537 comments = str(ph)
537 comments = str(ph)
538 if comments:
538 if comments:
539 patchf.write(comments)
539 patchf.write(comments)
540 self.printdiff(repo, diffopts, head, n, fp=patchf)
540 self.printdiff(repo, diffopts, head, n, fp=patchf)
541 patchf.close()
541 patchf.close()
542 self.removeundo(repo)
542 self.removeundo(repo)
543 return (0, n)
543 return (0, n)
544
544
545 def qparents(self, repo, rev=None):
545 def qparents(self, repo, rev=None):
546 if rev is None:
546 if rev is None:
547 (p1, p2) = repo.dirstate.parents()
547 (p1, p2) = repo.dirstate.parents()
548 if p2 == nullid:
548 if p2 == nullid:
549 return p1
549 return p1
550 if len(self.applied) == 0:
550 if len(self.applied) == 0:
551 return None
551 return None
552 return bin(self.applied[-1].rev)
552 return bin(self.applied[-1].rev)
553 pp = repo.changelog.parents(rev)
553 pp = repo.changelog.parents(rev)
554 if pp[1] != nullid:
554 if pp[1] != nullid:
555 arevs = [x.rev for x in self.applied]
555 arevs = [x.rev for x in self.applied]
556 p0 = hex(pp[0])
556 p0 = hex(pp[0])
557 p1 = hex(pp[1])
557 p1 = hex(pp[1])
558 if p0 in arevs:
558 if p0 in arevs:
559 return pp[0]
559 return pp[0]
560 if p1 in arevs:
560 if p1 in arevs:
561 return pp[1]
561 return pp[1]
562 return pp[0]
562 return pp[0]
563
563
564 def mergepatch(self, repo, mergeq, series, diffopts):
564 def mergepatch(self, repo, mergeq, series, diffopts):
565 if len(self.applied) == 0:
565 if len(self.applied) == 0:
566 # each of the patches merged in will have two parents. This
566 # each of the patches merged in will have two parents. This
567 # can confuse the qrefresh, qdiff, and strip code because it
567 # can confuse the qrefresh, qdiff, and strip code because it
568 # needs to know which parent is actually in the patch queue.
568 # needs to know which parent is actually in the patch queue.
569 # so, we insert a merge marker with only one parent. This way
569 # so, we insert a merge marker with only one parent. This way
570 # the first patch in the queue is never a merge patch
570 # the first patch in the queue is never a merge patch
571 #
571 #
572 pname = ".hg.patches.merge.marker"
572 pname = ".hg.patches.merge.marker"
573 n = repo.commit('[mq]: merge marker', force=True)
573 n = repo.commit('[mq]: merge marker', force=True)
574 self.removeundo(repo)
574 self.removeundo(repo)
575 self.applied.append(statusentry(hex(n), pname))
575 self.applied.append(statusentry(hex(n), pname))
576 self.applied_dirty = 1
576 self.applied_dirty = 1
577
577
578 head = self.qparents(repo)
578 head = self.qparents(repo)
579
579
580 for patch in series:
580 for patch in series:
581 patch = mergeq.lookup(patch, strict=True)
581 patch = mergeq.lookup(patch, strict=True)
582 if not patch:
582 if not patch:
583 self.ui.warn(_("patch %s does not exist\n") % patch)
583 self.ui.warn(_("patch %s does not exist\n") % patch)
584 return (1, None)
584 return (1, None)
585 pushable, reason = self.pushable(patch)
585 pushable, reason = self.pushable(patch)
586 if not pushable:
586 if not pushable:
587 self.explain_pushable(patch, all_patches=True)
587 self.explain_pushable(patch, all_patches=True)
588 continue
588 continue
589 info = mergeq.isapplied(patch)
589 info = mergeq.isapplied(patch)
590 if not info:
590 if not info:
591 self.ui.warn(_("patch %s is not applied\n") % patch)
591 self.ui.warn(_("patch %s is not applied\n") % patch)
592 return (1, None)
592 return (1, None)
593 rev = bin(info[1])
593 rev = bin(info[1])
594 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
594 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
595 if head:
595 if head:
596 self.applied.append(statusentry(hex(head), patch))
596 self.applied.append(statusentry(hex(head), patch))
597 self.applied_dirty = 1
597 self.applied_dirty = 1
598 if err:
598 if err:
599 return (err, head)
599 return (err, head)
600 self.save_dirty()
600 self.save_dirty()
601 return (0, head)
601 return (0, head)
602
602
603 def patch(self, repo, patchfile):
603 def patch(self, repo, patchfile):
604 '''Apply patchfile to the working directory.
604 '''Apply patchfile to the working directory.
605 patchfile: name of patch file'''
605 patchfile: name of patch file'''
606 files = {}
606 files = {}
607 try:
607 try:
608 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
608 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
609 files=files, eolmode=None)
609 files=files, eolmode=None)
610 except Exception, inst:
610 except Exception, inst:
611 self.ui.note(str(inst) + '\n')
611 self.ui.note(str(inst) + '\n')
612 if not self.ui.verbose:
612 if not self.ui.verbose:
613 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
613 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
614 return (False, files, False)
614 return (False, files, False)
615
615
616 return (True, files, fuzz)
616 return (True, files, fuzz)
617
617
618 def apply(self, repo, series, list=False, update_status=True,
618 def apply(self, repo, series, list=False, update_status=True,
619 strict=False, patchdir=None, merge=None, all_files={}):
619 strict=False, patchdir=None, merge=None, all_files={}):
620 wlock = lock = tr = None
620 wlock = lock = tr = None
621 try:
621 try:
622 wlock = repo.wlock()
622 wlock = repo.wlock()
623 lock = repo.lock()
623 lock = repo.lock()
624 tr = repo.transaction()
624 tr = repo.transaction()
625 try:
625 try:
626 ret = self._apply(repo, series, list, update_status,
626 ret = self._apply(repo, series, list, update_status,
627 strict, patchdir, merge, all_files=all_files)
627 strict, patchdir, merge, all_files=all_files)
628 tr.close()
628 tr.close()
629 self.save_dirty()
629 self.save_dirty()
630 return ret
630 return ret
631 except:
631 except:
632 try:
632 try:
633 tr.abort()
633 tr.abort()
634 finally:
634 finally:
635 repo.invalidate()
635 repo.invalidate()
636 repo.dirstate.invalidate()
636 repo.dirstate.invalidate()
637 raise
637 raise
638 finally:
638 finally:
639 del tr
639 del tr
640 release(lock, wlock)
640 release(lock, wlock)
641 self.removeundo(repo)
641 self.removeundo(repo)
642
642
643 def _apply(self, repo, series, list=False, update_status=True,
643 def _apply(self, repo, series, list=False, update_status=True,
644 strict=False, patchdir=None, merge=None, all_files={}):
644 strict=False, patchdir=None, merge=None, all_files={}):
645 '''returns (error, hash)
645 '''returns (error, hash)
646 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
646 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
647 # TODO unify with commands.py
647 # TODO unify with commands.py
648 if not patchdir:
648 if not patchdir:
649 patchdir = self.path
649 patchdir = self.path
650 err = 0
650 err = 0
651 n = None
651 n = None
652 for patchname in series:
652 for patchname in series:
653 pushable, reason = self.pushable(patchname)
653 pushable, reason = self.pushable(patchname)
654 if not pushable:
654 if not pushable:
655 self.explain_pushable(patchname, all_patches=True)
655 self.explain_pushable(patchname, all_patches=True)
656 continue
656 continue
657 self.ui.status(_("applying %s\n") % patchname)
657 self.ui.status(_("applying %s\n") % patchname)
658 pf = os.path.join(patchdir, patchname)
658 pf = os.path.join(patchdir, patchname)
659
659
660 try:
660 try:
661 ph = patchheader(self.join(patchname), self.plainmode)
661 ph = patchheader(self.join(patchname), self.plainmode)
662 except:
662 except:
663 self.ui.warn(_("unable to read %s\n") % patchname)
663 self.ui.warn(_("unable to read %s\n") % patchname)
664 err = 1
664 err = 1
665 break
665 break
666
666
667 message = ph.message
667 message = ph.message
668 if not message:
668 if not message:
669 message = "imported patch %s\n" % patchname
669 message = "imported patch %s\n" % patchname
670 else:
670 else:
671 if list:
671 if list:
672 message.append("\nimported patch %s" % patchname)
672 message.append("\nimported patch %s" % patchname)
673 message = '\n'.join(message)
673 message = '\n'.join(message)
674
674
675 if ph.haspatch:
675 if ph.haspatch:
676 (patcherr, files, fuzz) = self.patch(repo, pf)
676 (patcherr, files, fuzz) = self.patch(repo, pf)
677 all_files.update(files)
677 all_files.update(files)
678 patcherr = not patcherr
678 patcherr = not patcherr
679 else:
679 else:
680 self.ui.warn(_("patch %s is empty\n") % patchname)
680 self.ui.warn(_("patch %s is empty\n") % patchname)
681 patcherr, files, fuzz = 0, [], 0
681 patcherr, files, fuzz = 0, [], 0
682
682
683 if merge and files:
683 if merge and files:
684 # Mark as removed/merged and update dirstate parent info
684 # Mark as removed/merged and update dirstate parent info
685 removed = []
685 removed = []
686 merged = []
686 merged = []
687 for f in files:
687 for f in files:
688 if os.path.exists(repo.wjoin(f)):
688 if os.path.exists(repo.wjoin(f)):
689 merged.append(f)
689 merged.append(f)
690 else:
690 else:
691 removed.append(f)
691 removed.append(f)
692 for f in removed:
692 for f in removed:
693 repo.dirstate.remove(f)
693 repo.dirstate.remove(f)
694 for f in merged:
694 for f in merged:
695 repo.dirstate.merge(f)
695 repo.dirstate.merge(f)
696 p1, p2 = repo.dirstate.parents()
696 p1, p2 = repo.dirstate.parents()
697 repo.dirstate.setparents(p1, merge)
697 repo.dirstate.setparents(p1, merge)
698
698
699 files = patch.updatedir(self.ui, repo, files)
699 files = patch.updatedir(self.ui, repo, files)
700 match = cmdutil.matchfiles(repo, files or [])
700 match = cmdutil.matchfiles(repo, files or [])
701 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
701 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
702
702
703 if n is None:
703 if n is None:
704 raise util.Abort(_("repo commit failed"))
704 raise util.Abort(_("repo commit failed"))
705
705
706 if update_status:
706 if update_status:
707 self.applied.append(statusentry(hex(n), patchname))
707 self.applied.append(statusentry(hex(n), patchname))
708
708
709 if patcherr:
709 if patcherr:
710 self.ui.warn(_("patch failed, rejects left in working dir\n"))
710 self.ui.warn(_("patch failed, rejects left in working dir\n"))
711 err = 2
711 err = 2
712 break
712 break
713
713
714 if fuzz and strict:
714 if fuzz and strict:
715 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
715 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
716 err = 3
716 err = 3
717 break
717 break
718 return (err, n)
718 return (err, n)
719
719
720 def _cleanup(self, patches, numrevs, keep=False):
720 def _cleanup(self, patches, numrevs, keep=False):
721 if not keep:
721 if not keep:
722 r = self.qrepo()
722 r = self.qrepo()
723 if r:
723 if r:
724 r.remove(patches, True)
724 r.remove(patches, True)
725 else:
725 else:
726 for p in patches:
726 for p in patches:
727 os.unlink(self.join(p))
727 os.unlink(self.join(p))
728
728
729 if numrevs:
729 if numrevs:
730 del self.applied[:numrevs]
730 del self.applied[:numrevs]
731 self.applied_dirty = 1
731 self.applied_dirty = 1
732
732
733 for i in sorted([self.find_series(p) for p in patches], reverse=True):
733 for i in sorted([self.find_series(p) for p in patches], reverse=True):
734 del self.full_series[i]
734 del self.full_series[i]
735 self.parse_series()
735 self.parse_series()
736 self.series_dirty = 1
736 self.series_dirty = 1
737
737
738 def _revpatches(self, repo, revs):
738 def _revpatches(self, repo, revs):
739 firstrev = repo[self.applied[0].rev].rev()
739 firstrev = repo[self.applied[0].rev].rev()
740 patches = []
740 patches = []
741 for i, rev in enumerate(revs):
741 for i, rev in enumerate(revs):
742
742
743 if rev < firstrev:
743 if rev < firstrev:
744 raise util.Abort(_('revision %d is not managed') % rev)
744 raise util.Abort(_('revision %d is not managed') % rev)
745
745
746 ctx = repo[rev]
746 ctx = repo[rev]
747 base = bin(self.applied[i].rev)
747 base = bin(self.applied[i].rev)
748 if ctx.node() != base:
748 if ctx.node() != base:
749 msg = _('cannot delete revision %d above applied patches')
749 msg = _('cannot delete revision %d above applied patches')
750 raise util.Abort(msg % rev)
750 raise util.Abort(msg % rev)
751
751
752 patch = self.applied[i].name
752 patch = self.applied[i].name
753 for fmt in ('[mq]: %s', 'imported patch %s'):
753 for fmt in ('[mq]: %s', 'imported patch %s'):
754 if ctx.description() == fmt % patch:
754 if ctx.description() == fmt % patch:
755 msg = _('patch %s finalized without changeset message\n')
755 msg = _('patch %s finalized without changeset message\n')
756 repo.ui.status(msg % patch)
756 repo.ui.status(msg % patch)
757 break
757 break
758
758
759 patches.append(patch)
759 patches.append(patch)
760 return patches
760 return patches
761
761
762 def finish(self, repo, revs):
762 def finish(self, repo, revs):
763 patches = self._revpatches(repo, sorted(revs))
763 patches = self._revpatches(repo, sorted(revs))
764 self._cleanup(patches, len(patches))
764 self._cleanup(patches, len(patches))
765
765
766 def delete(self, repo, patches, opts):
766 def delete(self, repo, patches, opts):
767 if not patches and not opts.get('rev'):
767 if not patches and not opts.get('rev'):
768 raise util.Abort(_('qdelete requires at least one revision or '
768 raise util.Abort(_('qdelete requires at least one revision or '
769 'patch name'))
769 'patch name'))
770
770
771 realpatches = []
771 realpatches = []
772 for patch in patches:
772 for patch in patches:
773 patch = self.lookup(patch, strict=True)
773 patch = self.lookup(patch, strict=True)
774 info = self.isapplied(patch)
774 info = self.isapplied(patch)
775 if info:
775 if info:
776 raise util.Abort(_("cannot delete applied patch %s") % patch)
776 raise util.Abort(_("cannot delete applied patch %s") % patch)
777 if patch not in self.series:
777 if patch not in self.series:
778 raise util.Abort(_("patch %s not in series file") % patch)
778 raise util.Abort(_("patch %s not in series file") % patch)
779 realpatches.append(patch)
779 realpatches.append(patch)
780
780
781 numrevs = 0
781 numrevs = 0
782 if opts.get('rev'):
782 if opts.get('rev'):
783 if not self.applied:
783 if not self.applied:
784 raise util.Abort(_('no patches applied'))
784 raise util.Abort(_('no patches applied'))
785 revs = cmdutil.revrange(repo, opts['rev'])
785 revs = cmdutil.revrange(repo, opts['rev'])
786 if len(revs) > 1 and revs[0] > revs[1]:
786 if len(revs) > 1 and revs[0] > revs[1]:
787 revs.reverse()
787 revs.reverse()
788 revpatches = self._revpatches(repo, revs)
788 revpatches = self._revpatches(repo, revs)
789 realpatches += revpatches
789 realpatches += revpatches
790 numrevs = len(revpatches)
790 numrevs = len(revpatches)
791
791
792 self._cleanup(realpatches, numrevs, opts.get('keep'))
792 self._cleanup(realpatches, numrevs, opts.get('keep'))
793
793
794 def check_toppatch(self, repo):
794 def check_toppatch(self, repo):
795 if len(self.applied) > 0:
795 if len(self.applied) > 0:
796 top = bin(self.applied[-1].rev)
796 top = bin(self.applied[-1].rev)
797 patch = self.applied[-1].name
797 patch = self.applied[-1].name
798 pp = repo.dirstate.parents()
798 pp = repo.dirstate.parents()
799 if top not in pp:
799 if top not in pp:
800 raise util.Abort(_("working directory revision is not qtip"))
800 raise util.Abort(_("working directory revision is not qtip"))
801 return top, patch
801 return top, patch
802 return None, None
802 return None, None
803
803
804 def check_localchanges(self, repo, force=False, refresh=True):
804 def check_localchanges(self, repo, force=False, refresh=True):
805 m, a, r, d = repo.status()[:4]
805 m, a, r, d = repo.status()[:4]
806 if (m or a or r or d) and not force:
806 if (m or a or r or d) and not force:
807 if refresh:
807 if refresh:
808 raise util.Abort(_("local changes found, refresh first"))
808 raise util.Abort(_("local changes found, refresh first"))
809 else:
809 else:
810 raise util.Abort(_("local changes found"))
810 raise util.Abort(_("local changes found"))
811 return m, a, r, d
811 return m, a, r, d
812
812
813 _reserved = ('series', 'status', 'guards')
813 _reserved = ('series', 'status', 'guards')
814 def check_reserved_name(self, name):
814 def check_reserved_name(self, name):
815 if (name in self._reserved or name.startswith('.hg')
815 if (name in self._reserved or name.startswith('.hg')
816 or name.startswith('.mq')):
816 or name.startswith('.mq')):
817 raise util.Abort(_('"%s" cannot be used as the name of a patch')
817 raise util.Abort(_('"%s" cannot be used as the name of a patch')
818 % name)
818 % name)
819
819
820 def new(self, repo, patchfn, *pats, **opts):
820 def new(self, repo, patchfn, *pats, **opts):
821 """options:
821 """options:
822 msg: a string or a no-argument function returning a string
822 msg: a string or a no-argument function returning a string
823 """
823 """
824 msg = opts.get('msg')
824 msg = opts.get('msg')
825 user = opts.get('user')
825 user = opts.get('user')
826 date = opts.get('date')
826 date = opts.get('date')
827 if date:
827 if date:
828 date = util.parsedate(date)
828 date = util.parsedate(date)
829 diffopts = self.diffopts({'git': opts.get('git')})
829 diffopts = self.diffopts({'git': opts.get('git')})
830 self.check_reserved_name(patchfn)
830 self.check_reserved_name(patchfn)
831 if os.path.exists(self.join(patchfn)):
831 if os.path.exists(self.join(patchfn)):
832 raise util.Abort(_('patch "%s" already exists') % patchfn)
832 raise util.Abort(_('patch "%s" already exists') % patchfn)
833 if opts.get('include') or opts.get('exclude') or pats:
833 if opts.get('include') or opts.get('exclude') or pats:
834 match = cmdutil.match(repo, pats, opts)
834 match = cmdutil.match(repo, pats, opts)
835 # detect missing files in pats
835 # detect missing files in pats
836 def badfn(f, msg):
836 def badfn(f, msg):
837 raise util.Abort('%s: %s' % (f, msg))
837 raise util.Abort('%s: %s' % (f, msg))
838 match.bad = badfn
838 match.bad = badfn
839 m, a, r, d = repo.status(match=match)[:4]
839 m, a, r, d = repo.status(match=match)[:4]
840 else:
840 else:
841 m, a, r, d = self.check_localchanges(repo, force=True)
841 m, a, r, d = self.check_localchanges(repo, force=True)
842 match = cmdutil.matchfiles(repo, m + a + r)
842 match = cmdutil.matchfiles(repo, m + a + r)
843 if len(repo[None].parents()) > 1:
843 if len(repo[None].parents()) > 1:
844 raise util.Abort(_('cannot manage merge changesets'))
844 raise util.Abort(_('cannot manage merge changesets'))
845 commitfiles = m + a + r
845 commitfiles = m + a + r
846 self.check_toppatch(repo)
846 self.check_toppatch(repo)
847 insert = self.full_series_end()
847 insert = self.full_series_end()
848 wlock = repo.wlock()
848 wlock = repo.wlock()
849 try:
849 try:
850 # if patch file write fails, abort early
850 # if patch file write fails, abort early
851 p = self.opener(patchfn, "w")
851 p = self.opener(patchfn, "w")
852 try:
852 try:
853 if self.plainmode:
853 if self.plainmode:
854 if user:
854 if user:
855 p.write("From: " + user + "\n")
855 p.write("From: " + user + "\n")
856 if not date:
856 if not date:
857 p.write("\n")
857 p.write("\n")
858 if date:
858 if date:
859 p.write("Date: %d %d\n\n" % date)
859 p.write("Date: %d %d\n\n" % date)
860 else:
860 else:
861 p.write("# HG changeset patch\n")
861 p.write("# HG changeset patch\n")
862 p.write("# Parent "
862 p.write("# Parent "
863 + hex(repo[None].parents()[0].node()) + "\n")
863 + hex(repo[None].parents()[0].node()) + "\n")
864 if user:
864 if user:
865 p.write("# User " + user + "\n")
865 p.write("# User " + user + "\n")
866 if date:
866 if date:
867 p.write("# Date %s %s\n\n" % date)
867 p.write("# Date %s %s\n\n" % date)
868 if hasattr(msg, '__call__'):
868 if hasattr(msg, '__call__'):
869 msg = msg()
869 msg = msg()
870 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
870 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
871 n = repo.commit(commitmsg, user, date, match=match, force=True)
871 n = repo.commit(commitmsg, user, date, match=match, force=True)
872 if n is None:
872 if n is None:
873 raise util.Abort(_("repo commit failed"))
873 raise util.Abort(_("repo commit failed"))
874 try:
874 try:
875 self.full_series[insert:insert] = [patchfn]
875 self.full_series[insert:insert] = [patchfn]
876 self.applied.append(statusentry(hex(n), patchfn))
876 self.applied.append(statusentry(hex(n), patchfn))
877 self.parse_series()
877 self.parse_series()
878 self.series_dirty = 1
878 self.series_dirty = 1
879 self.applied_dirty = 1
879 self.applied_dirty = 1
880 if msg:
880 if msg:
881 msg = msg + "\n\n"
881 msg = msg + "\n\n"
882 p.write(msg)
882 p.write(msg)
883 if commitfiles:
883 if commitfiles:
884 parent = self.qparents(repo, n)
884 parent = self.qparents(repo, n)
885 chunks = patch.diff(repo, node1=parent, node2=n,
885 chunks = patch.diff(repo, node1=parent, node2=n,
886 match=match, opts=diffopts)
886 match=match, opts=diffopts)
887 for chunk in chunks:
887 for chunk in chunks:
888 p.write(chunk)
888 p.write(chunk)
889 p.close()
889 p.close()
890 wlock.release()
890 wlock.release()
891 wlock = None
891 wlock = None
892 r = self.qrepo()
892 r = self.qrepo()
893 if r:
893 if r:
894 r.add([patchfn])
894 r.add([patchfn])
895 except:
895 except:
896 repo.rollback()
896 repo.rollback()
897 raise
897 raise
898 except Exception:
898 except Exception:
899 patchpath = self.join(patchfn)
899 patchpath = self.join(patchfn)
900 try:
900 try:
901 os.unlink(patchpath)
901 os.unlink(patchpath)
902 except:
902 except:
903 self.ui.warn(_('error unlinking %s\n') % patchpath)
903 self.ui.warn(_('error unlinking %s\n') % patchpath)
904 raise
904 raise
905 self.removeundo(repo)
905 self.removeundo(repo)
906 finally:
906 finally:
907 release(wlock)
907 release(wlock)
908
908
909 def strip(self, repo, rev, update=True, backup="all", force=None):
909 def strip(self, repo, rev, update=True, backup="all", force=None):
910 wlock = lock = None
910 wlock = lock = None
911 try:
911 try:
912 wlock = repo.wlock()
912 wlock = repo.wlock()
913 lock = repo.lock()
913 lock = repo.lock()
914
914
915 if update:
915 if update:
916 self.check_localchanges(repo, force=force, refresh=False)
916 self.check_localchanges(repo, force=force, refresh=False)
917 urev = self.qparents(repo, rev)
917 urev = self.qparents(repo, rev)
918 hg.clean(repo, urev)
918 hg.clean(repo, urev)
919 repo.dirstate.write()
919 repo.dirstate.write()
920
920
921 self.removeundo(repo)
921 self.removeundo(repo)
922 repair.strip(self.ui, repo, rev, backup)
922 repair.strip(self.ui, repo, rev, backup)
923 # strip may have unbundled a set of backed up revisions after
923 # strip may have unbundled a set of backed up revisions after
924 # the actual strip
924 # the actual strip
925 self.removeundo(repo)
925 self.removeundo(repo)
926 finally:
926 finally:
927 release(lock, wlock)
927 release(lock, wlock)
928
928
929 def isapplied(self, patch):
929 def isapplied(self, patch):
930 """returns (index, rev, patch)"""
930 """returns (index, rev, patch)"""
931 for i, a in enumerate(self.applied):
931 for i, a in enumerate(self.applied):
932 if a.name == patch:
932 if a.name == patch:
933 return (i, a.rev, a.name)
933 return (i, a.rev, a.name)
934 return None
934 return None
935
935
936 # if the exact patch name does not exist, we try a few
936 # if the exact patch name does not exist, we try a few
937 # variations. If strict is passed, we try only #1
937 # variations. If strict is passed, we try only #1
938 #
938 #
939 # 1) a number to indicate an offset in the series file
939 # 1) a number to indicate an offset in the series file
940 # 2) a unique substring of the patch name was given
940 # 2) a unique substring of the patch name was given
941 # 3) patchname[-+]num to indicate an offset in the series file
941 # 3) patchname[-+]num to indicate an offset in the series file
942 def lookup(self, patch, strict=False):
942 def lookup(self, patch, strict=False):
943 patch = patch and str(patch)
943 patch = patch and str(patch)
944
944
945 def partial_name(s):
945 def partial_name(s):
946 if s in self.series:
946 if s in self.series:
947 return s
947 return s
948 matches = [x for x in self.series if s in x]
948 matches = [x for x in self.series if s in x]
949 if len(matches) > 1:
949 if len(matches) > 1:
950 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
950 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
951 for m in matches:
951 for m in matches:
952 self.ui.warn(' %s\n' % m)
952 self.ui.warn(' %s\n' % m)
953 return None
953 return None
954 if matches:
954 if matches:
955 return matches[0]
955 return matches[0]
956 if len(self.series) > 0 and len(self.applied) > 0:
956 if len(self.series) > 0 and len(self.applied) > 0:
957 if s == 'qtip':
957 if s == 'qtip':
958 return self.series[self.series_end(True)-1]
958 return self.series[self.series_end(True)-1]
959 if s == 'qbase':
959 if s == 'qbase':
960 return self.series[0]
960 return self.series[0]
961 return None
961 return None
962
962
963 if patch is None:
963 if patch is None:
964 return None
964 return None
965 if patch in self.series:
965 if patch in self.series:
966 return patch
966 return patch
967
967
968 if not os.path.isfile(self.join(patch)):
968 if not os.path.isfile(self.join(patch)):
969 try:
969 try:
970 sno = int(patch)
970 sno = int(patch)
971 except (ValueError, OverflowError):
971 except (ValueError, OverflowError):
972 pass
972 pass
973 else:
973 else:
974 if -len(self.series) <= sno < len(self.series):
974 if -len(self.series) <= sno < len(self.series):
975 return self.series[sno]
975 return self.series[sno]
976
976
977 if not strict:
977 if not strict:
978 res = partial_name(patch)
978 res = partial_name(patch)
979 if res:
979 if res:
980 return res
980 return res
981 minus = patch.rfind('-')
981 minus = patch.rfind('-')
982 if minus >= 0:
982 if minus >= 0:
983 res = partial_name(patch[:minus])
983 res = partial_name(patch[:minus])
984 if res:
984 if res:
985 i = self.series.index(res)
985 i = self.series.index(res)
986 try:
986 try:
987 off = int(patch[minus + 1:] or 1)
987 off = int(patch[minus + 1:] or 1)
988 except (ValueError, OverflowError):
988 except (ValueError, OverflowError):
989 pass
989 pass
990 else:
990 else:
991 if i - off >= 0:
991 if i - off >= 0:
992 return self.series[i - off]
992 return self.series[i - off]
993 plus = patch.rfind('+')
993 plus = patch.rfind('+')
994 if plus >= 0:
994 if plus >= 0:
995 res = partial_name(patch[:plus])
995 res = partial_name(patch[:plus])
996 if res:
996 if res:
997 i = self.series.index(res)
997 i = self.series.index(res)
998 try:
998 try:
999 off = int(patch[plus + 1:] or 1)
999 off = int(patch[plus + 1:] or 1)
1000 except (ValueError, OverflowError):
1000 except (ValueError, OverflowError):
1001 pass
1001 pass
1002 else:
1002 else:
1003 if i + off < len(self.series):
1003 if i + off < len(self.series):
1004 return self.series[i + off]
1004 return self.series[i + off]
1005 raise util.Abort(_("patch %s not in series") % patch)
1005 raise util.Abort(_("patch %s not in series") % patch)
1006
1006
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """Apply (qpush) the next patch(es) in the series.

        patch: push up to and including this patch; the name is resolved
            through self.lookup (so indexes and partial names work).
            None pushes only the next unapplied patch.
        force: skip the check for outstanding local changes.
        list: forwarded to self.apply.
        mergeq: when set, apply via self.mergepatch against that queue
            instead of self.apply.
        all: push every remaining patch in the series.

        Returns 0 on no-op success, 1 on a user-level error, otherwise
        the first element of self.apply/self.mergepatch's result.
        Raises util.Abort when asked to push backwards or onto a
        guarded/unknown patch.
        """
        diffopts = self.diffopts()
        # hold the working-dir lock for the whole operation
        wlock = repo.wlock()
        try:
            # warn (but proceed) if the working directory parent is not
            # a branch head
            heads = []
            for b, ls in repo.branchmap().iteritems():
                heads += ls
            if not heads:
                heads = [nullid]
            if repo.dirstate.parents()[0] not in heads:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                # qpush -a: target the last patch of the series
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                # no explicit target: push exactly one patch
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            # the slice of series entries to apply in this push
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                # NOTE(review): bare except also catches KeyboardInterrupt;
                # the cleanup below reverts the working dir before re-raising
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                # some hunks failed; the user must fix and qrefresh
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
1105
1105
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply (qpop) applied patches down to (and including) `patch`.

        patch: pop until this patch is the new top; None pops only the
            current top patch.
        force: skip the local-changes check before updating.
        update: update the working directory to the new queue parent;
            may be forced on/off below depending on the dirstate parents.
        all: pop every applied patch (qpop -a).

        Returns 1/True on user-level errors, None otherwise. Raises
        util.Abort for unknown/unapplied patches or when popping would
        strip revisions not managed by this queue.
        """
        def getfile(f, rev, flags):
            # restore file f to its content at filelog node `rev`
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            # first applied entry to remove
            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # force an update anyway if a dirstate parent is one of
                # the revisions about to be stripped
                parents = repo.dirstate.parents()
                rr = [bin(x.rev) for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # only update if a dirstate parent is actually being popped
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)[0]

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            # refuse to strip anything that is not purely mq history
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in a:
                    # files added by the popped patches: delete them and
                    # clean up now-empty directories (best effort)
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1209
1209
1210 def diff(self, repo, pats, opts):
1210 def diff(self, repo, pats, opts):
1211 top, patch = self.check_toppatch(repo)
1211 top, patch = self.check_toppatch(repo)
1212 if not top:
1212 if not top:
1213 self.ui.write(_("no patches applied\n"))
1213 self.ui.write(_("no patches applied\n"))
1214 return
1214 return
1215 qp = self.qparents(repo, top)
1215 qp = self.qparents(repo, top)
1216 if opts.get('reverse'):
1216 if opts.get('reverse'):
1217 node1, node2 = None, qp
1217 node1, node2 = None, qp
1218 else:
1218 else:
1219 node1, node2 = qp, None
1219 node1, node2 = qp, None
1220 diffopts = self.diffopts(opts, patch)
1220 diffopts = self.diffopts(opts, patch)
1221 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1221 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1222
1222
    def refresh(self, repo, pats=None, **opts):
        """Rewrite (qrefresh) the top patch from the working directory.

        Updates the patch file (message/user/date/parent header plus a
        freshly generated diff), strips the old qtip commit and commits
        a replacement. Returns 1 when no patch is applied; raises
        util.Abort when the top revision has children.

        Recognized opts: msg, user, date, git, short; `pats` limits the
        files included (with --short, only already-patched plus matched
        files are diffed).
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            # normalize to internal "unixtime offset" form
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            mm, aa, dd, aa2 = repo.status(patchparent, top)[:4]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = cmdutil.match(repo, pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with inc/exl options
                matchfn = cmdutil.match(repo, opts=opts)
            else:
                match = cmdutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            # deduplicate the merged file lists
            m = list(set(mm))
            r = list(set(dd))
            a = list(set(aa))
            c = [filter(matchfn, l) for l in (m, a, r)]
            match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
            chunks = patch.diff(repo, patchparent, match=match,
                                changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)

            try:
                if diffopts.git or diffopts.upgrade:
                    # git-style patches: track copies/renames explicitly
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    # iterate backwards so del m[i] is safe
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.forget(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                # assumes strip can roll itself back if interrupted
                repo.dirstate.setparents(*cparents)
                self.applied.pop()
                self.applied_dirty = 1
                self.strip(repo, top, update=False,
                           backup='strip')
            except:
                # dirstate was partially mutated; throw it away
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this
                patchf.rename()
                n = repo.commit(message, user, ph.date, match=match,
                                force=True)
                self.applied.append(statusentry(hex(n), patchfn))
            except:
                # the old qtip is already stripped: leave the queue in a
                # recoverable state and tell the user how to recover
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.save_dirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)
1405
1405
1406 def init(self, repo, create=False):
1406 def init(self, repo, create=False):
1407 if not create and os.path.isdir(self.path):
1407 if not create and os.path.isdir(self.path):
1408 raise util.Abort(_("patch queue directory already exists"))
1408 raise util.Abort(_("patch queue directory already exists"))
1409 try:
1409 try:
1410 os.mkdir(self.path)
1410 os.mkdir(self.path)
1411 except OSError, inst:
1411 except OSError, inst:
1412 if inst.errno != errno.EEXIST or not create:
1412 if inst.errno != errno.EEXIST or not create:
1413 raise
1413 raise
1414 if create:
1414 if create:
1415 return self.qrepo(create=True)
1415 return self.qrepo(create=True)
1416
1416
1417 def unapplied(self, repo, patch=None):
1417 def unapplied(self, repo, patch=None):
1418 if patch and patch not in self.series:
1418 if patch and patch not in self.series:
1419 raise util.Abort(_("patch %s is not in series file") % patch)
1419 raise util.Abort(_("patch %s is not in series file") % patch)
1420 if not patch:
1420 if not patch:
1421 start = self.series_end()
1421 start = self.series_end()
1422 else:
1422 else:
1423 start = self.series.index(patch) + 1
1423 start = self.series.index(patch) + 1
1424 unapplied = []
1424 unapplied = []
1425 for i in xrange(start, len(self.series)):
1425 for i in xrange(start, len(self.series)):
1426 pushable, reason = self.pushable(i)
1426 pushable, reason = self.pushable(i)
1427 if pushable:
1427 if pushable:
1428 unapplied.append((i, self.series[i]))
1428 unapplied.append((i, self.series[i]))
1429 self.explain_pushable(i)
1429 self.explain_pushable(i)
1430 return unapplied
1430 return unapplied
1431
1431
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print the patch series (or, with `missing`, files in the patch
        directory that are not in the series).

        start/length: slice of the series to print (length=None means to
            the end).
        status: when not verbose, only print patches whose status letter
            ('A' applied, 'U' unapplied, 'G' guarded) equals this.
        summary: append the first line of each patch's message,
            truncated to the terminal width when interactive.
        """
        def displayname(pfx, patchname):
            # print one line: optional prefix, patch name, optional summary
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                msg = ph.message and ph.message[0] or ''
                if self.ui.interactive():
                    width = util.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                msg = "%s%s: %s" % (pfx, patchname, msg)
            else:
                msg = pfx + patchname
            self.ui.write(msg + '\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                # width needed to right-align the largest printed index
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, stat)
                elif status and status != stat:
                    continue
                displayname(pfx, patch)
        else:
            # scan the patch directory for files not tracked by the series
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    # skip series/status/guards bookkeeping and dotfiles
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x)
1483
1483
1484 def issaveline(self, l):
1484 def issaveline(self, l):
1485 if l.name == '.hg.patches.save.line':
1485 if l.name == '.hg.patches.save.line':
1486 return True
1486 return True
1487
1487
1488 def qrepo(self, create=False):
1488 def qrepo(self, create=False):
1489 if create or os.path.isdir(self.join(".hg")):
1489 if create or os.path.isdir(self.join(".hg")):
1490 return hg.repository(self.ui, path=self.path, create=create)
1490 return hg.repository(self.ui, path=self.path, create=create)
1491
1491
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Restore queue state from the description of a qsave commit `rev`.

        The commit message is parsed for an optional 'Dirstate:' line
        (two queue-repo parent hashes) and a 'Patch Data:' section
        listing applied and unapplied patches. With `delete`, strip the
        save commit afterwards (unless it has children); with `qupdate`,
        also update the queue repository to the saved parent.

        Returns 1 on failure (no saved data / queue repo unavailable).
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                # two hex node ids after the 'Dirstate: ' prefix
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart != None:
                # entries with a rev were applied; bare names were not
                l = line.rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart is None:
            self.ui.warn(_("No saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                # only update the working dir if it sat on the save entry
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("queue directory updating\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("Unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])
1547
1547
def save(self, repo, msg=None):
    """Record the current queue state as a special save commit.

    Refuses to run when no patches are applied or when the state has
    already been saved.  Returns 1 on failure; None on success.
    """
    if not self.applied:
        self.ui.warn(_("save: no patches applied, exiting\n"))
        return 1
    if self.issaveline(self.applied[-1]):
        self.ui.warn(_("status is already saved\n"))
        return 1

    serieslines = [':' + patch for patch in self.full_series]
    if msg:
        msg = "hg patches: " + msg.rstrip('\r\n')
    else:
        msg = _("hg patches saved state")
    qrepo = self.qrepo()
    if qrepo:
        parents = qrepo.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hex(parents[0]), hex(parents[1]))
    msg += "\n\nPatch Data:\n"
    # The commit message doubles as machine-readable state: applied
    # entries first, then the full series prefixed with ':'.
    text = msg + "\n".join(str(entry) for entry in self.applied) + '\n'
    if serieslines:
        text += "\n".join(serieslines) + '\n'
    n = repo.commit(text, force=True)
    if not n:
        self.ui.warn(_("repo commit failed\n"))
        return 1
    self.applied.append(statusentry(hex(n), '.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1575
1575
def full_series_end(self):
    """Return the index just past the last applied patch in the full
    series file, or 0 when no patches are applied."""
    if not self.applied:
        return 0
    last = self.applied[-1].name
    pos = self.find_series(last)
    if pos is None:
        # Applied patch no longer present in the series file.
        return len(self.full_series)
    return pos + 1
1584
1584
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.
    """
    end = 0
    # Renamed from `next`: the inner helper shadowed the builtin.
    def nextpushable(start):
        if all_patches:
            return start
        # Skip over guarded (unpushable) patches, explaining each skip.
        i = start
        while i < len(self.series):
            p, reason = self.pushable(i)
            if p:
                break
            self.explain_pushable(i)
            i += 1
        return i
    if len(self.applied) > 0:
        p = self.applied[-1].name
        try:
            end = self.series.index(p)
        except ValueError:
            # Last applied patch is not in the series file.
            return 0
        return nextpushable(end + 1)
    return nextpushable(end)
1610
1610
def appliedname(self, index):
    """Return the name of the applied patch at *index*; in verbose
    mode the name is prefixed with its position in the series."""
    pname = self.applied[index].name
    if self.ui.verbose:
        return "%d %s" % (self.series.index(pname), pname)
    return pname
1618
1618
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Import patches into the queue.

    *files* are patch files (or '-' for stdin) copied into the patch
    directory; *rev* is a list of existing changesets to place under mq
    control by exporting them as patches.  The two modes are mutually
    exclusive.  *patchname* forces the name of a single imported patch;
    *existing* registers a file already in the patch directory; *force*
    overwrites an existing patch of the same name; *git* selects git
    diff format for --rev exports.

    Raises util.Abort on any conflict (duplicate name, non-linear
    revisions, unreadable input).
    """
    def checkseries(patchname):
        # Refuse duplicate entries in the series file.
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        # Refuse to clobber an existing patch file unless --force.
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        rev.sort(reverse=True)
    # NOTE(review): assumes rev is a list (possibly empty) when files are
    # given — callers pass opts['rev'] which defaults to [] — confirm.
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        diffopts = self.diffopts({'git': git})
        # rev is sorted newest-first; walk down checking linearity.
        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # Prepend: imported revisions become the bottom of the stack.
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=diffopts)
            patchf.close()

            se = statusentry(hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(
                            _('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = url.open(self.ui, filename).read()
            except (OSError, IOError):
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # Insert after the last applied patch, offset by patches
            # already added in this call.
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn(_("adding %s to series file\n") % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        # Track the new patch files in the versioned patch repo, if any.
        qrepo.add(added)
1735
1735
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    # Delegate to the queue object and persist its state.
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1748
1748
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    # Removed unused local `l = len(q.applied)` from the original.
    q = repo.mq

    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.series_end(True)

    if opts.get('last') and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif opts.get('last') and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif opts.get('last'):
        # Show only the patch below the current top.
        start = end - 2
        end = 1
    else:
        start = 0

    return q.qseries(repo, length=end, start=start, status='A',
                     summary=opts.get('summary'))
1776
1776
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    # With -1/--first, list only the next unapplied patch.
    if opts.get('first'):
        length = 1
    else:
        length = None
    return q.qseries(repo, start=start, length=length, status='U',
                     summary=opts.get('summary'))
1795
1795
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    # Forward to the queue object's qimport and persist the new state.
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()

    # --push only makes sense for file imports; --rev leaves them applied.
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1832
1832
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state)."""
    q = repo.mq
    r = q.init(repo, create)
    q.save_dirty()
    if not r:
        return 0
    # Seed the versioned patch repo with .hgignore and an empty series.
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1856
1856
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use hg init -Q instead."""
    # Thin deprecated wrapper around qinit.
    create = opts['create_repo']
    return qinit(ui, repo, create=create)
1869
1869
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested Mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # Default location of the nested patch repository.
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    # Fail early if there is no versioned patch repository to clone.
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # For a remote destination we cannot strip afterwards, so
                # clone only up to the revisions below qbase.
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # Remote source: ask it for qbase; best-effort only.
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # Drop the applied patches from the destination so it starts
            # with a clean (unapplied) queue.
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1933
1933
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use hg -Q commit instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # Mark the message for translation, consistent with every other
        # user-visible error in this file.
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1943
1943
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1948
1948
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # Index just past the last applied patch, or 0 when none applied.
    if q.applied:
        t = q.series_end(True)
    else:
        t = 0
    if t:
        return q.qseries(repo, start=t - 1, length=1, status='A',
                         summary=opts.get('summary'))
    ui.write(_("no patches applied\n"))
    return 1
1959
1959
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1968
1968
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    napplied = len(q.applied)
    # The two guard cases are disjoint, so checking empty first is safe.
    if not napplied:
        ui.write(_("no patches applied\n"))
        return 1
    if napplied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    return q.qseries(repo, start=napplied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1981
1981
def setupheaderopts(ui, opts):
    """Fill in opts['user']/opts['date'] from --currentuser/--currentdate
    when not explicitly given."""
    if opts.get('currentuser') and not opts.get('user'):
        opts['user'] = ui.username()
    if opts.get('currentdate') and not opts.get('date'):
        opts['date'] = "%d %d" % util.makedate()
1987
1987
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        # Deferred editor invocation; called by q.new when needed.
        return ui.edit(msg, ui.username())
    q = repo.mq
    # Removed the redundant `opts['msg'] = msg` that the original
    # immediately overwrote in both branches below.
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
2025
2025
2026 def refresh(ui, repo, *pats, **opts):
2026 def refresh(ui, repo, *pats, **opts):
2027 """update the current patch
2027 """update the current patch
2028
2028
2029 If any file patterns are provided, the refreshed patch will
2029 If any file patterns are provided, the refreshed patch will
2030 contain only the modifications that match those patterns; the
2030 contain only the modifications that match those patterns; the
2031 remaining modifications will remain in the working directory.
2031 remaining modifications will remain in the working directory.
2032
2032
2033 If -s/--short is specified, files currently included in the patch
2033 If -s/--short is specified, files currently included in the patch
2034 will be refreshed just like matched files and remain in the patch.
2034 will be refreshed just like matched files and remain in the patch.
2035
2035
2036 hg add/remove/copy/rename work as usual, though you might want to
2036 hg add/remove/copy/rename work as usual, though you might want to
2037 use git-style patches (-g/--git or [diff] git=1) to track copies
2037 use git-style patches (-g/--git or [diff] git=1) to track copies
2038 and renames. See the diffs help topic for more information on the
2038 and renames. See the diffs help topic for more information on the
2039 git diff format.
2039 git diff format.
2040 """
2040 """
2041 q = repo.mq
2041 q = repo.mq
2042 message = cmdutil.logmessage(opts)
2042 message = cmdutil.logmessage(opts)
2043 if opts['edit']:
2043 if opts['edit']:
2044 if not q.applied:
2044 if not q.applied:
2045 ui.write(_("no patches applied\n"))
2045 ui.write(_("no patches applied\n"))
2046 return 1
2046 return 1
2047 if message:
2047 if message:
2048 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2048 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2049 patch = q.applied[-1].name
2049 patch = q.applied[-1].name
2050 ph = patchheader(q.join(patch), q.plainmode)
2050 ph = patchheader(q.join(patch), q.plainmode)
2051 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2051 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2052 setupheaderopts(ui, opts)
2052 setupheaderopts(ui, opts)
2053 ret = q.refresh(repo, pats, msg=message, **opts)
2053 ret = q.refresh(repo, pats, msg=message, **opts)
2054 q.save_dirty()
2054 q.save_dirty()
2055 return ret
2055 return ret
2056
2056
2057 def diff(ui, repo, *pats, **opts):
2057 def diff(ui, repo, *pats, **opts):
2058 """diff of the current patch and subsequent modifications
2058 """diff of the current patch and subsequent modifications
2059
2059
2060 Shows a diff which includes the current patch as well as any
2060 Shows a diff which includes the current patch as well as any
2061 changes which have been made in the working directory since the
2061 changes which have been made in the working directory since the
2062 last refresh (thus showing what the current patch would become
2062 last refresh (thus showing what the current patch would become
2063 after a qrefresh).
2063 after a qrefresh).
2064
2064
2065 Use 'hg diff' if you only want to see the changes made since the
2065 Use 'hg diff' if you only want to see the changes made since the
2066 last qrefresh, or 'hg export qtip' if you want to see changes made
2066 last qrefresh, or 'hg export qtip' if you want to see changes made
2067 by the current patch without including changes made since the
2067 by the current patch without including changes made since the
2068 qrefresh.
2068 qrefresh.
2069 """
2069 """
2070 repo.mq.diff(repo, pats, opts)
2070 repo.mq.diff(repo, pats, opts)
2071 return 0
2071 return 0
2072
2072
2073 def fold(ui, repo, *files, **opts):
2073 def fold(ui, repo, *files, **opts):
2074 """fold the named patches into the current patch
2074 """fold the named patches into the current patch
2075
2075
2076 Patches must not yet be applied. Each patch will be successively
2076 Patches must not yet be applied. Each patch will be successively
2077 applied to the current patch in the order given. If all the
2077 applied to the current patch in the order given. If all the
2078 patches apply successfully, the current patch will be refreshed
2078 patches apply successfully, the current patch will be refreshed
2079 with the new cumulative patch, and the folded patches will be
2079 with the new cumulative patch, and the folded patches will be
2080 deleted. With -k/--keep, the folded patch files will not be
2080 deleted. With -k/--keep, the folded patch files will not be
2081 removed afterwards.
2081 removed afterwards.
2082
2082
2083 The header for each folded patch will be concatenated with the
2083 The header for each folded patch will be concatenated with the
2084 current patch header, separated by a line of '* * *'."""
2084 current patch header, separated by a line of '* * *'."""
2085
2085
2086 q = repo.mq
2086 q = repo.mq
2087
2087
2088 if not files:
2088 if not files:
2089 raise util.Abort(_('qfold requires at least one patch name'))
2089 raise util.Abort(_('qfold requires at least one patch name'))
2090 if not q.check_toppatch(repo)[0]:
2090 if not q.check_toppatch(repo)[0]:
2091 raise util.Abort(_('No patches applied'))
2091 raise util.Abort(_('No patches applied'))
2092 q.check_localchanges(repo)
2092 q.check_localchanges(repo)
2093
2093
2094 message = cmdutil.logmessage(opts)
2094 message = cmdutil.logmessage(opts)
2095 if opts['edit']:
2095 if opts['edit']:
2096 if message:
2096 if message:
2097 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2097 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2098
2098
2099 parent = q.lookup('qtip')
2099 parent = q.lookup('qtip')
2100 patches = []
2100 patches = []
2101 messages = []
2101 messages = []
2102 for f in files:
2102 for f in files:
2103 p = q.lookup(f)
2103 p = q.lookup(f)
2104 if p in patches or p == parent:
2104 if p in patches or p == parent:
2105 ui.warn(_('Skipping already folded patch %s') % p)
2105 ui.warn(_('Skipping already folded patch %s') % p)
2106 if q.isapplied(p):
2106 if q.isapplied(p):
2107 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2107 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
2108 patches.append(p)
2108 patches.append(p)
2109
2109
2110 for p in patches:
2110 for p in patches:
2111 if not message:
2111 if not message:
2112 ph = patchheader(q.join(p), q.plainmode)
2112 ph = patchheader(q.join(p), q.plainmode)
2113 if ph.message:
2113 if ph.message:
2114 messages.append(ph.message)
2114 messages.append(ph.message)
2115 pf = q.join(p)
2115 pf = q.join(p)
2116 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2116 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2117 if not patchsuccess:
2117 if not patchsuccess:
2118 raise util.Abort(_('Error folding patch %s') % p)
2118 raise util.Abort(_('Error folding patch %s') % p)
2119 patch.updatedir(ui, repo, files)
2119 patch.updatedir(ui, repo, files)
2120
2120
2121 if not message:
2121 if not message:
2122 ph = patchheader(q.join(parent), q.plainmode)
2122 ph = patchheader(q.join(parent), q.plainmode)
2123 message, user = ph.message, ph.user
2123 message, user = ph.message, ph.user
2124 for msg in messages:
2124 for msg in messages:
2125 message.append('* * *')
2125 message.append('* * *')
2126 message.extend(msg)
2126 message.extend(msg)
2127 message = '\n'.join(message)
2127 message = '\n'.join(message)
2128
2128
2129 if opts['edit']:
2129 if opts['edit']:
2130 message = ui.edit(message, user or ui.username())
2130 message = ui.edit(message, user or ui.username())
2131
2131
2132 diffopts = q.patchopts(q.diffopts(), *patches)
2132 diffopts = q.patchopts(q.diffopts(), *patches)
2133 q.refresh(repo, msg=message, git=diffopts.git)
2133 q.refresh(repo, msg=message, git=diffopts.git)
2134 q.delete(repo, patches, opts)
2134 q.delete(repo, patches, opts)
2135 q.save_dirty()
2135 q.save_dirty()
2136
2136
2137 def goto(ui, repo, patch, **opts):
2137 def goto(ui, repo, patch, **opts):
2138 '''push or pop patches until named patch is at top of stack'''
2138 '''push or pop patches until named patch is at top of stack'''
2139 q = repo.mq
2139 q = repo.mq
2140 patch = q.lookup(patch)
2140 patch = q.lookup(patch)
2141 if q.isapplied(patch):
2141 if q.isapplied(patch):
2142 ret = q.pop(repo, patch, force=opts['force'])
2142 ret = q.pop(repo, patch, force=opts['force'])
2143 else:
2143 else:
2144 ret = q.push(repo, patch, force=opts['force'])
2144 ret = q.push(repo, patch, force=opts['force'])
2145 q.save_dirty()
2145 q.save_dirty()
2146 return ret
2146 return ret
2147
2147
2148 def guard(ui, repo, *args, **opts):
2148 def guard(ui, repo, *args, **opts):
2149 '''set or print guards for a patch
2149 '''set or print guards for a patch
2150
2150
2151 Guards control whether a patch can be pushed. A patch with no
2151 Guards control whether a patch can be pushed. A patch with no
2152 guards is always pushed. A patch with a positive guard ("+foo") is
2152 guards is always pushed. A patch with a positive guard ("+foo") is
2153 pushed only if the qselect command has activated it. A patch with
2153 pushed only if the qselect command has activated it. A patch with
2154 a negative guard ("-foo") is never pushed if the qselect command
2154 a negative guard ("-foo") is never pushed if the qselect command
2155 has activated it.
2155 has activated it.
2156
2156
2157 With no arguments, print the currently active guards.
2157 With no arguments, print the currently active guards.
2158 With arguments, set guards for the named patch.
2158 With arguments, set guards for the named patch.
2159 NOTE: Specifying negative guards now requires '--'.
2159 NOTE: Specifying negative guards now requires '--'.
2160
2160
2161 To set guards on another patch::
2161 To set guards on another patch::
2162
2162
2163 hg qguard other.patch -- +2.6.17 -stable
2163 hg qguard other.patch -- +2.6.17 -stable
2164 '''
2164 '''
2165 def status(idx):
2165 def status(idx):
2166 guards = q.series_guards[idx] or ['unguarded']
2166 guards = q.series_guards[idx] or ['unguarded']
2167 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2167 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
2168 q = repo.mq
2168 q = repo.mq
2169 patch = None
2169 patch = None
2170 args = list(args)
2170 args = list(args)
2171 if opts['list']:
2171 if opts['list']:
2172 if args or opts['none']:
2172 if args or opts['none']:
2173 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2173 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2174 for i in xrange(len(q.series)):
2174 for i in xrange(len(q.series)):
2175 status(i)
2175 status(i)
2176 return
2176 return
2177 if not args or args[0][0:1] in '-+':
2177 if not args or args[0][0:1] in '-+':
2178 if not q.applied:
2178 if not q.applied:
2179 raise util.Abort(_('no patches applied'))
2179 raise util.Abort(_('no patches applied'))
2180 patch = q.applied[-1].name
2180 patch = q.applied[-1].name
2181 if patch is None and args[0][0:1] not in '-+':
2181 if patch is None and args[0][0:1] not in '-+':
2182 patch = args.pop(0)
2182 patch = args.pop(0)
2183 if patch is None:
2183 if patch is None:
2184 raise util.Abort(_('no patch to work with'))
2184 raise util.Abort(_('no patch to work with'))
2185 if args or opts['none']:
2185 if args or opts['none']:
2186 idx = q.find_series(patch)
2186 idx = q.find_series(patch)
2187 if idx is None:
2187 if idx is None:
2188 raise util.Abort(_('no patch named %s') % patch)
2188 raise util.Abort(_('no patch named %s') % patch)
2189 q.set_guards(idx, args)
2189 q.set_guards(idx, args)
2190 q.save_dirty()
2190 q.save_dirty()
2191 else:
2191 else:
2192 status(q.series.index(q.lookup(patch)))
2192 status(q.series.index(q.lookup(patch)))
2193
2193
2194 def header(ui, repo, patch=None):
2194 def header(ui, repo, patch=None):
2195 """print the header of the topmost or specified patch"""
2195 """print the header of the topmost or specified patch"""
2196 q = repo.mq
2196 q = repo.mq
2197
2197
2198 if patch:
2198 if patch:
2199 patch = q.lookup(patch)
2199 patch = q.lookup(patch)
2200 else:
2200 else:
2201 if not q.applied:
2201 if not q.applied:
2202 ui.write('no patches applied\n')
2202 ui.write(_('no patches applied\n'))
2203 return 1
2203 return 1
2204 patch = q.lookup('qtip')
2204 patch = q.lookup('qtip')
2205 ph = patchheader(q.join(patch), q.plainmode)
2205 ph = patchheader(q.join(patch), q.plainmode)
2206
2206
2207 ui.write('\n'.join(ph.message) + '\n')
2207 ui.write('\n'.join(ph.message) + '\n')
2208
2208
2209 def lastsavename(path):
2209 def lastsavename(path):
2210 (directory, base) = os.path.split(path)
2210 (directory, base) = os.path.split(path)
2211 names = os.listdir(directory)
2211 names = os.listdir(directory)
2212 namere = re.compile("%s.([0-9]+)" % base)
2212 namere = re.compile("%s.([0-9]+)" % base)
2213 maxindex = None
2213 maxindex = None
2214 maxname = None
2214 maxname = None
2215 for f in names:
2215 for f in names:
2216 m = namere.match(f)
2216 m = namere.match(f)
2217 if m:
2217 if m:
2218 index = int(m.group(1))
2218 index = int(m.group(1))
2219 if maxindex is None or index > maxindex:
2219 if maxindex is None or index > maxindex:
2220 maxindex = index
2220 maxindex = index
2221 maxname = f
2221 maxname = f
2222 if maxname:
2222 if maxname:
2223 return (os.path.join(directory, maxname), maxindex)
2223 return (os.path.join(directory, maxname), maxindex)
2224 return (None, None)
2224 return (None, None)
2225
2225
2226 def savename(path):
2226 def savename(path):
2227 (last, index) = lastsavename(path)
2227 (last, index) = lastsavename(path)
2228 if last is None:
2228 if last is None:
2229 index = 0
2229 index = 0
2230 newpath = path + ".%d" % (index + 1)
2230 newpath = path + ".%d" % (index + 1)
2231 return newpath
2231 return newpath
2232
2232
2233 def push(ui, repo, patch=None, **opts):
2233 def push(ui, repo, patch=None, **opts):
2234 """push the next patch onto the stack
2234 """push the next patch onto the stack
2235
2235
2236 When -f/--force is applied, all local changes in patched files
2236 When -f/--force is applied, all local changes in patched files
2237 will be lost.
2237 will be lost.
2238 """
2238 """
2239 q = repo.mq
2239 q = repo.mq
2240 mergeq = None
2240 mergeq = None
2241
2241
2242 if opts['merge']:
2242 if opts['merge']:
2243 if opts['name']:
2243 if opts['name']:
2244 newpath = repo.join(opts['name'])
2244 newpath = repo.join(opts['name'])
2245 else:
2245 else:
2246 newpath, i = lastsavename(q.path)
2246 newpath, i = lastsavename(q.path)
2247 if not newpath:
2247 if not newpath:
2248 ui.warn(_("no saved queues found, please use -n\n"))
2248 ui.warn(_("no saved queues found, please use -n\n"))
2249 return 1
2249 return 1
2250 mergeq = queue(ui, repo.join(""), newpath)
2250 mergeq = queue(ui, repo.join(""), newpath)
2251 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2251 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2252 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2252 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
2253 mergeq=mergeq, all=opts.get('all'))
2253 mergeq=mergeq, all=opts.get('all'))
2254 return ret
2254 return ret
2255
2255
2256 def pop(ui, repo, patch=None, **opts):
2256 def pop(ui, repo, patch=None, **opts):
2257 """pop the current patch off the stack
2257 """pop the current patch off the stack
2258
2258
2259 By default, pops off the top of the patch stack. If given a patch
2259 By default, pops off the top of the patch stack. If given a patch
2260 name, keeps popping off patches until the named patch is at the
2260 name, keeps popping off patches until the named patch is at the
2261 top of the stack.
2261 top of the stack.
2262 """
2262 """
2263 localupdate = True
2263 localupdate = True
2264 if opts['name']:
2264 if opts['name']:
2265 q = queue(ui, repo.join(""), repo.join(opts['name']))
2265 q = queue(ui, repo.join(""), repo.join(opts['name']))
2266 ui.warn(_('using patch queue: %s\n') % q.path)
2266 ui.warn(_('using patch queue: %s\n') % q.path)
2267 localupdate = False
2267 localupdate = False
2268 else:
2268 else:
2269 q = repo.mq
2269 q = repo.mq
2270 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2270 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2271 all=opts['all'])
2271 all=opts['all'])
2272 q.save_dirty()
2272 q.save_dirty()
2273 return ret
2273 return ret
2274
2274
2275 def rename(ui, repo, patch, name=None, **opts):
2275 def rename(ui, repo, patch, name=None, **opts):
2276 """rename a patch
2276 """rename a patch
2277
2277
2278 With one argument, renames the current patch to PATCH1.
2278 With one argument, renames the current patch to PATCH1.
2279 With two arguments, renames PATCH1 to PATCH2."""
2279 With two arguments, renames PATCH1 to PATCH2."""
2280
2280
2281 q = repo.mq
2281 q = repo.mq
2282
2282
2283 if not name:
2283 if not name:
2284 name = patch
2284 name = patch
2285 patch = None
2285 patch = None
2286
2286
2287 if patch:
2287 if patch:
2288 patch = q.lookup(patch)
2288 patch = q.lookup(patch)
2289 else:
2289 else:
2290 if not q.applied:
2290 if not q.applied:
2291 ui.write(_('no patches applied\n'))
2291 ui.write(_('no patches applied\n'))
2292 return
2292 return
2293 patch = q.lookup('qtip')
2293 patch = q.lookup('qtip')
2294 absdest = q.join(name)
2294 absdest = q.join(name)
2295 if os.path.isdir(absdest):
2295 if os.path.isdir(absdest):
2296 name = normname(os.path.join(name, os.path.basename(patch)))
2296 name = normname(os.path.join(name, os.path.basename(patch)))
2297 absdest = q.join(name)
2297 absdest = q.join(name)
2298 if os.path.exists(absdest):
2298 if os.path.exists(absdest):
2299 raise util.Abort(_('%s already exists') % absdest)
2299 raise util.Abort(_('%s already exists') % absdest)
2300
2300
2301 if name in q.series:
2301 if name in q.series:
2302 raise util.Abort(
2302 raise util.Abort(
2303 _('A patch named %s already exists in the series file') % name)
2303 _('A patch named %s already exists in the series file') % name)
2304
2304
2305 if ui.verbose:
2305 ui.note(_('renaming %s to %s\n') % (patch, name))
2306 ui.write('renaming %s to %s\n' % (patch, name))
2307 i = q.find_series(patch)
2306 i = q.find_series(patch)
2308 guards = q.guard_re.findall(q.full_series[i])
2307 guards = q.guard_re.findall(q.full_series[i])
2309 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2308 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2310 q.parse_series()
2309 q.parse_series()
2311 q.series_dirty = 1
2310 q.series_dirty = 1
2312
2311
2313 info = q.isapplied(patch)
2312 info = q.isapplied(patch)
2314 if info:
2313 if info:
2315 q.applied[info[0]] = statusentry(info[1], name)
2314 q.applied[info[0]] = statusentry(info[1], name)
2316 q.applied_dirty = 1
2315 q.applied_dirty = 1
2317
2316
2318 util.rename(q.join(patch), absdest)
2317 util.rename(q.join(patch), absdest)
2319 r = q.qrepo()
2318 r = q.qrepo()
2320 if r:
2319 if r:
2321 wlock = r.wlock()
2320 wlock = r.wlock()
2322 try:
2321 try:
2323 if r.dirstate[patch] == 'a':
2322 if r.dirstate[patch] == 'a':
2324 r.dirstate.forget(patch)
2323 r.dirstate.forget(patch)
2325 r.dirstate.add(name)
2324 r.dirstate.add(name)
2326 else:
2325 else:
2327 if r.dirstate[name] == 'r':
2326 if r.dirstate[name] == 'r':
2328 r.undelete([name])
2327 r.undelete([name])
2329 r.copy(patch, name)
2328 r.copy(patch, name)
2330 r.remove([patch], False)
2329 r.remove([patch], False)
2331 finally:
2330 finally:
2332 wlock.release()
2331 wlock.release()
2333
2332
2334 q.save_dirty()
2333 q.save_dirty()
2335
2334
2336 def restore(ui, repo, rev, **opts):
2335 def restore(ui, repo, rev, **opts):
2337 """restore the queue state saved by a revision (DEPRECATED)
2336 """restore the queue state saved by a revision (DEPRECATED)
2338
2337
2339 This command is deprecated, use rebase --mq instead."""
2338 This command is deprecated, use rebase --mq instead."""
2340 rev = repo.lookup(rev)
2339 rev = repo.lookup(rev)
2341 q = repo.mq
2340 q = repo.mq
2342 q.restore(repo, rev, delete=opts['delete'],
2341 q.restore(repo, rev, delete=opts['delete'],
2343 qupdate=opts['update'])
2342 qupdate=opts['update'])
2344 q.save_dirty()
2343 q.save_dirty()
2345 return 0
2344 return 0
2346
2345
2347 def save(ui, repo, **opts):
2346 def save(ui, repo, **opts):
2348 """save current queue state (DEPRECATED)
2347 """save current queue state (DEPRECATED)
2349
2348
2350 This command is deprecated, use rebase --mq instead."""
2349 This command is deprecated, use rebase --mq instead."""
2351 q = repo.mq
2350 q = repo.mq
2352 message = cmdutil.logmessage(opts)
2351 message = cmdutil.logmessage(opts)
2353 ret = q.save(repo, msg=message)
2352 ret = q.save(repo, msg=message)
2354 if ret:
2353 if ret:
2355 return ret
2354 return ret
2356 q.save_dirty()
2355 q.save_dirty()
2357 if opts['copy']:
2356 if opts['copy']:
2358 path = q.path
2357 path = q.path
2359 if opts['name']:
2358 if opts['name']:
2360 newpath = os.path.join(q.basepath, opts['name'])
2359 newpath = os.path.join(q.basepath, opts['name'])
2361 if os.path.exists(newpath):
2360 if os.path.exists(newpath):
2362 if not os.path.isdir(newpath):
2361 if not os.path.isdir(newpath):
2363 raise util.Abort(_('destination %s exists and is not '
2362 raise util.Abort(_('destination %s exists and is not '
2364 'a directory') % newpath)
2363 'a directory') % newpath)
2365 if not opts['force']:
2364 if not opts['force']:
2366 raise util.Abort(_('destination %s exists, '
2365 raise util.Abort(_('destination %s exists, '
2367 'use -f to force') % newpath)
2366 'use -f to force') % newpath)
2368 else:
2367 else:
2369 newpath = savename(path)
2368 newpath = savename(path)
2370 ui.warn(_("copy %s to %s\n") % (path, newpath))
2369 ui.warn(_("copy %s to %s\n") % (path, newpath))
2371 util.copyfiles(path, newpath)
2370 util.copyfiles(path, newpath)
2372 if opts['empty']:
2371 if opts['empty']:
2373 try:
2372 try:
2374 os.unlink(q.join(q.status_path))
2373 os.unlink(q.join(q.status_path))
2375 except:
2374 except:
2376 pass
2375 pass
2377 return 0
2376 return 0
2378
2377
2379 def strip(ui, repo, rev, **opts):
2378 def strip(ui, repo, rev, **opts):
2380 """strip a revision and all its descendants from the repository
2379 """strip a revision and all its descendants from the repository
2381
2380
2382 If one of the working directory's parent revisions is stripped, the
2381 If one of the working directory's parent revisions is stripped, the
2383 working directory will be updated to the parent of the stripped
2382 working directory will be updated to the parent of the stripped
2384 revision.
2383 revision.
2385 """
2384 """
2386 backup = 'all'
2385 backup = 'all'
2387 if opts['backup']:
2386 if opts['backup']:
2388 backup = 'strip'
2387 backup = 'strip'
2389 elif opts['nobackup']:
2388 elif opts['nobackup']:
2390 backup = 'none'
2389 backup = 'none'
2391
2390
2392 rev = repo.lookup(rev)
2391 rev = repo.lookup(rev)
2393 p = repo.dirstate.parents()
2392 p = repo.dirstate.parents()
2394 cl = repo.changelog
2393 cl = repo.changelog
2395 update = True
2394 update = True
2396 if p[0] == nullid:
2395 if p[0] == nullid:
2397 update = False
2396 update = False
2398 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2397 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2399 update = False
2398 update = False
2400 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2399 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2401 update = False
2400 update = False
2402
2401
2403 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2402 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2404 return 0
2403 return 0
2405
2404
2406 def select(ui, repo, *args, **opts):
2405 def select(ui, repo, *args, **opts):
2407 '''set or print guarded patches to push
2406 '''set or print guarded patches to push
2408
2407
2409 Use the qguard command to set or print guards on patch, then use
2408 Use the qguard command to set or print guards on patch, then use
2410 qselect to tell mq which guards to use. A patch will be pushed if
2409 qselect to tell mq which guards to use. A patch will be pushed if
2411 it has no guards or any positive guards match the currently
2410 it has no guards or any positive guards match the currently
2412 selected guard, but will not be pushed if any negative guards
2411 selected guard, but will not be pushed if any negative guards
2413 match the current guard. For example::
2412 match the current guard. For example::
2414
2413
2415 qguard foo.patch -stable (negative guard)
2414 qguard foo.patch -stable (negative guard)
2416 qguard bar.patch +stable (positive guard)
2415 qguard bar.patch +stable (positive guard)
2417 qselect stable
2416 qselect stable
2418
2417
2419 This activates the "stable" guard. mq will skip foo.patch (because
2418 This activates the "stable" guard. mq will skip foo.patch (because
2420 it has a negative match) but push bar.patch (because it has a
2419 it has a negative match) but push bar.patch (because it has a
2421 positive match).
2420 positive match).
2422
2421
2423 With no arguments, prints the currently active guards.
2422 With no arguments, prints the currently active guards.
2424 With one argument, sets the active guard.
2423 With one argument, sets the active guard.
2425
2424
2426 Use -n/--none to deactivate guards (no other arguments needed).
2425 Use -n/--none to deactivate guards (no other arguments needed).
2427 When no guards are active, patches with positive guards are
2426 When no guards are active, patches with positive guards are
2428 skipped and patches with negative guards are pushed.
2427 skipped and patches with negative guards are pushed.
2429
2428
2430 qselect can change the guards on applied patches. It does not pop
2429 qselect can change the guards on applied patches. It does not pop
2431 guarded patches by default. Use --pop to pop back to the last
2430 guarded patches by default. Use --pop to pop back to the last
2432 applied patch that is not guarded. Use --reapply (which implies
2431 applied patch that is not guarded. Use --reapply (which implies
2433 --pop) to push back to the current patch afterwards, but skip
2432 --pop) to push back to the current patch afterwards, but skip
2434 guarded patches.
2433 guarded patches.
2435
2434
2436 Use -s/--series to print a list of all guards in the series file
2435 Use -s/--series to print a list of all guards in the series file
2437 (no other arguments needed). Use -v for more information.'''
2436 (no other arguments needed). Use -v for more information.'''
2438
2437
2439 q = repo.mq
2438 q = repo.mq
2440 guards = q.active()
2439 guards = q.active()
2441 if args or opts['none']:
2440 if args or opts['none']:
2442 old_unapplied = q.unapplied(repo)
2441 old_unapplied = q.unapplied(repo)
2443 old_guarded = [i for i in xrange(len(q.applied)) if
2442 old_guarded = [i for i in xrange(len(q.applied)) if
2444 not q.pushable(i)[0]]
2443 not q.pushable(i)[0]]
2445 q.set_active(args)
2444 q.set_active(args)
2446 q.save_dirty()
2445 q.save_dirty()
2447 if not args:
2446 if not args:
2448 ui.status(_('guards deactivated\n'))
2447 ui.status(_('guards deactivated\n'))
2449 if not opts['pop'] and not opts['reapply']:
2448 if not opts['pop'] and not opts['reapply']:
2450 unapplied = q.unapplied(repo)
2449 unapplied = q.unapplied(repo)
2451 guarded = [i for i in xrange(len(q.applied))
2450 guarded = [i for i in xrange(len(q.applied))
2452 if not q.pushable(i)[0]]
2451 if not q.pushable(i)[0]]
2453 if len(unapplied) != len(old_unapplied):
2452 if len(unapplied) != len(old_unapplied):
2454 ui.status(_('number of unguarded, unapplied patches has '
2453 ui.status(_('number of unguarded, unapplied patches has '
2455 'changed from %d to %d\n') %
2454 'changed from %d to %d\n') %
2456 (len(old_unapplied), len(unapplied)))
2455 (len(old_unapplied), len(unapplied)))
2457 if len(guarded) != len(old_guarded):
2456 if len(guarded) != len(old_guarded):
2458 ui.status(_('number of guarded, applied patches has changed '
2457 ui.status(_('number of guarded, applied patches has changed '
2459 'from %d to %d\n') %
2458 'from %d to %d\n') %
2460 (len(old_guarded), len(guarded)))
2459 (len(old_guarded), len(guarded)))
2461 elif opts['series']:
2460 elif opts['series']:
2462 guards = {}
2461 guards = {}
2463 noguards = 0
2462 noguards = 0
2464 for gs in q.series_guards:
2463 for gs in q.series_guards:
2465 if not gs:
2464 if not gs:
2466 noguards += 1
2465 noguards += 1
2467 for g in gs:
2466 for g in gs:
2468 guards.setdefault(g, 0)
2467 guards.setdefault(g, 0)
2469 guards[g] += 1
2468 guards[g] += 1
2470 if ui.verbose:
2469 if ui.verbose:
2471 guards['NONE'] = noguards
2470 guards['NONE'] = noguards
2472 guards = guards.items()
2471 guards = guards.items()
2473 guards.sort(key=lambda x: x[0][1:])
2472 guards.sort(key=lambda x: x[0][1:])
2474 if guards:
2473 if guards:
2475 ui.note(_('guards in series file:\n'))
2474 ui.note(_('guards in series file:\n'))
2476 for guard, count in guards:
2475 for guard, count in guards:
2477 ui.note('%2d ' % count)
2476 ui.note('%2d ' % count)
2478 ui.write(guard, '\n')
2477 ui.write(guard, '\n')
2479 else:
2478 else:
2480 ui.note(_('no guards in series file\n'))
2479 ui.note(_('no guards in series file\n'))
2481 else:
2480 else:
2482 if guards:
2481 if guards:
2483 ui.note(_('active guards:\n'))
2482 ui.note(_('active guards:\n'))
2484 for g in guards:
2483 for g in guards:
2485 ui.write(g, '\n')
2484 ui.write(g, '\n')
2486 else:
2485 else:
2487 ui.write(_('no active guards\n'))
2486 ui.write(_('no active guards\n'))
2488 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2487 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2489 popped = False
2488 popped = False
2490 if opts['pop'] or opts['reapply']:
2489 if opts['pop'] or opts['reapply']:
2491 for i in xrange(len(q.applied)):
2490 for i in xrange(len(q.applied)):
2492 pushable, reason = q.pushable(i)
2491 pushable, reason = q.pushable(i)
2493 if not pushable:
2492 if not pushable:
2494 ui.status(_('popping guarded patches\n'))
2493 ui.status(_('popping guarded patches\n'))
2495 popped = True
2494 popped = True
2496 if i == 0:
2495 if i == 0:
2497 q.pop(repo, all=True)
2496 q.pop(repo, all=True)
2498 else:
2497 else:
2499 q.pop(repo, i - 1)
2498 q.pop(repo, i - 1)
2500 break
2499 break
2501 if popped:
2500 if popped:
2502 try:
2501 try:
2503 if reapply:
2502 if reapply:
2504 ui.status(_('reapplying unguarded patches\n'))
2503 ui.status(_('reapplying unguarded patches\n'))
2505 q.push(repo, reapply)
2504 q.push(repo, reapply)
2506 finally:
2505 finally:
2507 q.save_dirty()
2506 q.save_dirty()
2508
2507
2509 def finish(ui, repo, *revrange, **opts):
2508 def finish(ui, repo, *revrange, **opts):
2510 """move applied patches into repository history
2509 """move applied patches into repository history
2511
2510
2512 Finishes the specified revisions (corresponding to applied
2511 Finishes the specified revisions (corresponding to applied
2513 patches) by moving them out of mq control into regular repository
2512 patches) by moving them out of mq control into regular repository
2514 history.
2513 history.
2515
2514
2516 Accepts a revision range or the -a/--applied option. If --applied
2515 Accepts a revision range or the -a/--applied option. If --applied
2517 is specified, all applied mq revisions are removed from mq
2516 is specified, all applied mq revisions are removed from mq
2518 control. Otherwise, the given revisions must be at the base of the
2517 control. Otherwise, the given revisions must be at the base of the
2519 stack of applied patches.
2518 stack of applied patches.
2520
2519
2521 This can be especially useful if your changes have been applied to
2520 This can be especially useful if your changes have been applied to
2522 an upstream repository, or if you are about to push your changes
2521 an upstream repository, or if you are about to push your changes
2523 to upstream.
2522 to upstream.
2524 """
2523 """
2525 if not opts['applied'] and not revrange:
2524 if not opts['applied'] and not revrange:
2526 raise util.Abort(_('no revisions specified'))
2525 raise util.Abort(_('no revisions specified'))
2527 elif opts['applied']:
2526 elif opts['applied']:
2528 revrange = ('qbase:qtip',) + revrange
2527 revrange = ('qbase:qtip',) + revrange
2529
2528
2530 q = repo.mq
2529 q = repo.mq
2531 if not q.applied:
2530 if not q.applied:
2532 ui.status(_('no patches applied\n'))
2531 ui.status(_('no patches applied\n'))
2533 return 0
2532 return 0
2534
2533
2535 revs = cmdutil.revrange(repo, revrange)
2534 revs = cmdutil.revrange(repo, revrange)
2536 q.finish(repo, revs)
2535 q.finish(repo, revs)
2537 q.save_dirty()
2536 q.save_dirty()
2538 return 0
2537 return 0
2539
2538
2540 def reposetup(ui, repo):
2539 def reposetup(ui, repo):
2541 class mqrepo(repo.__class__):
2540 class mqrepo(repo.__class__):
2542 @util.propertycache
2541 @util.propertycache
2543 def mq(self):
2542 def mq(self):
2544 return queue(self.ui, self.join(""))
2543 return queue(self.ui, self.join(""))
2545
2544
2546 def abort_if_wdir_patched(self, errmsg, force=False):
2545 def abort_if_wdir_patched(self, errmsg, force=False):
2547 if self.mq.applied and not force:
2546 if self.mq.applied and not force:
2548 parent = hex(self.dirstate.parents()[0])
2547 parent = hex(self.dirstate.parents()[0])
2549 if parent in [s.rev for s in self.mq.applied]:
2548 if parent in [s.rev for s in self.mq.applied]:
2550 raise util.Abort(errmsg)
2549 raise util.Abort(errmsg)
2551
2550
2552 def commit(self, text="", user=None, date=None, match=None,
2551 def commit(self, text="", user=None, date=None, match=None,
2553 force=False, editor=False, extra={}):
2552 force=False, editor=False, extra={}):
2554 self.abort_if_wdir_patched(
2553 self.abort_if_wdir_patched(
2555 _('cannot commit over an applied mq patch'),
2554 _('cannot commit over an applied mq patch'),
2556 force)
2555 force)
2557
2556
2558 return super(mqrepo, self).commit(text, user, date, match, force,
2557 return super(mqrepo, self).commit(text, user, date, match, force,
2559 editor, extra)
2558 editor, extra)
2560
2559
2561 def push(self, remote, force=False, revs=None):
2560 def push(self, remote, force=False, revs=None):
2562 if self.mq.applied and not force and not revs:
2561 if self.mq.applied and not force and not revs:
2563 raise util.Abort(_('source has mq patches applied'))
2562 raise util.Abort(_('source has mq patches applied'))
2564 return super(mqrepo, self).push(remote, force, revs)
2563 return super(mqrepo, self).push(remote, force, revs)
2565
2564
2566 def _findtags(self):
2565 def _findtags(self):
2567 '''augment tags from base class with patch tags'''
2566 '''augment tags from base class with patch tags'''
2568 result = super(mqrepo, self)._findtags()
2567 result = super(mqrepo, self)._findtags()
2569
2568
2570 q = self.mq
2569 q = self.mq
2571 if not q.applied:
2570 if not q.applied:
2572 return result
2571 return result
2573
2572
2574 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2573 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2575
2574
2576 if mqtags[-1][0] not in self.changelog.nodemap:
2575 if mqtags[-1][0] not in self.changelog.nodemap:
2577 self.ui.warn(_('mq status file refers to unknown node %s\n')
2576 self.ui.warn(_('mq status file refers to unknown node %s\n')
2578 % short(mqtags[-1][0]))
2577 % short(mqtags[-1][0]))
2579 return result
2578 return result
2580
2579
2581 mqtags.append((mqtags[-1][0], 'qtip'))
2580 mqtags.append((mqtags[-1][0], 'qtip'))
2582 mqtags.append((mqtags[0][0], 'qbase'))
2581 mqtags.append((mqtags[0][0], 'qbase'))
2583 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2582 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2584 tags = result[0]
2583 tags = result[0]
2585 for patch in mqtags:
2584 for patch in mqtags:
2586 if patch[1] in tags:
2585 if patch[1] in tags:
2587 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2586 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2588 % patch[1])
2587 % patch[1])
2589 else:
2588 else:
2590 tags[patch[1]] = patch[0]
2589 tags[patch[1]] = patch[0]
2591
2590
2592 return result
2591 return result
2593
2592
2594 def _branchtags(self, partial, lrev):
2593 def _branchtags(self, partial, lrev):
2595 q = self.mq
2594 q = self.mq
2596 if not q.applied:
2595 if not q.applied:
2597 return super(mqrepo, self)._branchtags(partial, lrev)
2596 return super(mqrepo, self)._branchtags(partial, lrev)
2598
2597
2599 cl = self.changelog
2598 cl = self.changelog
2600 qbasenode = bin(q.applied[0].rev)
2599 qbasenode = bin(q.applied[0].rev)
2601 if qbasenode not in cl.nodemap:
2600 if qbasenode not in cl.nodemap:
2602 self.ui.warn(_('mq status file refers to unknown node %s\n')
2601 self.ui.warn(_('mq status file refers to unknown node %s\n')
2603 % short(qbasenode))
2602 % short(qbasenode))
2604 return super(mqrepo, self)._branchtags(partial, lrev)
2603 return super(mqrepo, self)._branchtags(partial, lrev)
2605
2604
2606 qbase = cl.rev(qbasenode)
2605 qbase = cl.rev(qbasenode)
2607 start = lrev + 1
2606 start = lrev + 1
2608 if start < qbase:
2607 if start < qbase:
2609 # update the cache (excluding the patches) and save it
2608 # update the cache (excluding the patches) and save it
2610 self._updatebranchcache(partial, lrev + 1, qbase)
2609 self._updatebranchcache(partial, lrev + 1, qbase)
2611 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2610 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
2612 start = qbase
2611 start = qbase
2613 # if start = qbase, the cache is as updated as it should be.
2612 # if start = qbase, the cache is as updated as it should be.
2614 # if start > qbase, the cache includes (part of) the patches.
2613 # if start > qbase, the cache includes (part of) the patches.
2615 # we might as well use it, but we won't save it.
2614 # we might as well use it, but we won't save it.
2616
2615
2617 # update the cache up to the tip
2616 # update the cache up to the tip
2618 self._updatebranchcache(partial, start, len(cl))
2617 self._updatebranchcache(partial, start, len(cl))
2619
2618
2620 return partial
2619 return partial
2621
2620
2622 if repo.local():
2621 if repo.local():
2623 repo.__class__ = mqrepo
2622 repo.__class__ = mqrepo
2624
2623
2625 def mqimport(orig, ui, repo, *args, **kwargs):
2624 def mqimport(orig, ui, repo, *args, **kwargs):
2626 if (hasattr(repo, 'abort_if_wdir_patched')
2625 if (hasattr(repo, 'abort_if_wdir_patched')
2627 and not kwargs.get('no_commit', False)):
2626 and not kwargs.get('no_commit', False)):
2628 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2627 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2629 kwargs.get('force'))
2628 kwargs.get('force'))
2630 return orig(ui, repo, *args, **kwargs)
2629 return orig(ui, repo, *args, **kwargs)
2631
2630
2632 def mqinit(orig, ui, *args, **kwargs):
2631 def mqinit(orig, ui, *args, **kwargs):
2633 mq = kwargs['mq']
2632 mq = kwargs['mq']
2634 del kwargs['mq']
2633 del kwargs['mq']
2635
2634
2636 if not mq:
2635 if not mq:
2637 return orig(ui, *args, **kwargs)
2636 return orig(ui, *args, **kwargs)
2638
2637
2639 repopath = cmdutil.findrepo(os.getcwd())
2638 repopath = cmdutil.findrepo(os.getcwd())
2640 repo = hg.repository(ui, repopath)
2639 repo = hg.repository(ui, repopath)
2641 return qinit(ui, repo, True)
2640 return qinit(ui, repo, True)
2642
2641
2643 def mqcommand(orig, ui, repo, *args, **kwargs):
2642 def mqcommand(orig, ui, repo, *args, **kwargs):
2644 """Add --mq option to operate on patch repository instead of main"""
2643 """Add --mq option to operate on patch repository instead of main"""
2645
2644
2646 # some commands do not like getting unknown options
2645 # some commands do not like getting unknown options
2647 mq = kwargs['mq']
2646 mq = kwargs['mq']
2648 del kwargs['mq']
2647 del kwargs['mq']
2649
2648
2650 if not mq:
2649 if not mq:
2651 return orig(ui, repo, *args, **kwargs)
2650 return orig(ui, repo, *args, **kwargs)
2652
2651
2653 q = repo.mq
2652 q = repo.mq
2654 r = q.qrepo()
2653 r = q.qrepo()
2655 if not r:
2654 if not r:
2656 raise util.Abort('no queue repository')
2655 raise util.Abort('no queue repository')
2657 return orig(r.ui, r, *args, **kwargs)
2656 return orig(r.ui, r, *args, **kwargs)
2658
2657
2659 def uisetup(ui):
2658 def uisetup(ui):
2660 mqopt = [('Q', 'mq', None, _("operate on patch repository"))]
2659 mqopt = [('Q', 'mq', None, _("operate on patch repository"))]
2661
2660
2662 extensions.wrapcommand(commands.table, 'import', mqimport)
2661 extensions.wrapcommand(commands.table, 'import', mqimport)
2663
2662
2664 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2663 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
2665 entry[1].extend(mqopt)
2664 entry[1].extend(mqopt)
2666
2665
2667 for cmd in commands.table.keys():
2666 for cmd in commands.table.keys():
2668 cmd = cmdutil.parsealiases(cmd)[0]
2667 cmd = cmdutil.parsealiases(cmd)[0]
2669 if cmd in commands.norepo:
2668 if cmd in commands.norepo:
2670 continue
2669 continue
2671 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2670 entry = extensions.wrapcommand(commands.table, cmd, mqcommand)
2672 entry[1].extend(mqopt)
2671 entry[1].extend(mqopt)
2673
2672
2674 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2673 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2675
2674
2676 cmdtable = {
2675 cmdtable = {
2677 "qapplied":
2676 "qapplied":
2678 (applied,
2677 (applied,
2679 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2678 [('1', 'last', None, _('show only the last patch'))] + seriesopts,
2680 _('hg qapplied [-1] [-s] [PATCH]')),
2679 _('hg qapplied [-1] [-s] [PATCH]')),
2681 "qclone":
2680 "qclone":
2682 (clone,
2681 (clone,
2683 [('', 'pull', None, _('use pull protocol to copy metadata')),
2682 [('', 'pull', None, _('use pull protocol to copy metadata')),
2684 ('U', 'noupdate', None, _('do not update the new working directories')),
2683 ('U', 'noupdate', None, _('do not update the new working directories')),
2685 ('', 'uncompressed', None,
2684 ('', 'uncompressed', None,
2686 _('use uncompressed transfer (fast over LAN)')),
2685 _('use uncompressed transfer (fast over LAN)')),
2687 ('p', 'patches', '', _('location of source patch repository')),
2686 ('p', 'patches', '', _('location of source patch repository')),
2688 ] + commands.remoteopts,
2687 ] + commands.remoteopts,
2689 _('hg qclone [OPTION]... SOURCE [DEST]')),
2688 _('hg qclone [OPTION]... SOURCE [DEST]')),
2690 "qcommit|qci":
2689 "qcommit|qci":
2691 (commit,
2690 (commit,
2692 commands.table["^commit|ci"][1],
2691 commands.table["^commit|ci"][1],
2693 _('hg qcommit [OPTION]... [FILE]...')),
2692 _('hg qcommit [OPTION]... [FILE]...')),
2694 "^qdiff":
2693 "^qdiff":
2695 (diff,
2694 (diff,
2696 commands.diffopts + commands.diffopts2 + commands.walkopts,
2695 commands.diffopts + commands.diffopts2 + commands.walkopts,
2697 _('hg qdiff [OPTION]... [FILE]...')),
2696 _('hg qdiff [OPTION]... [FILE]...')),
2698 "qdelete|qremove|qrm":
2697 "qdelete|qremove|qrm":
2699 (delete,
2698 (delete,
2700 [('k', 'keep', None, _('keep patch file')),
2699 [('k', 'keep', None, _('keep patch file')),
2701 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2700 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2702 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2701 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2703 'qfold':
2702 'qfold':
2704 (fold,
2703 (fold,
2705 [('e', 'edit', None, _('edit patch header')),
2704 [('e', 'edit', None, _('edit patch header')),
2706 ('k', 'keep', None, _('keep folded patch files')),
2705 ('k', 'keep', None, _('keep folded patch files')),
2707 ] + commands.commitopts,
2706 ] + commands.commitopts,
2708 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2707 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2709 'qgoto':
2708 'qgoto':
2710 (goto,
2709 (goto,
2711 [('f', 'force', None, _('overwrite any local changes'))],
2710 [('f', 'force', None, _('overwrite any local changes'))],
2712 _('hg qgoto [OPTION]... PATCH')),
2711 _('hg qgoto [OPTION]... PATCH')),
2713 'qguard':
2712 'qguard':
2714 (guard,
2713 (guard,
2715 [('l', 'list', None, _('list all patches and guards')),
2714 [('l', 'list', None, _('list all patches and guards')),
2716 ('n', 'none', None, _('drop all guards'))],
2715 ('n', 'none', None, _('drop all guards'))],
2717 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2716 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]')),
2718 'qheader': (header, [], _('hg qheader [PATCH]')),
2717 'qheader': (header, [], _('hg qheader [PATCH]')),
2719 "^qimport":
2718 "^qimport":
2720 (qimport,
2719 (qimport,
2721 [('e', 'existing', None, _('import file in patch directory')),
2720 [('e', 'existing', None, _('import file in patch directory')),
2722 ('n', 'name', '', _('name of patch file')),
2721 ('n', 'name', '', _('name of patch file')),
2723 ('f', 'force', None, _('overwrite existing files')),
2722 ('f', 'force', None, _('overwrite existing files')),
2724 ('r', 'rev', [], _('place existing revisions under mq control')),
2723 ('r', 'rev', [], _('place existing revisions under mq control')),
2725 ('g', 'git', None, _('use git extended diff format')),
2724 ('g', 'git', None, _('use git extended diff format')),
2726 ('P', 'push', None, _('qpush after importing'))],
2725 ('P', 'push', None, _('qpush after importing'))],
2727 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2726 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2728 "^qinit":
2727 "^qinit":
2729 (init,
2728 (init,
2730 [('c', 'create-repo', None, _('create queue repository'))],
2729 [('c', 'create-repo', None, _('create queue repository'))],
2731 _('hg qinit [-c]')),
2730 _('hg qinit [-c]')),
2732 "qnew":
2731 "qnew":
2733 (new,
2732 (new,
2734 [('e', 'edit', None, _('edit commit message')),
2733 [('e', 'edit', None, _('edit commit message')),
2735 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2734 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2736 ('g', 'git', None, _('use git extended diff format')),
2735 ('g', 'git', None, _('use git extended diff format')),
2737 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2736 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2738 ('u', 'user', '', _('add "From: <given user>" to patch')),
2737 ('u', 'user', '', _('add "From: <given user>" to patch')),
2739 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2738 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2740 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2739 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2741 ] + commands.walkopts + commands.commitopts,
2740 ] + commands.walkopts + commands.commitopts,
2742 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2741 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2743 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2742 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2744 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2743 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2745 "^qpop":
2744 "^qpop":
2746 (pop,
2745 (pop,
2747 [('a', 'all', None, _('pop all patches')),
2746 [('a', 'all', None, _('pop all patches')),
2748 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2747 ('n', 'name', '', _('queue name to pop (DEPRECATED)')),
2749 ('f', 'force', None, _('forget any local changes to patched files'))],
2748 ('f', 'force', None, _('forget any local changes to patched files'))],
2750 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2749 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2751 "^qpush":
2750 "^qpush":
2752 (push,
2751 (push,
2753 [('f', 'force', None, _('apply if the patch has rejects')),
2752 [('f', 'force', None, _('apply if the patch has rejects')),
2754 ('l', 'list', None, _('list patch name in commit text')),
2753 ('l', 'list', None, _('list patch name in commit text')),
2755 ('a', 'all', None, _('apply all patches')),
2754 ('a', 'all', None, _('apply all patches')),
2756 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2755 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2757 ('n', 'name', '', _('merge queue name (DEPRECATED)'))],
2756 ('n', 'name', '', _('merge queue name (DEPRECATED)'))],
2758 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2757 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2759 "^qrefresh":
2758 "^qrefresh":
2760 (refresh,
2759 (refresh,
2761 [('e', 'edit', None, _('edit commit message')),
2760 [('e', 'edit', None, _('edit commit message')),
2762 ('g', 'git', None, _('use git extended diff format')),
2761 ('g', 'git', None, _('use git extended diff format')),
2763 ('s', 'short', None,
2762 ('s', 'short', None,
2764 _('refresh only files already in the patch and specified files')),
2763 _('refresh only files already in the patch and specified files')),
2765 ('U', 'currentuser', None,
2764 ('U', 'currentuser', None,
2766 _('add/update author field in patch with current user')),
2765 _('add/update author field in patch with current user')),
2767 ('u', 'user', '',
2766 ('u', 'user', '',
2768 _('add/update author field in patch with given user')),
2767 _('add/update author field in patch with given user')),
2769 ('D', 'currentdate', None,
2768 ('D', 'currentdate', None,
2770 _('add/update date field in patch with current date')),
2769 _('add/update date field in patch with current date')),
2771 ('d', 'date', '',
2770 ('d', 'date', '',
2772 _('add/update date field in patch with given date'))
2771 _('add/update date field in patch with given date'))
2773 ] + commands.walkopts + commands.commitopts,
2772 ] + commands.walkopts + commands.commitopts,
2774 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2773 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2775 'qrename|qmv':
2774 'qrename|qmv':
2776 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2775 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2777 "qrestore":
2776 "qrestore":
2778 (restore,
2777 (restore,
2779 [('d', 'delete', None, _('delete save entry')),
2778 [('d', 'delete', None, _('delete save entry')),
2780 ('u', 'update', None, _('update queue working directory'))],
2779 ('u', 'update', None, _('update queue working directory'))],
2781 _('hg qrestore [-d] [-u] REV')),
2780 _('hg qrestore [-d] [-u] REV')),
2782 "qsave":
2781 "qsave":
2783 (save,
2782 (save,
2784 [('c', 'copy', None, _('copy patch directory')),
2783 [('c', 'copy', None, _('copy patch directory')),
2785 ('n', 'name', '', _('copy directory name')),
2784 ('n', 'name', '', _('copy directory name')),
2786 ('e', 'empty', None, _('clear queue status file')),
2785 ('e', 'empty', None, _('clear queue status file')),
2787 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2786 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2788 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2787 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2789 "qselect":
2788 "qselect":
2790 (select,
2789 (select,
2791 [('n', 'none', None, _('disable all guards')),
2790 [('n', 'none', None, _('disable all guards')),
2792 ('s', 'series', None, _('list all guards in series file')),
2791 ('s', 'series', None, _('list all guards in series file')),
2793 ('', 'pop', None, _('pop to before first guarded applied patch')),
2792 ('', 'pop', None, _('pop to before first guarded applied patch')),
2794 ('', 'reapply', None, _('pop, then reapply patches'))],
2793 ('', 'reapply', None, _('pop, then reapply patches'))],
2795 _('hg qselect [OPTION]... [GUARD]...')),
2794 _('hg qselect [OPTION]... [GUARD]...')),
2796 "qseries":
2795 "qseries":
2797 (series,
2796 (series,
2798 [('m', 'missing', None, _('print patches not in series')),
2797 [('m', 'missing', None, _('print patches not in series')),
2799 ] + seriesopts,
2798 ] + seriesopts,
2800 _('hg qseries [-ms]')),
2799 _('hg qseries [-ms]')),
2801 "^strip":
2800 "^strip":
2802 (strip,
2801 (strip,
2803 [('f', 'force', None, _('force removal with local changes')),
2802 [('f', 'force', None, _('force removal with local changes')),
2804 ('b', 'backup', None, _('bundle unrelated changesets')),
2803 ('b', 'backup', None, _('bundle unrelated changesets')),
2805 ('n', 'nobackup', None, _('no backups'))],
2804 ('n', 'nobackup', None, _('no backups'))],
2806 _('hg strip [-f] [-b] [-n] REV')),
2805 _('hg strip [-f] [-b] [-n] REV')),
2807 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2806 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2808 "qunapplied":
2807 "qunapplied":
2809 (unapplied,
2808 (unapplied,
2810 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2809 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
2811 _('hg qunapplied [-1] [-s] [PATCH]')),
2810 _('hg qunapplied [-1] [-s] [PATCH]')),
2812 "qfinish":
2811 "qfinish":
2813 (finish,
2812 (finish,
2814 [('a', 'applied', None, _('finish all applied changesets'))],
2813 [('a', 'applied', None, _('finish all applied changesets'))],
2815 _('hg qfinish [-a] [REV]...')),
2814 _('hg qfinish [-a] [REV]...')),
2816 }
2815 }
@@ -1,605 +1,605 b''
1 # Patch transplanting extension for Mercurial
1 # Patch transplanting extension for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''command to transplant changesets from another branch
8 '''command to transplant changesets from another branch
9
9
10 This extension allows you to transplant patches from another branch.
10 This extension allows you to transplant patches from another branch.
11
11
12 Transplanted patches are recorded in .hg/transplant/transplants, as a
12 Transplanted patches are recorded in .hg/transplant/transplants, as a
13 map from a changeset hash to its hash in the source repository.
13 map from a changeset hash to its hash in the source repository.
14 '''
14 '''
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 import os, tempfile
17 import os, tempfile
18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
19 from mercurial import patch, revlog, util, error
19 from mercurial import patch, revlog, util, error
20
20
21 class transplantentry(object):
21 class transplantentry(object):
22 def __init__(self, lnode, rnode):
22 def __init__(self, lnode, rnode):
23 self.lnode = lnode
23 self.lnode = lnode
24 self.rnode = rnode
24 self.rnode = rnode
25
25
26 class transplants(object):
26 class transplants(object):
27 def __init__(self, path=None, transplantfile=None, opener=None):
27 def __init__(self, path=None, transplantfile=None, opener=None):
28 self.path = path
28 self.path = path
29 self.transplantfile = transplantfile
29 self.transplantfile = transplantfile
30 self.opener = opener
30 self.opener = opener
31
31
32 if not opener:
32 if not opener:
33 self.opener = util.opener(self.path)
33 self.opener = util.opener(self.path)
34 self.transplants = []
34 self.transplants = []
35 self.dirty = False
35 self.dirty = False
36 self.read()
36 self.read()
37
37
38 def read(self):
38 def read(self):
39 abspath = os.path.join(self.path, self.transplantfile)
39 abspath = os.path.join(self.path, self.transplantfile)
40 if self.transplantfile and os.path.exists(abspath):
40 if self.transplantfile and os.path.exists(abspath):
41 for line in self.opener(self.transplantfile).read().splitlines():
41 for line in self.opener(self.transplantfile).read().splitlines():
42 lnode, rnode = map(revlog.bin, line.split(':'))
42 lnode, rnode = map(revlog.bin, line.split(':'))
43 self.transplants.append(transplantentry(lnode, rnode))
43 self.transplants.append(transplantentry(lnode, rnode))
44
44
45 def write(self):
45 def write(self):
46 if self.dirty and self.transplantfile:
46 if self.dirty and self.transplantfile:
47 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
48 os.mkdir(self.path)
48 os.mkdir(self.path)
49 fp = self.opener(self.transplantfile, 'w')
49 fp = self.opener(self.transplantfile, 'w')
50 for c in self.transplants:
50 for c in self.transplants:
51 l, r = map(revlog.hex, (c.lnode, c.rnode))
51 l, r = map(revlog.hex, (c.lnode, c.rnode))
52 fp.write(l + ':' + r + '\n')
52 fp.write(l + ':' + r + '\n')
53 fp.close()
53 fp.close()
54 self.dirty = False
54 self.dirty = False
55
55
56 def get(self, rnode):
56 def get(self, rnode):
57 return [t for t in self.transplants if t.rnode == rnode]
57 return [t for t in self.transplants if t.rnode == rnode]
58
58
59 def set(self, lnode, rnode):
59 def set(self, lnode, rnode):
60 self.transplants.append(transplantentry(lnode, rnode))
60 self.transplants.append(transplantentry(lnode, rnode))
61 self.dirty = True
61 self.dirty = True
62
62
63 def remove(self, transplant):
63 def remove(self, transplant):
64 del self.transplants[self.transplants.index(transplant)]
64 del self.transplants[self.transplants.index(transplant)]
65 self.dirty = True
65 self.dirty = True
66
66
67 class transplanter(object):
67 class transplanter(object):
68 def __init__(self, ui, repo):
68 def __init__(self, ui, repo):
69 self.ui = ui
69 self.ui = ui
70 self.path = repo.join('transplant')
70 self.path = repo.join('transplant')
71 self.opener = util.opener(self.path)
71 self.opener = util.opener(self.path)
72 self.transplants = transplants(self.path, 'transplants',
72 self.transplants = transplants(self.path, 'transplants',
73 opener=self.opener)
73 opener=self.opener)
74
74
75 def applied(self, repo, node, parent):
75 def applied(self, repo, node, parent):
76 '''returns True if a node is already an ancestor of parent
76 '''returns True if a node is already an ancestor of parent
77 or has already been transplanted'''
77 or has already been transplanted'''
78 if hasnode(repo, node):
78 if hasnode(repo, node):
79 if node in repo.changelog.reachable(parent, stop=node):
79 if node in repo.changelog.reachable(parent, stop=node):
80 return True
80 return True
81 for t in self.transplants.get(node):
81 for t in self.transplants.get(node):
82 # it might have been stripped
82 # it might have been stripped
83 if not hasnode(repo, t.lnode):
83 if not hasnode(repo, t.lnode):
84 self.transplants.remove(t)
84 self.transplants.remove(t)
85 return False
85 return False
86 if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
86 if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
87 return True
87 return True
88 return False
88 return False
89
89
90 def apply(self, repo, source, revmap, merges, opts={}):
90 def apply(self, repo, source, revmap, merges, opts={}):
91 '''apply the revisions in revmap one by one in revision order'''
91 '''apply the revisions in revmap one by one in revision order'''
92 revs = sorted(revmap)
92 revs = sorted(revmap)
93 p1, p2 = repo.dirstate.parents()
93 p1, p2 = repo.dirstate.parents()
94 pulls = []
94 pulls = []
95 diffopts = patch.diffopts(self.ui, opts)
95 diffopts = patch.diffopts(self.ui, opts)
96 diffopts.git = True
96 diffopts.git = True
97
97
98 lock = wlock = None
98 lock = wlock = None
99 try:
99 try:
100 wlock = repo.wlock()
100 wlock = repo.wlock()
101 lock = repo.lock()
101 lock = repo.lock()
102 for rev in revs:
102 for rev in revs:
103 node = revmap[rev]
103 node = revmap[rev]
104 revstr = '%s:%s' % (rev, revlog.short(node))
104 revstr = '%s:%s' % (rev, revlog.short(node))
105
105
106 if self.applied(repo, node, p1):
106 if self.applied(repo, node, p1):
107 self.ui.warn(_('skipping already applied revision %s\n') %
107 self.ui.warn(_('skipping already applied revision %s\n') %
108 revstr)
108 revstr)
109 continue
109 continue
110
110
111 parents = source.changelog.parents(node)
111 parents = source.changelog.parents(node)
112 if not opts.get('filter'):
112 if not opts.get('filter'):
113 # If the changeset parent is the same as the
113 # If the changeset parent is the same as the
114 # wdir's parent, just pull it.
114 # wdir's parent, just pull it.
115 if parents[0] == p1:
115 if parents[0] == p1:
116 pulls.append(node)
116 pulls.append(node)
117 p1 = node
117 p1 = node
118 continue
118 continue
119 if pulls:
119 if pulls:
120 if source != repo:
120 if source != repo:
121 repo.pull(source, heads=pulls)
121 repo.pull(source, heads=pulls)
122 merge.update(repo, pulls[-1], False, False, None)
122 merge.update(repo, pulls[-1], False, False, None)
123 p1, p2 = repo.dirstate.parents()
123 p1, p2 = repo.dirstate.parents()
124 pulls = []
124 pulls = []
125
125
126 domerge = False
126 domerge = False
127 if node in merges:
127 if node in merges:
128 # pulling all the merge revs at once would mean we
128 # pulling all the merge revs at once would mean we
129 # couldn't transplant after the latest even if
129 # couldn't transplant after the latest even if
130 # transplants before them fail.
130 # transplants before them fail.
131 domerge = True
131 domerge = True
132 if not hasnode(repo, node):
132 if not hasnode(repo, node):
133 repo.pull(source, heads=[node])
133 repo.pull(source, heads=[node])
134
134
135 if parents[1] != revlog.nullid:
135 if parents[1] != revlog.nullid:
136 self.ui.note(_('skipping merge changeset %s:%s\n')
136 self.ui.note(_('skipping merge changeset %s:%s\n')
137 % (rev, revlog.short(node)))
137 % (rev, revlog.short(node)))
138 patchfile = None
138 patchfile = None
139 else:
139 else:
140 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
140 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
141 fp = os.fdopen(fd, 'w')
141 fp = os.fdopen(fd, 'w')
142 gen = patch.diff(source, parents[0], node, opts=diffopts)
142 gen = patch.diff(source, parents[0], node, opts=diffopts)
143 for chunk in gen:
143 for chunk in gen:
144 fp.write(chunk)
144 fp.write(chunk)
145 fp.close()
145 fp.close()
146
146
147 del revmap[rev]
147 del revmap[rev]
148 if patchfile or domerge:
148 if patchfile or domerge:
149 try:
149 try:
150 n = self.applyone(repo, node,
150 n = self.applyone(repo, node,
151 source.changelog.read(node),
151 source.changelog.read(node),
152 patchfile, merge=domerge,
152 patchfile, merge=domerge,
153 log=opts.get('log'),
153 log=opts.get('log'),
154 filter=opts.get('filter'))
154 filter=opts.get('filter'))
155 if n and domerge:
155 if n and domerge:
156 self.ui.status(_('%s merged at %s\n') % (revstr,
156 self.ui.status(_('%s merged at %s\n') % (revstr,
157 revlog.short(n)))
157 revlog.short(n)))
158 elif n:
158 elif n:
159 self.ui.status(_('%s transplanted to %s\n')
159 self.ui.status(_('%s transplanted to %s\n')
160 % (revlog.short(node),
160 % (revlog.short(node),
161 revlog.short(n)))
161 revlog.short(n)))
162 finally:
162 finally:
163 if patchfile:
163 if patchfile:
164 os.unlink(patchfile)
164 os.unlink(patchfile)
165 if pulls:
165 if pulls:
166 repo.pull(source, heads=pulls)
166 repo.pull(source, heads=pulls)
167 merge.update(repo, pulls[-1], False, False, None)
167 merge.update(repo, pulls[-1], False, False, None)
168 finally:
168 finally:
169 self.saveseries(revmap, merges)
169 self.saveseries(revmap, merges)
170 self.transplants.write()
170 self.transplants.write()
171 lock.release()
171 lock.release()
172 wlock.release()
172 wlock.release()
173
173
def filter(self, filter, changelog, patchfile):
    '''arbitrarily rewrite changeset before applying it'''

    self.ui.status(_('filtering %s\n') % patchfile)
    # changelog tuple layout: [1] user, [2] (time, tz), [4] description
    user, date, msg = (changelog[1], changelog[2], changelog[4])

    # write the commit metadata to a temporary header file so the
    # external filter program can rewrite it alongside the patch
    fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
    fp = os.fdopen(fd, 'w')
    fp.write("# HG changeset patch\n")
    fp.write("# User %s\n" % user)
    fp.write("# Date %d %d\n" % date)
    fp.write(msg + '\n')
    fp.close()

    try:
        # run the user-supplied filter with the header file as $1 and
        # the patch as $2; a non-zero exit aborts the transplant
        util.system('%s %s %s' % (filter, util.shellquote(headerfile),
                                  util.shellquote(patchfile)),
                    environ={'HGUSER': changelog[1]},
                    onerr=util.Abort, errprefix=_('filter failed'))
        # re-read the (possibly rewritten) user, date and message
        user, date, msg = self.parselog(file(headerfile))[1:4]
    finally:
        os.unlink(headerfile)

    return (user, date, msg)
198
198
def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
             filter=None):
    '''apply the patch in patchfile to the repository as a transplant

    Returns the new local node, or None when the patch turned out to
    be an empty changeset.  Raises util.Abort (after journalling state
    for --continue) when the patch fails to apply.
    '''
    (manifest, user, (time, timezone), files, message) = cl[:5]
    date = "%d %d" % (time, timezone)
    # remember where this changeset came from so transplantfilter can
    # later recognize already-applied changesets
    extra = {'transplant_source': node}
    if filter:
        (user, date, message) = self.filter(filter, cl, patchfile)

    if log:
        # we don't translate messages inserted into commits
        message += '\n(transplanted from %s)' % revlog.hex(node)

    self.ui.status(_('applying %s\n') % revlog.short(node))
    self.ui.note('%s %s\n%s\n' % (user, date, message))

    if not patchfile and not merge:
        raise util.Abort(_('can only omit patchfile if merging'))
    if patchfile:
        try:
            files = {}
            try:
                patch.patch(patchfile, self.ui, cwd=repo.root,
                            files=files, eolmode=None)
                if not files:
                    # the patch touched no files: nothing to commit
                    self.ui.warn(_('%s: empty changeset')
                                 % revlog.hex(node))
                    return None
            finally:
                # record adds/removes even if the patch partially failed
                files = patch.updatedir(self.ui, repo, files)
        except Exception, inst:
            # patch failed: drop the series file and journal enough
            # metadata so the user can fix up and --continue
            seriespath = os.path.join(self.path, 'series')
            if os.path.exists(seriespath):
                os.unlink(seriespath)
            p1 = repo.dirstate.parents()[0]
            p2 = node
            self.log(user, date, message, p1, p2, merge=merge)
            self.ui.write(str(inst) + '\n')
            raise util.Abort(_('Fix up the merge and run '
                               'hg transplant --continue'))
    else:
        files = None
    if merge:
        # record the transplanted node as second parent of the commit
        p1, p2 = repo.dirstate.parents()
        repo.dirstate.setparents(p1, node)
        m = match.always(repo.root, '')
    else:
        # commit exactly the files touched by the patch
        m = match.exact(repo.root, '', files)

    n = repo.commit(message, user, date, extra=extra, match=m)
    if not merge:
        self.transplants.set(n, node)

    return n
253
253
def resume(self, repo, source, opts=None):
    '''recover last transaction and apply remaining changesets'''
    # a journal file means a transplant was interrupted mid-changeset:
    # commit the fixed-up working directory first
    if os.path.exists(os.path.join(self.path, 'journal')):
        n, node = self.recover(repo)
        self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
                                                       revlog.short(n)))
    seriespath = os.path.join(self.path, 'series')
    if not os.path.exists(seriespath):
        # nothing left in the series: just flush the transplant map
        self.transplants.write()
        return
    # rebuild the revision map from the remaining series entries and
    # continue applying them
    nodes, merges = self.readseries()
    revmap = {}
    for n in nodes:
        revmap[source.changelog.rev(n)] = n
    os.unlink(seriespath)

    self.apply(repo, source, revmap, merges, opts)
271
271
def recover(self, repo):
    '''commit working directory using journal metadata

    Returns (new local node, original transplanted node).  Raises
    util.Abort if the journal is corrupt, the working directory is not
    at the journalled parent, or the commit fails.
    '''
    node, user, date, message, parents = self.readlog()
    # two journalled parents mean the interrupted transplant was a merge
    merge = len(parents) == 2

    if not user or not date or not message or not parents[0]:
        raise util.Abort(_('transplant log file is corrupt'))

    extra = {'transplant_source': node}
    wlock = repo.wlock()
    try:
        p1, p2 = repo.dirstate.parents()
        if p1 != parents[0]:
            raise util.Abort(
                _('working dir not at transplant parent %s') %
                revlog.hex(parents[0]))
        if merge:
            repo.dirstate.setparents(p1, parents[1])
        n = repo.commit(message, user, date, extra=extra)
        if not n:
            raise util.Abort(_('commit failed'))
        if not merge:
            self.transplants.set(n, node)
        # commit succeeded: the journal has served its purpose
        self.unlog()

        return n, node
    finally:
        wlock.release()
300
300
def readseries(self):
    '''read pending transplant nodes and merge nodes from the series file

    Returns a (nodes, merges) pair of binary node-id lists.
    '''
    pending = []
    mergelist = []
    # lines before the "# Merges" marker are ordinary transplants;
    # lines after it are nodes to be applied as merges
    target = pending
    for entry in self.opener('series').read().splitlines():
        if entry.startswith('# Merges'):
            target = mergelist
        else:
            target.append(revlog.bin(entry))

    return (pending, mergelist)
312
312
def saveseries(self, revmap, merges):
    '''persist the remaining transplants (and merge nodes) to the series file'''
    if not revmap:
        # nothing left to do: leave any existing series file untouched
        return

    if not os.path.isdir(self.path):
        os.mkdir(self.path)
    fp = self.opener('series', 'w')
    # hex node ids in source-revision order, one per line
    for key in sorted(revmap):
        fp.write(revlog.hex(revmap[key]) + '\n')
    if merges:
        # readseries() switches to collecting merges at this marker
        fp.write('# Merges\n')
        for mnode in merges:
            fp.write(revlog.hex(mnode) + '\n')
    fp.close()
327
327
def parselog(self, fp):
    '''parse changeset metadata and message from a journal/header file

    Returns (node, user, date, message, parents).  user and date are
    initialized to None so that a file missing its "# User" or
    "# Date" header yields None instead of raising UnboundLocalError;
    callers (e.g. recover()) already test for falsy values and report
    a corrupt transplant log.
    '''
    parents = []
    message = []
    node = revlog.nullid
    inmsg = False
    user = None
    date = None
    for line in fp.read().splitlines():
        if inmsg:
            message.append(line)
        elif line.startswith('# User '):
            user = line[7:]
        elif line.startswith('# Date '):
            date = line[7:]
        elif line.startswith('# Node ID '):
            node = revlog.bin(line[10:])
        elif line.startswith('# Parent '):
            parents.append(revlog.bin(line[9:]))
        elif not line.startswith('#'):
            # first non-header line starts the commit message
            inmsg = True
            message.append(line)
    return (node, user, date, '\n'.join(message), parents)
348
348
def log(self, user, date, message, p1, p2, merge=False):
    '''journal changeset metadata so an interrupted transplant can be
    recovered later (see recover()/readlog())'''

    if not os.path.isdir(self.path):
        os.mkdir(self.path)
    # assemble the journal in the same header format parselog() reads
    lines = ['# User %s\n' % user,
             '# Date %s\n' % date,
             '# Node ID %s\n' % revlog.hex(p2),
             '# Parent ' + revlog.hex(p1) + '\n']
    if merge:
        # a second "# Parent" line marks this as a merge transplant
        lines.append('# Parent ' + revlog.hex(p2) + '\n')
    lines.append(message.rstrip() + '\n')
    fp = self.opener('journal', 'w')
    fp.writelines(lines)
    fp.close()
363
363
def readlog(self):
    '''read back the journal written by log(), parsed via parselog()'''
    return self.parselog(self.opener('journal'))
366
366
def unlog(self):
    '''remove changelog journal'''
    journal = os.path.join(self.path, 'journal')
    # a missing journal is fine: there is simply nothing to clean up
    if os.path.exists(journal):
        os.unlink(journal)
372
372
def transplantfilter(self, repo, source, root):
    '''return a predicate deciding whether a source node is still a
    transplant candidate for repo'''
    def matchfn(node):
        # already transplanted (directly) into repo since root?
        if self.applied(repo, node, root):
            return False
        # merge changesets cannot be transplanted automatically
        if source.changelog.parents(node)[1] != revlog.nullid:
            return False
        # if this node is itself a transplant, skip it when its own
        # original source has already been applied here
        extra = source.changelog.read(node)[5]
        cnode = extra.get('transplant_source')
        if cnode and self.applied(repo, cnode, root):
            return False
        return True

    return matchfn
386
386
def hasnode(repo, node):
    '''return True if repo's changelog knows about node, False otherwise'''
    try:
        # compare with "is not None" (identity), not "!= None":
        # revision 0 is a valid, falsy revision number and None should
        # never be compared with equality operators (PEP 8)
        return repo.changelog.rev(node) is not None
    except error.RevlogError:
        # rev() raises for unknown nodes
        return False
392
392
def browserevs(ui, repo, nodes, opts):
    '''interactively transplant changesets

    Shows each candidate node and prompts for an action.  Returns a
    (transplants, merges) pair of node lists; both are empty when the
    user cancels with "q".
    '''
    def browsehelp(ui):
        ui.write(_('y: transplant this changeset\n'
                   'n: skip this changeset\n'
                   'm: merge at this changeset\n'
                   'p: show patch\n'
                   'c: commit selected changesets\n'
                   'q: cancel transplant\n'
                   '?: show this help\n'))

    displayer = cmdutil.show_changeset(ui, repo, opts)
    transplants = []
    merges = []
    for node in nodes:
        displayer.show(repo[node])
        action = None
        # keep prompting until a definitive action (y/n/m/c/q) is given;
        # "?" and "p" only display information and re-prompt
        while not action:
            action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
            if action == '?':
                browsehelp(ui)
                action = None
            elif action == 'p':
                # show the diff against the first parent, then re-prompt
                parent = repo.changelog.parents(node)[0]
                for chunk in patch.diff(repo, parent, node):
                    ui.write(chunk)
                action = None
            elif action not in ('y', 'n', 'm', 'c', 'q'):
                ui.write(_('no such option\n'))
                action = None
        if action == 'y':
            transplants.append(node)
        elif action == 'm':
            merges.append(node)
        elif action == 'c':
            # stop browsing, keep what was selected so far
            break
        elif action == 'q':
            # cancel everything selected so far
            transplants = ()
            merges = ()
            break
    displayer.close()
    return (transplants, merges)
435
435
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. If --log is
    specified, log messages will have a comment appended of the form::

      (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message as
    $1 and the patch as $2.

    If --source/-s is specified, selects changesets from the named
    repository. If --branch/-b is specified, selects changesets from
    the branch holding the named revision, up to that revision. If
    --all/-a is specified, all changesets on the branch will be
    transplanted, otherwise you will be prompted to select the
    changesets you want.

    hg transplant --branch REVISION --all will rebase the selected
    branch (up to the named revision) onto your current working
    directory.

    You can optionally mark selected transplanted changesets as merge
    changesets. You will not be prompted to transplant any ancestors
    of a merged transplant, and you can merge descendants of them
    normally instead of transplanting them.

    If no merges or revisions are provided, hg transplant will start
    an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand
    and then resume where you left off by calling hg transplant
    --continue/-c.
    '''
    def getremotechanges(repo, url):
        # open the source repo and find what it has that we don't;
        # returns (source, incoming heads, bundle filename or None)
        sourcerepo = ui.expandpath(url)
        source = hg.repository(ui, sourcerepo)
        common, incoming, rheads = repo.findcommonincoming(source, force=True)
        if not incoming:
            return (source, None, None)

        bundle = None
        if not source.local():
            # remote source: fetch the changegroup into a local bundle so
            # changesets can be read without further network round trips
            if source.capable('changegroupsubset'):
                cg = source.changegroupsubset(incoming, rheads, 'incoming')
            else:
                cg = source.changegroup(incoming, 'incoming')
            bundle = changegroup.writebundle(cg, None, 'HG10UN')
            source = bundlerepo.bundlerepository(ui, repo.root, bundle)

        return (source, incoming, bundle)

    def incwalk(repo, incoming, branches, match=util.always):
        # yield incoming nodes limited to branches and accepted by match
        if not branches:
            branches = None
        for node in repo.changelog.nodesbetween(incoming, branches)[0]:
            if match(node):
                yield node

    def transplantwalk(repo, root, branches, match=util.always):
        # yield local candidate nodes between root's ancestors and
        # branches (defaulting to all heads), accepted by match
        if not branches:
            branches = repo.heads()
        ancestors = []
        for branch in branches:
            ancestors.append(repo.changelog.ancestor(root, branch))
        for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # reject incompatible option combinations before doing any work
        if opts.get('continue'):
            if opts.get('branch') or opts.get('all') or opts.get('merge'):
                raise util.Abort(_('--continue is incompatible with '
                                   'branch, all or merge'))
            return
        if not (opts.get('source') or revs or
                opts.get('merge') or opts.get('branch')):
            raise util.Abort(_('no source URL, branch tag or revision '
                               'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise util.Abort(_('--all requires a branch revision'))
            if revs:
                raise util.Abort(_('--all is incompatible with a '
                                   'revision list'))

    checkopts(opts, revs)

    # fall back to [transplant] config for --log and --filter
    if not opts.get('log'):
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo)

    p1, p2 = repo.dirstate.parents()
    if len(repo) > 0 and p1 == revlog.nullid:
        raise util.Abort(_('no revision checked out'))
    if not opts.get('continue'):
        # a fresh transplant requires a clean, non-merge working dir
        if p2 != revlog.nullid:
            raise util.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise util.Abort(_('outstanding local changes'))

    bundle = None
    source = opts.get('source')
    if source:
        (source, incoming, bundle) = getremotechanges(repo, source)
    else:
        # no --source: transplant within the local repository
        source = repo

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            # --prune revisions are excluded on top of the normal filter
            prune = [source.lookup(r)
                     for r in cmdutil.revrange(source, opts.get('prune'))]
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        branches = map(source.lookup, opts.get('branch', ()))
        merges = map(source.lookup, opts.get('merge', ()))
        revmap = {}
        if revs:
            # explicit revision list on the command line
            for r in cmdutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, incoming, branches,
                                         match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, branches,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                # no --all: let the user pick interactively
                revs, newmerges = browserevs(ui, source, alltransplants, opts)
                merges.extend(newmerges)
        for r in revs:
            revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        if bundle:
            # remove the temporary bundle fetched from a remote source
            source.close()
            os.unlink(bundle)
590
590
# command table hooking "hg transplant" into Mercurial's dispatcher:
# each entry is (function, option list, synopsis string)
cmdtable = {
    "transplant":
        (transplant,
         [('s', 'source', '', _('pull patches from REPOSITORY')),
          ('b', 'branch', [], _('pull patches from branch BRANCH')),
          ('a', 'all', None, _('pull all changesets up to BRANCH')),
          ('p', 'prune', [], _('skip over REV')),
          ('m', 'merge', [], _('merge at REV')),
          ('', 'log', None, _('append transplant info to log message')),
          ('c', 'continue', None, _('continue last transplant session '
                                    'after repair')),
          ('', 'filter', '', _('filter changesets through FILTER'))],
         _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
           '[-m REV] [REV]...'))
    }
@@ -1,2216 +1,2216 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
21 class localrepository(repo.repository):
21 class localrepository(repo.repository):
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
23 supported = set('revlogv1 store fncache shared'.split())
23 supported = set('revlogv1 store fncache shared'.split())
24
24
    def __init__(self, baseui, path=None, create=0):
        """Open the repository rooted at path; with create=1, initialize
        a new repository there first.

        Raises error.RepoError when path is not a repository (or, with
        create, when one already exists), or when the repository uses a
        requirement this version does not support.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # opens files under .hg
        self.wopener = util.opener(self.root)   # opens working-dir files
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # layer the per-repository config over the global one and
            # load any extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is perfectly normal
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        # a repository created with 'hg share' records the real store
        # location in .hg/sharedpath; default to our own .hg
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        # lazily-populated caches; None/empty until first use
        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        # weakrefs to the active transaction/locks, if any
        self._transref = self._lockref = self._wlockref = None
108
108
109 @propertycache
109 @propertycache
110 def changelog(self):
110 def changelog(self):
111 c = changelog.changelog(self.sopener)
111 c = changelog.changelog(self.sopener)
112 if 'HG_PENDING' in os.environ:
112 if 'HG_PENDING' in os.environ:
113 p = os.environ['HG_PENDING']
113 p = os.environ['HG_PENDING']
114 if p.startswith(self.root):
114 if p.startswith(self.root):
115 c.readpending('00changelog.i.a')
115 c.readpending('00changelog.i.a')
116 self.sopener.options['defversion'] = c.version
116 self.sopener.options['defversion'] = c.version
117 return c
117 return c
118
118
119 @propertycache
119 @propertycache
120 def manifest(self):
120 def manifest(self):
121 return manifest.manifest(self.sopener)
121 return manifest.manifest(self.sopener)
122
122
123 @propertycache
123 @propertycache
124 def dirstate(self):
124 def dirstate(self):
125 return dirstate.dirstate(self.opener, self.ui, self.root)
125 return dirstate.dirstate(self.opener, self.ui, self.root)
126
126
127 def __getitem__(self, changeid):
127 def __getitem__(self, changeid):
128 if changeid is None:
128 if changeid is None:
129 return context.workingctx(self)
129 return context.workingctx(self)
130 return context.changectx(self, changeid)
130 return context.changectx(self, changeid)
131
131
132 def __contains__(self, changeid):
132 def __contains__(self, changeid):
133 try:
133 try:
134 return bool(self.lookup(changeid))
134 return bool(self.lookup(changeid))
135 except error.RepoLookupError:
135 except error.RepoLookupError:
136 return False
136 return False
137
137
138 def __nonzero__(self):
138 def __nonzero__(self):
139 return True
139 return True
140
140
141 def __len__(self):
141 def __len__(self):
142 return len(self.changelog)
142 return len(self.changelog)
143
143
144 def __iter__(self):
144 def __iter__(self):
145 for i in xrange(len(self)):
145 for i in xrange(len(self)):
146 yield i
146 yield i
147
147
148 def url(self):
148 def url(self):
149 return 'file:' + self.root
149 return 'file:' + self.root
150
150
151 def hook(self, name, throw=False, **args):
151 def hook(self, name, throw=False, **args):
152 return hook.hook(self.ui, self, name, throw, **args)
152 return hook.hook(self.ui, self, name, throw, **args)
153
153
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
155
155
156 def _tag(self, names, node, message, local, user, date, extra={}):
156 def _tag(self, names, node, message, local, user, date, extra={}):
157 if isinstance(names, str):
157 if isinstance(names, str):
158 allchars = names
158 allchars = names
159 names = (names,)
159 names = (names,)
160 else:
160 else:
161 allchars = ''.join(names)
161 allchars = ''.join(names)
162 for c in self.tag_disallowed:
162 for c in self.tag_disallowed:
163 if c in allchars:
163 if c in allchars:
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
164 raise util.Abort(_('%r cannot be used in a tag name') % c)
165
165
166 for name in names:
166 for name in names:
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
167 self.hook('pretag', throw=True, node=hex(node), tag=name,
168 local=local)
168 local=local)
169
169
170 def writetags(fp, names, munge, prevtags):
170 def writetags(fp, names, munge, prevtags):
171 fp.seek(0, 2)
171 fp.seek(0, 2)
172 if prevtags and prevtags[-1] != '\n':
172 if prevtags and prevtags[-1] != '\n':
173 fp.write('\n')
173 fp.write('\n')
174 for name in names:
174 for name in names:
175 m = munge and munge(name) or name
175 m = munge and munge(name) or name
176 if self._tagtypes and name in self._tagtypes:
176 if self._tagtypes and name in self._tagtypes:
177 old = self._tags.get(name, nullid)
177 old = self._tags.get(name, nullid)
178 fp.write('%s %s\n' % (hex(old), m))
178 fp.write('%s %s\n' % (hex(old), m))
179 fp.write('%s %s\n' % (hex(node), m))
179 fp.write('%s %s\n' % (hex(node), m))
180 fp.close()
180 fp.close()
181
181
182 prevtags = ''
182 prevtags = ''
183 if local:
183 if local:
184 try:
184 try:
185 fp = self.opener('localtags', 'r+')
185 fp = self.opener('localtags', 'r+')
186 except IOError:
186 except IOError:
187 fp = self.opener('localtags', 'a')
187 fp = self.opener('localtags', 'a')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # local tags are stored in the current charset
191 # local tags are stored in the current charset
192 writetags(fp, names, None, prevtags)
192 writetags(fp, names, None, prevtags)
193 for name in names:
193 for name in names:
194 self.hook('tag', node=hex(node), tag=name, local=local)
194 self.hook('tag', node=hex(node), tag=name, local=local)
195 return
195 return
196
196
197 try:
197 try:
198 fp = self.wfile('.hgtags', 'rb+')
198 fp = self.wfile('.hgtags', 'rb+')
199 except IOError:
199 except IOError:
200 fp = self.wfile('.hgtags', 'ab')
200 fp = self.wfile('.hgtags', 'ab')
201 else:
201 else:
202 prevtags = fp.read()
202 prevtags = fp.read()
203
203
204 # committed tags are stored in UTF-8
204 # committed tags are stored in UTF-8
205 writetags(fp, names, encoding.fromlocal, prevtags)
205 writetags(fp, names, encoding.fromlocal, prevtags)
206
206
207 if '.hgtags' not in self.dirstate:
207 if '.hgtags' not in self.dirstate:
208 self.add(['.hgtags'])
208 self.add(['.hgtags'])
209
209
210 m = match_.exact(self.root, '', ['.hgtags'])
210 m = match_.exact(self.root, '', ['.hgtags'])
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
211 tagnode = self.commit(message, user, date, extra=extra, match=m)
212
212
213 for name in names:
213 for name in names:
214 self.hook('tag', node=hex(node), tag=name, local=local)
214 self.hook('tag', node=hex(node), tag=name, local=local)
215
215
216 return tagnode
216 return tagnode
217
217
218 def tag(self, names, node, message, local, user, date):
218 def tag(self, names, node, message, local, user, date):
219 '''tag a revision with one or more symbolic names.
219 '''tag a revision with one or more symbolic names.
220
220
221 names is a list of strings or, when adding a single tag, names may be a
221 names is a list of strings or, when adding a single tag, names may be a
222 string.
222 string.
223
223
224 if local is True, the tags are stored in a per-repository file.
224 if local is True, the tags are stored in a per-repository file.
225 otherwise, they are stored in the .hgtags file, and a new
225 otherwise, they are stored in the .hgtags file, and a new
226 changeset is committed with the change.
226 changeset is committed with the change.
227
227
228 keyword arguments:
228 keyword arguments:
229
229
230 local: whether to store tags in non-version-controlled file
230 local: whether to store tags in non-version-controlled file
231 (default False)
231 (default False)
232
232
233 message: commit message to use if committing
233 message: commit message to use if committing
234
234
235 user: name of user to use if committing
235 user: name of user to use if committing
236
236
237 date: date tuple to use if committing'''
237 date: date tuple to use if committing'''
238
238
239 for x in self.status()[:5]:
239 for x in self.status()[:5]:
240 if '.hgtags' in x:
240 if '.hgtags' in x:
241 raise util.Abort(_('working copy of .hgtags is changed '
241 raise util.Abort(_('working copy of .hgtags is changed '
242 '(please commit .hgtags manually)'))
242 '(please commit .hgtags manually)'))
243
243
244 self.tags() # instantiate the cache
244 self.tags() # instantiate the cache
245 self._tag(names, node, message, local, user, date)
245 self._tag(names, node, message, local, user, date)
246
246
247 def tags(self):
247 def tags(self):
248 '''return a mapping of tag to node'''
248 '''return a mapping of tag to node'''
249 if self._tags is None:
249 if self._tags is None:
250 (self._tags, self._tagtypes) = self._findtags()
250 (self._tags, self._tagtypes) = self._findtags()
251
251
252 return self._tags
252 return self._tags
253
253
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        # global tags (.hgtags) first, then local ones (.hg/localtags);
        # both helpers update alltags/tagtypes in place
        tags_.findglobaltags(self.ui, self, alltags, tagtypes)
        tags_.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a nullid node marks a deleted tag; drop it
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
286
286
287 def tagtype(self, tagname):
287 def tagtype(self, tagname):
288 '''
288 '''
289 return the type of the given tag. result can be:
289 return the type of the given tag. result can be:
290
290
291 'local' : a local tag
291 'local' : a local tag
292 'global' : a global tag
292 'global' : a global tag
293 None : tag does not exist
293 None : tag does not exist
294 '''
294 '''
295
295
296 self.tags()
296 self.tags()
297
297
298 return self._tagtypes.get(tagname)
298 return self._tagtypes.get(tagname)
299
299
300 def tagslist(self):
300 def tagslist(self):
301 '''return a list of tags ordered by revision'''
301 '''return a list of tags ordered by revision'''
302 l = []
302 l = []
303 for t, n in self.tags().iteritems():
303 for t, n in self.tags().iteritems():
304 try:
304 try:
305 r = self.changelog.rev(n)
305 r = self.changelog.rev(n)
306 except:
306 except:
307 r = -2 # sort to the beginning of the list if unknown
307 r = -2 # sort to the beginning of the list if unknown
308 l.append((r, t, n))
308 l.append((r, t, n))
309 return [(t, n) for r, t, n in sorted(l)]
309 return [(t, n) for r, t, n in sorted(l)]
310
310
311 def nodetags(self, node):
311 def nodetags(self, node):
312 '''return the tags associated with a node'''
312 '''return the tags associated with a node'''
313 if not self.nodetagscache:
313 if not self.nodetagscache:
314 self.nodetagscache = {}
314 self.nodetagscache = {}
315 for t, n in self.tags().iteritems():
315 for t, n in self.tags().iteritems():
316 self.nodetagscache.setdefault(n, []).append(t)
316 self.nodetagscache.setdefault(n, []).append(t)
317 return self.nodetagscache.get(node, [])
317 return self.nodetagscache.get(node, [])
318
318
319 def _branchtags(self, partial, lrev):
319 def _branchtags(self, partial, lrev):
320 # TODO: rename this function?
320 # TODO: rename this function?
321 tiprev = len(self) - 1
321 tiprev = len(self) - 1
322 if lrev != tiprev:
322 if lrev != tiprev:
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
323 self._updatebranchcache(partial, lrev + 1, tiprev + 1)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 self._writebranchcache(partial, self.changelog.tip(), tiprev)
325
325
326 return partial
326 return partial
327
327
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        tip = self.changelog.tip()
        # fast path: in-memory cache already matches the current tip
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (first call, or the old tip was
            # stripped): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incrementally update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache
347
347
348 def branchtags(self):
348 def branchtags(self):
349 '''return a dict where branch names map to the tipmost head of
349 '''return a dict where branch names map to the tipmost head of
350 the branch, open heads come before closed'''
350 the branch, open heads come before closed'''
351 bt = {}
351 bt = {}
352 for bn, heads in self.branchmap().iteritems():
352 for bn, heads in self.branchmap().iteritems():
353 tip = heads[-1]
353 tip = heads[-1]
354 for h in reversed(heads):
354 for h in reversed(heads):
355 if 'close' not in self.changelog.read(h)[5]:
355 if 'close' not in self.changelog.read(h)[5]:
356 tip = h
356 tip = h
357 break
357 break
358 bt[bn] = tip
358 bt[bn] = tip
359 return bt
359 return bt
360
360
361
361
    def _readbranchcache(self):
        """Read .hg/branchheads.cache.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node and revision the
        cache was valid for.  Any read or validation failure yields an
        empty result rather than raising.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line is "<tip hex> <tip rev>", used for validation
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines are "<node hex> <branch label>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is only a performance problem; report it
            # in debug mode and fall back to an empty cache
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
389
389
390 def _writebranchcache(self, branches, tip, tiprev):
390 def _writebranchcache(self, branches, tip, tiprev):
391 try:
391 try:
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
392 f = self.opener("branchheads.cache", "w", atomictemp=True)
393 f.write("%s %s\n" % (hex(tip), tiprev))
393 f.write("%s %s\n" % (hex(tip), tiprev))
394 for label, nodes in branches.iteritems():
394 for label, nodes in branches.iteritems():
395 for node in nodes:
395 for node in nodes:
396 f.write("%s %s\n" % (hex(node), label))
396 f.write("%s %s\n" % (hex(node), label))
397 f.rename()
397 f.rename()
398 except (IOError, OSError):
398 except (IOError, OSError):
399 pass
399 pass
400
400
    def _updatebranchcache(self, partial, start, end):
        """Fold revisions start..end-1 into the branch-head map
        partial (modified in place), pruning heads made obsolete by
        newer descendants."""
        # collect new branch entries
        newbranches = {}
        for r in xrange(start, end):
            c = self[r]
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) < 2:
                # a single head cannot shadow anything
                continue
            newbheads = []
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    # already pruned by an earlier iteration
                    continue
                # only ancestors down to the oldest remaining head can
                # be shadowed, so bound the reachability walk there
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                bheads = [b for b in bheads if b not in reachable]
                newbheads.insert(0, latest)
            bheads.extend(newbheads)
            partial[branch] = bheads
427
427
428 def lookup(self, key):
428 def lookup(self, key):
429 if isinstance(key, int):
429 if isinstance(key, int):
430 return self.changelog.node(key)
430 return self.changelog.node(key)
431 elif key == '.':
431 elif key == '.':
432 return self.dirstate.parents()[0]
432 return self.dirstate.parents()[0]
433 elif key == 'null':
433 elif key == 'null':
434 return nullid
434 return nullid
435 elif key == 'tip':
435 elif key == 'tip':
436 return self.changelog.tip()
436 return self.changelog.tip()
437 n = self.changelog._match(key)
437 n = self.changelog._match(key)
438 if n:
438 if n:
439 return n
439 return n
440 if key in self.tags():
440 if key in self.tags():
441 return self.tags()[key]
441 return self.tags()[key]
442 if key in self.branchtags():
442 if key in self.branchtags():
443 return self.branchtags()[key]
443 return self.branchtags()[key]
444 n = self.changelog._partialmatch(key)
444 n = self.changelog._partialmatch(key)
445 if n:
445 if n:
446 return n
446 return n
447
447
448 # can't find key, check if it might have come from damaged dirstate
448 # can't find key, check if it might have come from damaged dirstate
449 if key in self.dirstate.parents():
449 if key in self.dirstate.parents():
450 raise error.Abort(_("working directory has unknown parent '%s'!")
450 raise error.Abort(_("working directory has unknown parent '%s'!")
451 % short(key))
451 % short(key))
452 try:
452 try:
453 if len(key) == 20:
453 if len(key) == 20:
454 key = hex(key)
454 key = hex(key)
455 except:
455 except:
456 pass
456 pass
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
457 raise error.RepoLookupError(_("unknown revision '%s'") % key)
458
458
459 def local(self):
459 def local(self):
460 return True
460 return True
461
461
462 def join(self, f):
462 def join(self, f):
463 return os.path.join(self.path, f)
463 return os.path.join(self.path, f)
464
464
465 def wjoin(self, f):
465 def wjoin(self, f):
466 return os.path.join(self.root, f)
466 return os.path.join(self.root, f)
467
467
468 def rjoin(self, f):
468 def rjoin(self, f):
469 return os.path.join(self.root, util.pconvert(f))
469 return os.path.join(self.root, util.pconvert(f))
470
470
471 def file(self, f):
471 def file(self, f):
472 if f[0] == '/':
472 if f[0] == '/':
473 f = f[1:]
473 f = f[1:]
474 return filelog.filelog(self.sopener, f)
474 return filelog.filelog(self.sopener, f)
475
475
476 def changectx(self, changeid):
476 def changectx(self, changeid):
477 return self[changeid]
477 return self[changeid]
478
478
479 def parents(self, changeid=None):
479 def parents(self, changeid=None):
480 '''get list of changectxs for parents of changeid'''
480 '''get list of changectxs for parents of changeid'''
481 return self[changeid].parents()
481 return self[changeid].parents()
482
482
483 def filectx(self, path, changeid=None, fileid=None):
483 def filectx(self, path, changeid=None, fileid=None):
484 """changeid can be a changeset revision, node, or tag.
484 """changeid can be a changeset revision, node, or tag.
485 fileid can be a file revision or node."""
485 fileid can be a file revision or node."""
486 return context.filectx(self, path, changeid, fileid)
486 return context.filectx(self, path, changeid, fileid)
487
487
488 def getcwd(self):
488 def getcwd(self):
489 return self.dirstate.getcwd()
489 return self.dirstate.getcwd()
490
490
491 def pathto(self, f, cwd=None):
491 def pathto(self, f, cwd=None):
492 return self.dirstate.pathto(f, cwd)
492 return self.dirstate.pathto(f, cwd)
493
493
494 def wfile(self, f, mode='r'):
494 def wfile(self, f, mode='r'):
495 return self.wopener(f, mode)
495 return self.wopener(f, mode)
496
496
497 def _link(self, f):
497 def _link(self, f):
498 return os.path.islink(self.wjoin(f))
498 return os.path.islink(self.wjoin(f))
499
499
    def _filter(self, filter, filename, data):
        """Run data through the first configured filter whose pattern
        matches filename.  filter names a config section such as
        'encode' or 'decode'; returns data (possibly transformed)."""
        if filter not in self.filterpats:
            # compile this section's pattern/command pairs once, caching
            # the result in self.filterpats
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    # a registered in-process filter function takes
                    # precedence over shelling out; the remainder of the
                    # command string becomes its parameters
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping the data through an external
                    # shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        # first matching pattern wins
        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
530
530
def adddatafilter(self, name, filter):
    """Register filter under name for lookup by _filter's command prefix."""
    self._datafilters.update({name: filter})
533
533
def wread(self, filename):
    """Read filename from the working directory and apply "encode" filters.

    For a symlink the link target string is used as the data instead of
    any file contents.
    """
    if not self._link(filename):
        raw = self.wopener(filename, 'r').read()
    else:
        raw = os.readlink(self.wjoin(filename))
    return self._filter("encode", filename, raw)
540
540
def wwrite(self, filename, data, flags):
    """Write data to filename in the working directory.

    data is first run through the "decode" filters.  flags is a string
    that may contain 'l' (create a symlink whose target is data) and/or
    'x' (mark the written file executable).
    """
    data = self._filter("decode", filename, data)
    # remove any existing entry first so a type change (regular file
    # <-> symlink) takes effect; a missing file is not an error
    try:
        os.unlink(self.wjoin(filename))
    except OSError:
        pass
    if 'l' in flags:
        self.wopener.symlink(data, filename)
    else:
        self.wopener(filename, 'w').write(data)
        if 'x' in flags:
            util.set_flags(self.wjoin(filename), False, True)
553
553
def wwritedata(self, filename, data):
    """Return data as it would be written to filename, without touching disk."""
    decoded = self._filter("decode", filename, data)
    return decoded
556
556
def transaction(self):
    """Open (or nest into) a store transaction and return it.

    If a transaction is already running, a nested transaction is
    returned.  Otherwise the dirstate and branch are journalled so a
    later rollback can restore them, and a new transaction over the
    store journal is created.  Raises RepoError if an abandoned
    journal from an interrupted transaction is found.
    """
    # reuse a live transaction held through the weak reference, if any
    tr = self._transref and self._transref() or None
    if tr and tr.running():
        return tr.nest()

    # abort here if the journal already exists
    if os.path.exists(self.sjoin("journal")):
        raise error.RepoError(
            _("abandoned transaction found - run hg recover"))

    # save dirstate for rollback
    try:
        ds = self.opener("dirstate").read()
    except IOError:
        # no dirstate file yet (e.g. fresh repository)
        ds = ""
    self.opener("journal.dirstate", "w").write(ds)
    self.opener("journal.branch", "w").write(self.dirstate.branch())

    # on successful close, journal files become the undo files
    renames = [(self.sjoin("journal"), self.sjoin("undo")),
               (self.join("journal.dirstate"), self.join("undo.dirstate")),
               (self.join("journal.branch"), self.join("undo.branch"))]
    tr = transaction.transaction(self.ui.warn, self.sopener,
                                 self.sjoin("journal"),
                                 aftertrans(renames),
                                 self.store.createmode)
    # keep only a weak reference so an unreferenced transaction aborts
    self._transref = weakref.ref(tr)
    return tr
584
584
def recover(self):
    """Roll back an interrupted transaction, if one exists.

    Returns True if a journal was found and rolled back, False
    otherwise.  Takes the store lock for the duration.
    """
    lock = self.lock()
    try:
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"),
                                 self.ui.warn)
            # drop cached state that may reference undone revisions
            self.invalidate()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
    finally:
        lock.release()
599
599
def rollback(self):
    """Undo the last committed transaction.

    Restores the store from the undo journal, puts back the saved
    dirstate and branch, and invalidates cached state.  Holds both the
    working-directory lock and the store lock (acquired in that order).
    """
    wlock = lock = None
    try:
        wlock = self.wlock()
        lock = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"),
                                 self.ui.warn)
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            try:
                branch = self.opener("undo.branch").read()
                self.dirstate.setbranch(branch)
            except IOError:
                # undo.branch missing: keep current branch, just warn
                self.ui.warn(_("Named branch could not be reset, "
                               "current branch still is: %s\n")
                             % encoding.tolocal(self.dirstate.branch()))
            self.invalidate()
            self.dirstate.invalidate()
            # notify caches etc. that history has been destroyed
            self.destroyed()
        else:
            self.ui.warn(_("no rollback information available\n"))
    finally:
        release(lock, wlock)
624
624
def invalidate(self):
    """Drop cached repository state so it is recomputed on next access."""
    for attr in ('changelog', 'manifest'):
        if attr in self.__dict__:
            delattr(self, attr)
    self._tags = None
    self._tagtypes = None
    self.nodetagscache = None
    # branch cache is kept in UTF-8
    self._branchcache = None
    self._branchcachetip = None
634
634
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
    """Acquire the lock file at lockname and return the lock object.

    First tries a non-blocking acquisition.  If the lock is held and
    wait is true, warns about the holder and retries with the
    configured ui.timeout (default 600 seconds); if wait is false the
    LockHeld error propagates.  acquirefn, if given, is called after
    the lock is obtained; releasefn is passed through to the lock.
    """
    try:
        # timeout of 0: fail immediately if someone else holds it
        l = lock.lock(lockname, 0, releasefn, desc=desc)
    except error.LockHeld, inst:
        if not wait:
            raise
        self.ui.warn(_("waiting for lock on %s held by %r\n") %
                     (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                      releasefn, desc=desc)
    if acquirefn:
        acquirefn()
    return l
649
649
def lock(self, wait=True):
    '''Lock the repository store (.hg/store) and return a weak reference
    to the lock. Use this before modifying the store (e.g. committing or
    stripping). If you are opening a transaction, get a lock as well.)'''
    # reuse a still-held lock reached through the weak reference
    existing = None
    if self._lockref:
        existing = self._lockref()
    if existing is not None and existing.held:
        existing.lock()
        return existing

    newlock = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                         _('repository %s') % self.origroot)
    self._lockref = weakref.ref(newlock)
    return newlock
663
663
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.
    Use this before modifying files in .hg.'''
    # reuse a still-held lock reached through the weak reference
    existing = None
    if self._wlockref:
        existing = self._wlockref()
    if existing is not None and existing.held:
        existing.lock()
        return existing

    newlock = self._lock(self.join("wlock"), wait, self.dirstate.write,
                         self.dirstate.invalidate, _('working directory of %s') %
                         self.origroot)
    self._wlockref = weakref.ref(newlock)
    return newlock
678
678
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx supplies the file's path, data and copy information;
    manifest1/manifest2 are the parents' manifests; linkrev is the
    changelog revision this filelog entry will link to.  Appends the
    filename to changelist if the file (or its flags) changed, and
    returns the filelog node to record in the manifest.
    """

    fname = fctx.path()
    text = fctx.data()
    flog = self.file(fname)
    fparent1 = manifest1.get(fname, nullid)
    # fparent2o keeps the original second parent for the flags-only check
    fparent2 = fparent2o = manifest2.get(fname, nullid)

    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file.  This copy data will effectively act as a parent
        # of this new revision.  If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent.  For example:
        #
        # 0 --- 1 --- 3   rev1 changes file foo
        #   \       /     rev2 renames foo to bar and changes it
        #    \- 2 -/      rev3 should have bar with all changes and
        #                      should record that bar descends from
        #                      bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3   rev4 reverts the content change from rev2
        #   \       /     merging rev3 and rev4 should use bar@rev2
        #    \- 2 --- 4        as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # find source in nearest ancestor if we've lost track
        if not crev:
            self.ui.debug(" %s: searching for copy revision for %s\n" %
                          (fname, cfname))
            for ancestor in self['.'].ancestors():
                if cfname in ancestor:
                    crev = ancestor[cfname].filenode()
                    break

        self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
        meta["copy"] = cfname
        meta["copyrev"] = hex(crev)
        fparent1, fparent2 = nullid, newfparent
    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestor = flog.ancestor(fparent1, fparent2)
        if fparentancestor == fparent1:
            fparent1, fparent2 = fparent2, nullid
        elif fparentancestor == fparent2:
            fparent2 = nullid

    # is the file changed?
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    # are just the flags changed during merge?
    if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    # unchanged: reuse the existing filelog node from the first parent
    return fparent1
753
753
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra={}):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the new changeset node (as returned by commitctx), or
    None when there is nothing to commit.

    NOTE(review): extra={} is a mutable default shared across calls;
    it is only read here (extra.get, then passed to workingctx), but
    callers/callees mutating it would leak between commits — confirm.
    """

    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = match_.always(self.root, '')

    if not force:
        # record visited directories so explicit dir patterns can be
        # validated below; unmatched explicit files abort via fail()
        vdirs = []
        match.dir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        p1, p2 = self.dirstate.parents()
        wctx = self[None]

        if (not force and p2 != nullid and match and
            (match.files() or match.anypats())):
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        changes = self.status(match=match, clean=force)
        if force:
            changes[0].extend(changes[6]) # mq may commit unchanged files

        # check subrepos
        subs = []
        for s in wctx.substate:
            if match(s) and wctx.sub(s).dirty():
                subs.append(s)
        if subs and '.hgsubstate' not in changes[0]:
            changes[0].insert(0, '.hgsubstate')

        # make sure all explicit patterns are matched
        if not force and match.files():
            # changes tuple: 0=modified, 1=added, 2=removed, 3=deleted
            matched = set(changes[0] + changes[1] + changes[2])

            for f in match.files():
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in changes[3]: # missing
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        # nothing to commit (unless forced, closing a branch head, a
        # merge, or a bare branch change)
        if (not force and not extra.get("close") and p2 == nullid
            and not (changes[0] or changes[1] or changes[2])
            and self[None].branch() == self['.'].branch()):
            return None

        ms = merge_.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg resolve)"))

        cctx = context.workingctx(self, (p1, p2), text, user, date,
                                  extra, changes)
        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # commit subs
        if subs:
            state = wctx.substate.copy()
            for s in subs:
                self.ui.status(_('committing subrepository %s\n') % s)
                sr = wctx.sub(s).commit(cctx._text, user, date)
                state[s] = (state[s][0], sr)
            subrepo.writestate(self, state)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfile = self.opener('last-message.txt', 'wb')
        msgfile.write(cctx._text)
        msgfile.close()

        try:
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
            ret = self.commitctx(cctx, True)
        except:
            # point the user at the preserved message before re-raising
            if edited:
                msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update dirstate and mergestate
        for f in changes[0] + changes[1]:
            self.dirstate.normal(f)
        for f in changes[2]:
            self.dirstate.forget(f)
        self.dirstate.setparents(ret)
        ms.reset()
    finally:
        wlock.release()

    # commit hook runs after the wlock is released
    self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
    return ret
871
871
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    Checks in each modified/added file, writes the new manifest and
    changelog entry inside a store transaction, fires the
    'pretxncommit' hook, and returns the new changeset node.  When
    error is false, a missing file (IOError/ENOENT) is demoted to a
    removal instead of aborting.
    """

    tr = lock = None
    removed = ctx.removed()
    p1, p2 = ctx.p1(), ctx.p2()
    # m1 is mutated below into the new revision's manifest
    m1 = p1.manifest().copy()
    m2 = p2.manifest()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction()
        # weak proxy so the transaction can be garbage-collected
        # (and thus aborted) if we fail without closing it
        trp = weakref.proxy(tr)

        # check in files
        new = {}
        changed = []
        linkrev = len(self)
        for f in sorted(ctx.modified() + ctx.added()):
            self.ui.note(f + "\n")
            try:
                fctx = ctx[f]
                new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                          changed)
                m1.set(f, fctx.flags())
            except OSError, inst:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise
            except IOError, inst:
                errcode = getattr(inst, 'errno', errno.ENOENT)
                if error or errcode and errcode != errno.ENOENT:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # file vanished: record it as removed instead
                    removed.append(f)

        # update manifest
        m1.update(new)
        removed = [f for f in sorted(removed) if f in m1 or f in m2]
        drop = [f for f in removed if f in m1]
        for f in drop:
            del m1[f]
        mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                               p2.manifestnode(), (new, drop))

        # update changelog
        self.changelog.delayupdate()
        n = self.changelog.add(mn, changed + removed, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        # hooks can call writepending() to see the pending changelog
        p = lambda: self.changelog.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        self.changelog.finalize(trp)
        tr.close()

        if self._branchcache:
            self.branchtags()
        return n
    finally:
        del tr
        lock.release()
938
938
def destroyed(self):
    '''Inform the repository that nodes have been destroyed.
    Intended for use by strip and rollback, so there's a common
    place for anything that has to be done after destroying history.'''
    # XXX it might be nice if we could take the list of destroyed
    # nodes, but I don't see an easy way for rollback() to do that

    # Ensure the persistent tag cache is updated.  Doing it now
    # means that the tag cache only has to worry about destroyed
    # heads immediately after a strip/rollback.  That in turn
    # guarantees that "cachetip == currenttip" (comparing both rev
    # and node) always means no nodes have been added or destroyed.

    # XXX this is suboptimal when qrefresh'ing: we strip the current
    # head, refresh the tag cache, then immediately add a new head.
    # But I think doing it this way is necessary for the "instant
    # tag cache retrieval" case to work.
    tags_.findglobaltags(self.ui, self, {}, {})
957
957
def walk(self, match, node=None):
    '''
    walk recursively through the directory tree or a given
    changeset, finding all files matched by the match
    function
    '''
    ctx = self[node]
    return ctx.walk(match)
965
965
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted filename lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    deleted/unknown/ignored are only meaningful against the working
    directory; they come back empty when comparing two revisions.
    """

    def mfmatches(ctx):
        # manifest of ctx restricted to files accepted by match
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or match_.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            # only warn about files that are not in the base revision
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = ctx1.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        # cmp: files the dirstate could not decide about cheaply
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f].data())):
                    modified.append(f)
                else:
                    fixup.append(f)

            if listclean:
                clean += fixup

            # update dirstate for files that are actually clean
            if fixup:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        # recompute modified/added/clean by walking mf2 against mf1;
        # whatever is left in mf1 afterwards was removed
        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                if (mf1.flags(fn) != mf2.flags(fn) or
                    (mf1[fn] != mf2[fn] and
                     (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            else:
                added.append(fn)
        removed = mf1.keys()

    r = modified, added, removed, deleted, unknown, ignored, clean
    [l.sort() for l in r]
    return r
1073
1073
1074 def add(self, list):
1074 def add(self, list):
1075 wlock = self.wlock()
1075 wlock = self.wlock()
1076 try:
1076 try:
1077 rejected = []
1077 rejected = []
1078 for f in list:
1078 for f in list:
1079 p = self.wjoin(f)
1079 p = self.wjoin(f)
1080 try:
1080 try:
1081 st = os.lstat(p)
1081 st = os.lstat(p)
1082 except:
1082 except:
1083 self.ui.warn(_("%s does not exist!\n") % f)
1083 self.ui.warn(_("%s does not exist!\n") % f)
1084 rejected.append(f)
1084 rejected.append(f)
1085 continue
1085 continue
1086 if st.st_size > 10000000:
1086 if st.st_size > 10000000:
1087 self.ui.warn(_("%s: files over 10MB may cause memory and"
1087 self.ui.warn(_("%s: files over 10MB may cause memory and"
1088 " performance problems\n"
1088 " performance problems\n"
1089 "(use 'hg revert %s' to unadd the file)\n")
1089 "(use 'hg revert %s' to unadd the file)\n")
1090 % (f, f))
1090 % (f, f))
1091 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1091 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1092 self.ui.warn(_("%s not added: only files and symlinks "
1092 self.ui.warn(_("%s not added: only files and symlinks "
1093 "supported currently\n") % f)
1093 "supported currently\n") % f)
1094 rejected.append(p)
1094 rejected.append(p)
1095 elif self.dirstate[f] in 'amn':
1095 elif self.dirstate[f] in 'amn':
1096 self.ui.warn(_("%s already tracked!\n") % f)
1096 self.ui.warn(_("%s already tracked!\n") % f)
1097 elif self.dirstate[f] == 'r':
1097 elif self.dirstate[f] == 'r':
1098 self.dirstate.normallookup(f)
1098 self.dirstate.normallookup(f)
1099 else:
1099 else:
1100 self.dirstate.add(f)
1100 self.dirstate.add(f)
1101 return rejected
1101 return rejected
1102 finally:
1102 finally:
1103 wlock.release()
1103 wlock.release()
1104
1104
1105 def forget(self, list):
1105 def forget(self, list):
1106 wlock = self.wlock()
1106 wlock = self.wlock()
1107 try:
1107 try:
1108 for f in list:
1108 for f in list:
1109 if self.dirstate[f] != 'a':
1109 if self.dirstate[f] != 'a':
1110 self.ui.warn(_("%s not added!\n") % f)
1110 self.ui.warn(_("%s not added!\n") % f)
1111 else:
1111 else:
1112 self.dirstate.forget(f)
1112 self.dirstate.forget(f)
1113 finally:
1113 finally:
1114 wlock.release()
1114 wlock.release()
1115
1115
    def remove(self, list, unlink=False):
        # Schedule files for removal from the dirstate; when unlink is
        # True, also delete them from the working directory first.
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # file already gone is fine; anything else is real
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # unlink above failed (e.g. permissions); do not
                    # record the file as removed
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()
1137
1137
    def undelete(self, list):
        # Restore files marked removed ('r') in the dirstate: write
        # their content from a dirstate parent back into the working
        # directory and mark them clean again.
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    # prefer the first parent's manifest when it has the
                    # file, else fall back to the second parent's
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()
1153
1153
1154 def copy(self, source, dest):
1154 def copy(self, source, dest):
1155 p = self.wjoin(dest)
1155 p = self.wjoin(dest)
1156 if not (os.path.exists(p) or os.path.islink(p)):
1156 if not (os.path.exists(p) or os.path.islink(p)):
1157 self.ui.warn(_("%s does not exist!\n") % dest)
1157 self.ui.warn(_("%s does not exist!\n") % dest)
1158 elif not (os.path.isfile(p) or os.path.islink(p)):
1158 elif not (os.path.isfile(p) or os.path.islink(p)):
1159 self.ui.warn(_("copy failed: %s is not a file or a "
1159 self.ui.warn(_("copy failed: %s is not a file or a "
1160 "symbolic link\n") % dest)
1160 "symbolic link\n") % dest)
1161 else:
1161 else:
1162 wlock = self.wlock()
1162 wlock = self.wlock()
1163 try:
1163 try:
1164 if self.dirstate[dest] in '?r':
1164 if self.dirstate[dest] in '?r':
1165 self.dirstate.add(dest)
1165 self.dirstate.add(dest)
1166 self.dirstate.copy(source, dest)
1166 self.dirstate.copy(source, dest)
1167 finally:
1167 finally:
1168 wlock.release()
1168 wlock.release()
1169
1169
1170 def heads(self, start=None):
1170 def heads(self, start=None):
1171 heads = self.changelog.heads(start)
1171 heads = self.changelog.heads(start)
1172 # sort the output in rev descending order
1172 # sort the output in rev descending order
1173 heads = [(-self.changelog.rev(h), h) for h in heads]
1173 heads = [(-self.changelog.rev(h), h) for h in heads]
1174 return [n for (r, n) in sorted(heads)]
1174 return [n for (r, n) in sorted(heads)]
1175
1175
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # field 5 of a changelog entry is the extra metadata; a
            # branch-closing changeset carries a 'close' key there
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
1199
1199
    def branches(self, nodes):
        # For each node, walk first parents back to the start of its
        # linear segment of history and report a 4-tuple:
        # (segment tip, segment root, first parent, second parent).
        # Used by the discovery protocol.
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember the tip of this segment
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    # merge changeset or root: the linear run ends here
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1213
1213
    def between(self, pairs):
        # For each (top, bottom) pair, walk first parents from top
        # towards bottom, collecting nodes at exponentially growing
        # distances (1, 2, 4, ...).  This sampling supports the
        # discovery protocol's binary search over a branch range.
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next sampling distance

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1232
1232
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # element [1] of findcommonincoming's tuple is the missing roots
        return self.findcommonincoming(remote, base, heads, force)[1]
1250
1250
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []          # (head, root) ranges to binary-search later
        fetch = set()        # earliest-unknown nodes (the missing roots)
        seen = set()         # branch heads already examined
        seenbranch = set()   # full branch tuples already examined
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is already known locally
            return base.keys(), [], []

        req = set(unknown)   # nodes already requested from remote
        reqcnt = 0           # number of round trips, for progress output

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug("examining %s:%s\n"
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug("branch already found\n")
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug("found incomplete branch %s:%s\n"
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug("found new changeset %s\n" %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next round trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.progress(_('searching'), reqcnt, unit='queries')
                self.ui.debug("request %d: %s\n" %
                            (reqcnt, " ".join(map(short, r))))
                # batch requests in groups of 10 nodes
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p + 10]):
                        self.ui.debug("received %s:%s\n" %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit='queries')
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            # adjacent samples: p is the boundary itself
                            self.ui.debug("found new branch changeset %s\n" %
                                              short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            # boundary lies between p and i: narrow range
                            self.ui.debug("narrowed branch search to %s:%s\n"
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            # nothing in common but the null revision
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug("found new changesets starting at " +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.progress(_('searching'), None, unit='queries')
        self.ui.debug("%d total queries\n" % reqcnt)

        return base.keys(), list(fetch), heads
1395
1395
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
        # populate base with the nodes known to be common with remote
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug("common changesets up to "
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset
1443
1443
    def pull(self, remote, heads=None, force=False):
        # Fetch missing changesets from remote (optionally limited to
        # ancestors of heads) and add them locally.  Returns the result
        # of addchangegroup, or 0 when there was nothing to pull.
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                # full pull: fetch everything descending from the roots
                cg = remote.changegroup(fetch, 'pull')
            else:
                # partial pull requires server-side subset support
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()
1470
1470
1471 def push(self, remote, force=False, revs=None):
1471 def push(self, remote, force=False, revs=None):
1472 # there are two ways to push to remote repo:
1472 # there are two ways to push to remote repo:
1473 #
1473 #
1474 # addchangegroup assumes local user can lock remote
1474 # addchangegroup assumes local user can lock remote
1475 # repo (local filesystem, old ssh servers).
1475 # repo (local filesystem, old ssh servers).
1476 #
1476 #
1477 # unbundle assumes local user cannot lock remote repo (new ssh
1477 # unbundle assumes local user cannot lock remote repo (new ssh
1478 # servers, http servers).
1478 # servers, http servers).
1479
1479
1480 if remote.capable('unbundle'):
1480 if remote.capable('unbundle'):
1481 return self.push_unbundle(remote, force, revs)
1481 return self.push_unbundle(remote, force, revs)
1482 return self.push_addchangegroup(remote, force, revs)
1482 return self.push_addchangegroup(remote, force, revs)
1483
1483
1484 def prepush(self, remote, force, revs):
1484 def prepush(self, remote, force, revs):
1485 '''Analyze the local and remote repositories and determine which
1485 '''Analyze the local and remote repositories and determine which
1486 changesets need to be pushed to the remote. Return a tuple
1486 changesets need to be pushed to the remote. Return a tuple
1487 (changegroup, remoteheads). changegroup is a readable file-like
1487 (changegroup, remoteheads). changegroup is a readable file-like
1488 object whose read() returns successive changegroup chunks ready to
1488 object whose read() returns successive changegroup chunks ready to
1489 be sent over the wire. remoteheads is the list of remote heads.
1489 be sent over the wire. remoteheads is the list of remote heads.
1490 '''
1490 '''
1491 common = {}
1491 common = {}
1492 remote_heads = remote.heads()
1492 remote_heads = remote.heads()
1493 inc = self.findincoming(remote, common, remote_heads, force=force)
1493 inc = self.findincoming(remote, common, remote_heads, force=force)
1494
1494
1495 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1495 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1496 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1496 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1497
1497
1498 def checkbranch(lheads, rheads, updatelb, branchname=None):
1498 def checkbranch(lheads, rheads, updatelb, branchname=None):
1499 '''
1499 '''
1500 check whether there are more local heads than remote heads on
1500 check whether there are more local heads than remote heads on
1501 a specific branch.
1501 a specific branch.
1502
1502
1503 lheads: local branch heads
1503 lheads: local branch heads
1504 rheads: remote branch heads
1504 rheads: remote branch heads
1505 updatelb: outgoing local branch bases
1505 updatelb: outgoing local branch bases
1506 '''
1506 '''
1507
1507
1508 warn = 0
1508 warn = 0
1509
1509
1510 if not revs and len(lheads) > len(rheads):
1510 if not revs and len(lheads) > len(rheads):
1511 warn = 1
1511 warn = 1
1512 else:
1512 else:
1513 # add local heads involved in the push
1513 # add local heads involved in the push
1514 updatelheads = [self.changelog.heads(x, lheads)
1514 updatelheads = [self.changelog.heads(x, lheads)
1515 for x in updatelb]
1515 for x in updatelb]
1516 newheads = set(sum(updatelheads, [])) & set(lheads)
1516 newheads = set(sum(updatelheads, [])) & set(lheads)
1517
1517
1518 if not newheads:
1518 if not newheads:
1519 return True
1519 return True
1520
1520
1521 # add heads we don't have or that are not involved in the push
1521 # add heads we don't have or that are not involved in the push
1522 for r in rheads:
1522 for r in rheads:
1523 if r in self.changelog.nodemap:
1523 if r in self.changelog.nodemap:
1524 desc = self.changelog.heads(r, heads)
1524 desc = self.changelog.heads(r, heads)
1525 l = [h for h in heads if h in desc]
1525 l = [h for h in heads if h in desc]
1526 if not l:
1526 if not l:
1527 newheads.add(r)
1527 newheads.add(r)
1528 else:
1528 else:
1529 newheads.add(r)
1529 newheads.add(r)
1530 if len(newheads) > len(rheads):
1530 if len(newheads) > len(rheads):
1531 warn = 1
1531 warn = 1
1532
1532
1533 if warn:
1533 if warn:
1534 if branchname is not None:
1534 if branchname is not None:
1535 msg = _("abort: push creates new remote heads"
1535 msg = _("abort: push creates new remote heads"
1536 " on branch '%s'!\n") % branchname
1536 " on branch '%s'!\n") % branchname
1537 else:
1537 else:
1538 msg = _("abort: push creates new remote heads!\n")
1538 msg = _("abort: push creates new remote heads!\n")
1539 self.ui.warn(msg)
1539 self.ui.warn(msg)
1540 if len(lheads) > len(rheads):
1540 if len(lheads) > len(rheads):
1541 self.ui.status(_("(did you forget to merge?"
1541 self.ui.status(_("(did you forget to merge?"
1542 " use push -f to force)\n"))
1542 " use push -f to force)\n"))
1543 else:
1543 else:
1544 self.ui.status(_("(you should pull and merge or"
1544 self.ui.status(_("(you should pull and merge or"
1545 " use push -f to force)\n"))
1545 " use push -f to force)\n"))
1546 return False
1546 return False
1547 return True
1547 return True
1548
1548
1549 if not bases:
1549 if not bases:
1550 self.ui.status(_("no changes found\n"))
1550 self.ui.status(_("no changes found\n"))
1551 return None, 1
1551 return None, 1
1552 elif not force:
1552 elif not force:
1553 # Check for each named branch if we're creating new remote heads.
1553 # Check for each named branch if we're creating new remote heads.
1554 # To be a remote head after push, node must be either:
1554 # To be a remote head after push, node must be either:
1555 # - unknown locally
1555 # - unknown locally
1556 # - a local outgoing head descended from update
1556 # - a local outgoing head descended from update
1557 # - a remote head that's known locally and not
1557 # - a remote head that's known locally and not
1558 # ancestral to an outgoing head
1558 # ancestral to an outgoing head
1559 #
1559 #
1560 # New named branches cannot be created without --force.
1560 # New named branches cannot be created without --force.
1561
1561
1562 if remote_heads != [nullid]:
1562 if remote_heads != [nullid]:
1563 if remote.capable('branchmap'):
1563 if remote.capable('branchmap'):
1564 remotebrheads = remote.branchmap()
1564 remotebrheads = remote.branchmap()
1565
1565
1566 if not revs:
1566 if not revs:
1567 localbrheads = self.branchmap()
1567 localbrheads = self.branchmap()
1568 else:
1568 else:
1569 localbrheads = {}
1569 localbrheads = {}
1570 for n in heads:
1570 for n in heads:
1571 branch = self[n].branch()
1571 branch = self[n].branch()
1572 localbrheads.setdefault(branch, []).append(n)
1572 localbrheads.setdefault(branch, []).append(n)
1573
1573
1574 newbranches = list(set(localbrheads) - set(remotebrheads))
1574 newbranches = list(set(localbrheads) - set(remotebrheads))
1575 if newbranches: # new branch requires --force
1575 if newbranches: # new branch requires --force
1576 branchnames = ', '.join("%s" % b for b in newbranches)
1576 branchnames = ', '.join("%s" % b for b in newbranches)
1577 self.ui.warn(_("abort: push creates "
1577 self.ui.warn(_("abort: push creates "
1578 "new remote branches: %s!\n")
1578 "new remote branches: %s!\n")
1579 % branchnames)
1579 % branchnames)
1580 # propose 'push -b .' in the msg too?
1580 # propose 'push -b .' in the msg too?
1581 self.ui.status(_("(use 'hg push -f' to force)\n"))
1581 self.ui.status(_("(use 'hg push -f' to force)\n"))
1582 return None, 0
1582 return None, 0
1583 for branch, lheads in localbrheads.iteritems():
1583 for branch, lheads in localbrheads.iteritems():
1584 if branch in remotebrheads:
1584 if branch in remotebrheads:
1585 rheads = remotebrheads[branch]
1585 rheads = remotebrheads[branch]
1586 if not checkbranch(lheads, rheads, update, branch):
1586 if not checkbranch(lheads, rheads, update, branch):
1587 return None, 0
1587 return None, 0
1588 else:
1588 else:
1589 if not checkbranch(heads, remote_heads, update):
1589 if not checkbranch(heads, remote_heads, update):
1590 return None, 0
1590 return None, 0
1591
1591
1592 if inc:
1592 if inc:
1593 self.ui.warn(_("note: unsynced remote changes!\n"))
1593 self.ui.warn(_("note: unsynced remote changes!\n"))
1594
1594
1595
1595
1596 if revs is None:
1596 if revs is None:
1597 # use the fast path, no race possible on push
1597 # use the fast path, no race possible on push
1598 nodes = self.changelog.findmissing(common.keys())
1598 nodes = self.changelog.findmissing(common.keys())
1599 cg = self._changegroup(nodes, 'push')
1599 cg = self._changegroup(nodes, 'push')
1600 else:
1600 else:
1601 cg = self.changegroupsubset(update, revs, 'push')
1601 cg = self.changegroupsubset(update, revs, 'push')
1602 return cg, remote_heads
1602 return cg, remote_heads
1603
1603
1604 def push_addchangegroup(self, remote, force, revs):
1604 def push_addchangegroup(self, remote, force, revs):
1605 lock = remote.lock()
1605 lock = remote.lock()
1606 try:
1606 try:
1607 ret = self.prepush(remote, force, revs)
1607 ret = self.prepush(remote, force, revs)
1608 if ret[0] is not None:
1608 if ret[0] is not None:
1609 cg, remote_heads = ret
1609 cg, remote_heads = ret
1610 return remote.addchangegroup(cg, 'push', self.url())
1610 return remote.addchangegroup(cg, 'push', self.url())
1611 return ret[1]
1611 return ret[1]
1612 finally:
1612 finally:
1613 lock.release()
1613 lock.release()
1614
1614
1615 def push_unbundle(self, remote, force, revs):
1615 def push_unbundle(self, remote, force, revs):
1616 # local repo finds heads on server, finds out what revs it
1616 # local repo finds heads on server, finds out what revs it
1617 # must push. once revs transferred, if server finds it has
1617 # must push. once revs transferred, if server finds it has
1618 # different heads (someone else won commit/push race), server
1618 # different heads (someone else won commit/push race), server
1619 # aborts.
1619 # aborts.
1620
1620
1621 ret = self.prepush(remote, force, revs)
1621 ret = self.prepush(remote, force, revs)
1622 if ret[0] is not None:
1622 if ret[0] is not None:
1623 cg, remote_heads = ret
1623 cg, remote_heads = ret
1624 if force:
1624 if force:
1625 remote_heads = ['force']
1625 remote_heads = ['force']
1626 return remote.unbundle(cg, remote_heads, 'push')
1626 return remote.unbundle(cg, remote_heads, 'push')
1627 return ret[1]
1627 return ret[1]
1628
1628
1629 def changegroupinfo(self, nodes, source):
1629 def changegroupinfo(self, nodes, source):
1630 if self.ui.verbose or source == 'bundle':
1630 if self.ui.verbose or source == 'bundle':
1631 self.ui.status(_("%d changesets found\n") % len(nodes))
1631 self.ui.status(_("%d changesets found\n") % len(nodes))
1632 if self.ui.debugflag:
1632 if self.ui.debugflag:
1633 self.ui.debug("list of changesets:\n")
1633 self.ui.debug("list of changesets:\n")
1634 for node in nodes:
1634 for node in nodes:
1635 self.ui.debug("%s\n" % hex(node))
1635 self.ui.debug("%s\n" % hex(node))
1636
1636
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                # Pushing everything up to our heads: no subsetting needed.
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                msngset.pop(revlog.node(r), None)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundle changes'), cnt, unit='chunks')
                cnt += 1
            # Passing None as the position closes out this progress topic.
            self.ui.progress(_('bundle changes'), None, unit='chunks')


            # Figure out which manifest nodes (of the ones we think might be
            # part of the changegroup) the recipient must know about and
            # remove them from the changegroup.
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundle manifests'), cnt, unit='chunks')
                cnt += 1
            self.ui.progress(_('bundle manifests'), None, unit='chunks')

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # Key 1 is the manifest (handled above); skip it here.
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        self.ui.progress(
                            _('bundle files'), cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundle files'), None, unit='chunks')

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1907
1907
1908 def changegroup(self, basenodes, source):
1908 def changegroup(self, basenodes, source):
1909 # to avoid a race we use changegroupsubset() (issue1320)
1909 # to avoid a race we use changegroupsubset() (issue1320)
1910 return self.changegroupsubset(basenodes, self.heads(), source)
1910 return self.changegroupsubset(basenodes, self.heads(), source)
1911
1911
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # Revision numbers of the outgoing changesets; used to decide which
        # manifest/file revisions are linked to an outgoing changeset.
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # Yield the nodes of `log` whose linkrev points at an outgoing
        # changeset (i.e. the revisions the recipient is missing).
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # Build a lookup that maps a node of `revlog` to the changelog node
        # it should be transmitted with (its linkrev).
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = {}
            # mmfs collects the manifest nodes touched by outgoing changesets.
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress(_('bundle changes'), cnt, unit='chunks')
                cnt += 1
                yield chnk
            # Passing None as the position closes out this progress topic.
            self.ui.progress(_('bundle changes'), None, unit='chunks')

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                self.ui.progress(_('bundle manifests'), cnt, unit='chunks')
                cnt += 1
                yield chnk
            self.ui.progress(_('bundle manifests'), None, unit='chunks')

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # A file group starts with the file name, length-prefixed.
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundle files'), cnt, item=fname, unit='chunks')
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundle files'), None, unit='chunks')

            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1988
1988
1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1990 """add changegroup to repo.
1990 """add changegroup to repo.
1991
1991
1992 return values:
1992 return values:
1993 - nothing changed or no source: 0
1993 - nothing changed or no source: 0
1994 - more heads than before: 1+added heads (2..n)
1994 - more heads than before: 1+added heads (2..n)
1995 - less heads than before: -1-removed heads (-2..-n)
1995 - less heads than before: -1-removed heads (-2..-n)
1996 - number of heads stays the same: 1
1996 - number of heads stays the same: 1
1997 """
1997 """
1998 def csmap(x):
1998 def csmap(x):
1999 self.ui.debug("add changeset %s\n" % short(x))
1999 self.ui.debug("add changeset %s\n" % short(x))
2000 return len(cl)
2000 return len(cl)
2001
2001
2002 def revmap(x):
2002 def revmap(x):
2003 return cl.rev(x)
2003 return cl.rev(x)
2004
2004
2005 if not source:
2005 if not source:
2006 return 0
2006 return 0
2007
2007
2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2008 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2009
2009
2010 changesets = files = revisions = 0
2010 changesets = files = revisions = 0
2011
2011
2012 # write changelog data to temp files so concurrent readers will not see
2012 # write changelog data to temp files so concurrent readers will not see
2013 # inconsistent view
2013 # inconsistent view
2014 cl = self.changelog
2014 cl = self.changelog
2015 cl.delayupdate()
2015 cl.delayupdate()
2016 oldheads = len(cl.heads())
2016 oldheads = len(cl.heads())
2017
2017
2018 tr = self.transaction()
2018 tr = self.transaction()
2019 try:
2019 try:
2020 trp = weakref.proxy(tr)
2020 trp = weakref.proxy(tr)
2021 # pull off the changeset group
2021 # pull off the changeset group
2022 self.ui.status(_("adding changesets\n"))
2022 self.ui.status(_("adding changesets\n"))
2023 clstart = len(cl)
2023 clstart = len(cl)
2024 class prog(object):
2024 class prog(object):
2025 step = _('changesets')
2025 step = _('changesets')
2026 count = 1
2026 count = 1
2027 ui = self.ui
2027 ui = self.ui
2028 def __call__(self):
2028 def __call__(self):
2029 self.ui.progress(self.step, self.count, unit='chunks')
2029 self.ui.progress(self.step, self.count, unit='chunks')
2030 self.count += 1
2030 self.count += 1
2031 pr = prog()
2031 pr = prog()
2032 chunkiter = changegroup.chunkiter(source, progress=pr)
2032 chunkiter = changegroup.chunkiter(source, progress=pr)
2033 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2033 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2034 raise util.Abort(_("received changelog group is empty"))
2034 raise util.Abort(_("received changelog group is empty"))
2035 clend = len(cl)
2035 clend = len(cl)
2036 changesets = clend - clstart
2036 changesets = clend - clstart
2037 self.ui.progress(_('changesets'), None)
2037 self.ui.progress(_('changesets'), None)
2038
2038
2039 # pull off the manifest group
2039 # pull off the manifest group
2040 self.ui.status(_("adding manifests\n"))
2040 self.ui.status(_("adding manifests\n"))
2041 pr.step = _('manifests')
2041 pr.step = _('manifests')
2042 pr.count = 1
2042 pr.count = 1
2043 chunkiter = changegroup.chunkiter(source, progress=pr)
2043 chunkiter = changegroup.chunkiter(source, progress=pr)
2044 # no need to check for empty manifest group here:
2044 # no need to check for empty manifest group here:
2045 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2045 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2046 # no new manifest will be created and the manifest group will
2046 # no new manifest will be created and the manifest group will
2047 # be empty during the pull
2047 # be empty during the pull
2048 self.manifest.addgroup(chunkiter, revmap, trp)
2048 self.manifest.addgroup(chunkiter, revmap, trp)
2049 self.ui.progress(_('manifests'), None)
2049 self.ui.progress(_('manifests'), None)
2050
2050
2051 needfiles = {}
2051 needfiles = {}
2052 if self.ui.configbool('server', 'validate', default=False):
2052 if self.ui.configbool('server', 'validate', default=False):
2053 # validate incoming csets have their manifests
2053 # validate incoming csets have their manifests
2054 for cset in xrange(clstart, clend):
2054 for cset in xrange(clstart, clend):
2055 mfest = self.changelog.read(self.changelog.node(cset))[0]
2055 mfest = self.changelog.read(self.changelog.node(cset))[0]
2056 mfest = self.manifest.readdelta(mfest)
2056 mfest = self.manifest.readdelta(mfest)
2057 # store file nodes we must see
2057 # store file nodes we must see
2058 for f, n in mfest.iteritems():
2058 for f, n in mfest.iteritems():
2059 needfiles.setdefault(f, set()).add(n)
2059 needfiles.setdefault(f, set()).add(n)
2060
2060
2061 # process the files
2061 # process the files
2062 self.ui.status(_("adding file changes\n"))
2062 self.ui.status(_("adding file changes\n"))
2063 pr.step = 'files'
2063 pr.step = 'files'
2064 pr.count = 1
2064 pr.count = 1
2065 while 1:
2065 while 1:
2066 f = changegroup.getchunk(source)
2066 f = changegroup.getchunk(source)
2067 if not f:
2067 if not f:
2068 break
2068 break
2069 self.ui.debug("adding %s revisions\n" % f)
2069 self.ui.debug("adding %s revisions\n" % f)
2070 fl = self.file(f)
2070 fl = self.file(f)
2071 o = len(fl)
2071 o = len(fl)
2072 chunkiter = changegroup.chunkiter(source, progress=pr)
2072 chunkiter = changegroup.chunkiter(source, progress=pr)
2073 if fl.addgroup(chunkiter, revmap, trp) is None:
2073 if fl.addgroup(chunkiter, revmap, trp) is None:
2074 raise util.Abort(_("received file revlog group is empty"))
2074 raise util.Abort(_("received file revlog group is empty"))
2075 revisions += len(fl) - o
2075 revisions += len(fl) - o
2076 files += 1
2076 files += 1
2077 if f in needfiles:
2077 if f in needfiles:
2078 needs = needfiles[f]
2078 needs = needfiles[f]
2079 for new in xrange(o, len(fl)):
2079 for new in xrange(o, len(fl)):
2080 n = fl.node(new)
2080 n = fl.node(new)
2081 if n in needs:
2081 if n in needs:
2082 needs.remove(n)
2082 needs.remove(n)
2083 if not needs:
2083 if not needs:
2084 del needfiles[f]
2084 del needfiles[f]
2085 self.ui.progress(_('files'), None)
2085 self.ui.progress(_('files'), None)
2086
2086
2087 for f, needs in needfiles.iteritems():
2087 for f, needs in needfiles.iteritems():
2088 fl = self.file(f)
2088 fl = self.file(f)
2089 for n in needs:
2089 for n in needs:
2090 try:
2090 try:
2091 fl.rev(n)
2091 fl.rev(n)
2092 except error.LookupError:
2092 except error.LookupError:
2093 raise util.Abort(
2093 raise util.Abort(
2094 _('missing file data for %s:%s - run hg verify') %
2094 _('missing file data for %s:%s - run hg verify') %
2095 (f, hex(n)))
2095 (f, hex(n)))
2096
2096
2097 newheads = len(cl.heads())
2097 newheads = len(cl.heads())
2098 heads = ""
2098 heads = ""
2099 if oldheads and newheads != oldheads:
2099 if oldheads and newheads != oldheads:
2100 heads = _(" (%+d heads)") % (newheads - oldheads)
2100 heads = _(" (%+d heads)") % (newheads - oldheads)
2101
2101
2102 self.ui.status(_("added %d changesets"
2102 self.ui.status(_("added %d changesets"
2103 " with %d changes to %d files%s\n")
2103 " with %d changes to %d files%s\n")
2104 % (changesets, revisions, files, heads))
2104 % (changesets, revisions, files, heads))
2105
2105
2106 if changesets > 0:
2106 if changesets > 0:
2107 p = lambda: cl.writepending() and self.root or ""
2107 p = lambda: cl.writepending() and self.root or ""
2108 self.hook('pretxnchangegroup', throw=True,
2108 self.hook('pretxnchangegroup', throw=True,
2109 node=hex(cl.node(clstart)), source=srctype,
2109 node=hex(cl.node(clstart)), source=srctype,
2110 url=url, pending=p)
2110 url=url, pending=p)
2111
2111
2112 # make changelog see real files again
2112 # make changelog see real files again
2113 cl.finalize(trp)
2113 cl.finalize(trp)
2114
2114
2115 tr.close()
2115 tr.close()
2116 finally:
2116 finally:
2117 del tr
2117 del tr
2118
2118
2119 if changesets > 0:
2119 if changesets > 0:
2120 # forcefully update the on-disk branch cache
2120 # forcefully update the on-disk branch cache
2121 self.ui.debug("updating the branch cache\n")
2121 self.ui.debug("updating the branch cache\n")
2122 self.branchtags()
2122 self.branchtags()
2123 self.hook("changegroup", node=hex(cl.node(clstart)),
2123 self.hook("changegroup", node=hex(cl.node(clstart)),
2124 source=srctype, url=url)
2124 source=srctype, url=url)
2125
2125
2126 for i in xrange(clstart, clend):
2126 for i in xrange(clstart, clend):
2127 self.hook("incoming", node=hex(cl.node(i)),
2127 self.hook("incoming", node=hex(cl.node(i)),
2128 source=srctype, url=url)
2128 source=srctype, url=url)
2129
2129
2130 # never return 0 here:
2130 # never return 0 here:
2131 if newheads < oldheads:
2131 if newheads < oldheads:
2132 return newheads - oldheads - 1
2132 return newheads - oldheads - 1
2133 else:
2133 else:
2134 return newheads - oldheads + 1
2134 return newheads - oldheads + 1
2135
2135
2136
2136
2137 def stream_in(self, remote):
2137 def stream_in(self, remote):
2138 fp = remote.stream_out()
2138 fp = remote.stream_out()
2139 l = fp.readline()
2139 l = fp.readline()
2140 try:
2140 try:
2141 resp = int(l)
2141 resp = int(l)
2142 except ValueError:
2142 except ValueError:
2143 raise error.ResponseError(
2143 raise error.ResponseError(
2144 _('Unexpected response from remote server:'), l)
2144 _('Unexpected response from remote server:'), l)
2145 if resp == 1:
2145 if resp == 1:
2146 raise util.Abort(_('operation forbidden by server'))
2146 raise util.Abort(_('operation forbidden by server'))
2147 elif resp == 2:
2147 elif resp == 2:
2148 raise util.Abort(_('locking the remote repository failed'))
2148 raise util.Abort(_('locking the remote repository failed'))
2149 elif resp != 0:
2149 elif resp != 0:
2150 raise util.Abort(_('the server sent an unknown error code'))
2150 raise util.Abort(_('the server sent an unknown error code'))
2151 self.ui.status(_('streaming all changes\n'))
2151 self.ui.status(_('streaming all changes\n'))
2152 l = fp.readline()
2152 l = fp.readline()
2153 try:
2153 try:
2154 total_files, total_bytes = map(int, l.split(' ', 1))
2154 total_files, total_bytes = map(int, l.split(' ', 1))
2155 except (ValueError, TypeError):
2155 except (ValueError, TypeError):
2156 raise error.ResponseError(
2156 raise error.ResponseError(
2157 _('Unexpected response from remote server:'), l)
2157 _('Unexpected response from remote server:'), l)
2158 self.ui.status(_('%d files to transfer, %s of data\n') %
2158 self.ui.status(_('%d files to transfer, %s of data\n') %
2159 (total_files, util.bytecount(total_bytes)))
2159 (total_files, util.bytecount(total_bytes)))
2160 start = time.time()
2160 start = time.time()
2161 for i in xrange(total_files):
2161 for i in xrange(total_files):
2162 # XXX doesn't support '\n' or '\r' in filenames
2162 # XXX doesn't support '\n' or '\r' in filenames
2163 l = fp.readline()
2163 l = fp.readline()
2164 try:
2164 try:
2165 name, size = l.split('\0', 1)
2165 name, size = l.split('\0', 1)
2166 size = int(size)
2166 size = int(size)
2167 except (ValueError, TypeError):
2167 except (ValueError, TypeError):
2168 raise error.ResponseError(
2168 raise error.ResponseError(
2169 _('Unexpected response from remote server:'), l)
2169 _('Unexpected response from remote server:'), l)
2170 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2170 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2171 # for backwards compat, name was partially encoded
2171 # for backwards compat, name was partially encoded
2172 ofp = self.sopener(store.decodedir(name), 'w')
2172 ofp = self.sopener(store.decodedir(name), 'w')
2173 for chunk in util.filechunkiter(fp, limit=size):
2173 for chunk in util.filechunkiter(fp, limit=size):
2174 ofp.write(chunk)
2174 ofp.write(chunk)
2175 ofp.close()
2175 ofp.close()
2176 elapsed = time.time() - start
2176 elapsed = time.time() - start
2177 if elapsed <= 0:
2177 if elapsed <= 0:
2178 elapsed = 0.001
2178 elapsed = 0.001
2179 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2179 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2180 (util.bytecount(total_bytes), elapsed,
2180 (util.bytecount(total_bytes), elapsed,
2181 util.bytecount(total_bytes / elapsed)))
2181 util.bytecount(total_bytes / elapsed)))
2182 self.invalidate()
2182 self.invalidate()
2183 return len(self.heads()) + 1
2183 return len(self.heads()) + 1
2184
2184
2185 def clone(self, remote, heads=[], stream=False):
2185 def clone(self, remote, heads=[], stream=False):
2186 '''clone remote repository.
2186 '''clone remote repository.
2187
2187
2188 keyword arguments:
2188 keyword arguments:
2189 heads: list of revs to clone (forces use of pull)
2189 heads: list of revs to clone (forces use of pull)
2190 stream: use streaming clone if possible'''
2190 stream: use streaming clone if possible'''
2191
2191
2192 # now, all clients that can request uncompressed clones can
2192 # now, all clients that can request uncompressed clones can
2193 # read repo formats supported by all servers that can serve
2193 # read repo formats supported by all servers that can serve
2194 # them.
2194 # them.
2195
2195
2196 # if revlog format changes, client will have to check version
2196 # if revlog format changes, client will have to check version
2197 # and format flags on "stream" capability, and use
2197 # and format flags on "stream" capability, and use
2198 # uncompressed only if compatible.
2198 # uncompressed only if compatible.
2199
2199
2200 if stream and not heads and remote.capable('stream'):
2200 if stream and not heads and remote.capable('stream'):
2201 return self.stream_in(remote)
2201 return self.stream_in(remote)
2202 return self.pull(remote, heads)
2202 return self.pull(remote, heads)
2203
2203
2204 # used to avoid circular references so destructors work
2204 # used to avoid circular references so destructors work
2205 def aftertrans(files):
2205 def aftertrans(files):
2206 renamefiles = [tuple(t) for t in files]
2206 renamefiles = [tuple(t) for t in files]
2207 def a():
2207 def a():
2208 for src, dest in renamefiles:
2208 for src, dest in renamefiles:
2209 util.rename(src, dest)
2209 util.rename(src, dest)
2210 return a
2210 return a
2211
2211
2212 def instance(ui, path, create):
2212 def instance(ui, path, create):
2213 return localrepository(ui, util.drop_scheme('file', path), create)
2213 return localrepository(ui, util.drop_scheme('file', path), create)
2214
2214
2215 def islocal(path):
2215 def islocal(path):
2216 return True
2216 return True
@@ -1,361 +1,361 b''
1 # subrepo.py - sub-repository handling for Mercurial
1 # subrepo.py - sub-repository handling for Mercurial
2 #
2 #
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import errno, os, re, xml.dom.minidom, shutil
8 import errno, os, re, xml.dom.minidom, shutil
9 from i18n import _
9 from i18n import _
10 import config, util, node, error
10 import config, util, node, error
11 hg = None
11 hg = None
12
12
13 nullstate = ('', '', 'empty')
13 nullstate = ('', '', 'empty')
14
14
15 def state(ctx):
15 def state(ctx):
16 p = config.config()
16 p = config.config()
17 def read(f, sections=None, remap=None):
17 def read(f, sections=None, remap=None):
18 if f in ctx:
18 if f in ctx:
19 p.parse(f, ctx[f].data(), sections, remap, read)
19 p.parse(f, ctx[f].data(), sections, remap, read)
20 else:
20 else:
21 raise util.Abort(_("subrepo spec file %s not found") % f)
21 raise util.Abort(_("subrepo spec file %s not found") % f)
22
22
23 if '.hgsub' in ctx:
23 if '.hgsub' in ctx:
24 read('.hgsub')
24 read('.hgsub')
25
25
26 rev = {}
26 rev = {}
27 if '.hgsubstate' in ctx:
27 if '.hgsubstate' in ctx:
28 try:
28 try:
29 for l in ctx['.hgsubstate'].data().splitlines():
29 for l in ctx['.hgsubstate'].data().splitlines():
30 revision, path = l.split(" ", 1)
30 revision, path = l.split(" ", 1)
31 rev[path] = revision
31 rev[path] = revision
32 except IOError, err:
32 except IOError, err:
33 if err.errno != errno.ENOENT:
33 if err.errno != errno.ENOENT:
34 raise
34 raise
35
35
36 state = {}
36 state = {}
37 for path, src in p[''].items():
37 for path, src in p[''].items():
38 kind = 'hg'
38 kind = 'hg'
39 if src.startswith('['):
39 if src.startswith('['):
40 if ']' not in src:
40 if ']' not in src:
41 raise util.Abort(_('missing ] in subrepo source'))
41 raise util.Abort(_('missing ] in subrepo source'))
42 kind, src = src.split(']', 1)
42 kind, src = src.split(']', 1)
43 kind = kind[1:]
43 kind = kind[1:]
44 state[path] = (src.strip(), rev.get(path, ''), kind)
44 state[path] = (src.strip(), rev.get(path, ''), kind)
45
45
46 return state
46 return state
47
47
48 def writestate(repo, state):
48 def writestate(repo, state):
49 repo.wwrite('.hgsubstate',
49 repo.wwrite('.hgsubstate',
50 ''.join(['%s %s\n' % (state[s][1], s)
50 ''.join(['%s %s\n' % (state[s][1], s)
51 for s in sorted(state)]), '')
51 for s in sorted(state)]), '')
52
52
53 def submerge(repo, wctx, mctx, actx):
53 def submerge(repo, wctx, mctx, actx):
54 # working context, merging context, ancestor context
54 # working context, merging context, ancestor context
55 if mctx == actx: # backwards?
55 if mctx == actx: # backwards?
56 actx = wctx.p1()
56 actx = wctx.p1()
57 s1 = wctx.substate
57 s1 = wctx.substate
58 s2 = mctx.substate
58 s2 = mctx.substate
59 sa = actx.substate
59 sa = actx.substate
60 sm = {}
60 sm = {}
61
61
62 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
62 repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
63
63
64 def debug(s, msg, r=""):
64 def debug(s, msg, r=""):
65 if r:
65 if r:
66 r = "%s:%s:%s" % r
66 r = "%s:%s:%s" % r
67 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
67 repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
68
68
69 for s, l in s1.items():
69 for s, l in s1.items():
70 if wctx != actx and wctx.sub(s).dirty():
70 if wctx != actx and wctx.sub(s).dirty():
71 l = (l[0], l[1] + "+")
71 l = (l[0], l[1] + "+")
72 a = sa.get(s, nullstate)
72 a = sa.get(s, nullstate)
73 if s in s2:
73 if s in s2:
74 r = s2[s]
74 r = s2[s]
75 if l == r or r == a: # no change or local is newer
75 if l == r or r == a: # no change or local is newer
76 sm[s] = l
76 sm[s] = l
77 continue
77 continue
78 elif l == a: # other side changed
78 elif l == a: # other side changed
79 debug(s, "other changed, get", r)
79 debug(s, "other changed, get", r)
80 wctx.sub(s).get(r)
80 wctx.sub(s).get(r)
81 sm[s] = r
81 sm[s] = r
82 elif l[0] != r[0]: # sources differ
82 elif l[0] != r[0]: # sources differ
83 if repo.ui.promptchoice(
83 if repo.ui.promptchoice(
84 _(' subrepository sources for %s differ\n'
84 _(' subrepository sources for %s differ\n'
85 'use (l)ocal source (%s) or (r)emote source (%s)?')
85 'use (l)ocal source (%s) or (r)emote source (%s)?')
86 % (s, l[0], r[0]),
86 % (s, l[0], r[0]),
87 (_('&Local'), _('&Remote')), 0):
87 (_('&Local'), _('&Remote')), 0):
88 debug(s, "prompt changed, get", r)
88 debug(s, "prompt changed, get", r)
89 wctx.sub(s).get(r)
89 wctx.sub(s).get(r)
90 sm[s] = r
90 sm[s] = r
91 elif l[1] == a[1]: # local side is unchanged
91 elif l[1] == a[1]: # local side is unchanged
92 debug(s, "other side changed, get", r)
92 debug(s, "other side changed, get", r)
93 wctx.sub(s).get(r)
93 wctx.sub(s).get(r)
94 sm[s] = r
94 sm[s] = r
95 else:
95 else:
96 debug(s, "both sides changed, merge with", r)
96 debug(s, "both sides changed, merge with", r)
97 wctx.sub(s).merge(r)
97 wctx.sub(s).merge(r)
98 sm[s] = l
98 sm[s] = l
99 elif l == a: # remote removed, local unchanged
99 elif l == a: # remote removed, local unchanged
100 debug(s, "remote removed, remove")
100 debug(s, "remote removed, remove")
101 wctx.sub(s).remove()
101 wctx.sub(s).remove()
102 else:
102 else:
103 if repo.ui.promptchoice(
103 if repo.ui.promptchoice(
104 _(' local changed subrepository %s which remote removed\n'
104 _(' local changed subrepository %s which remote removed\n'
105 'use (c)hanged version or (d)elete?') % s,
105 'use (c)hanged version or (d)elete?') % s,
106 (_('&Changed'), _('&Delete')), 0):
106 (_('&Changed'), _('&Delete')), 0):
107 debug(s, "prompt remove")
107 debug(s, "prompt remove")
108 wctx.sub(s).remove()
108 wctx.sub(s).remove()
109
109
110 for s, r in s2.items():
110 for s, r in s2.items():
111 if s in s1:
111 if s in s1:
112 continue
112 continue
113 elif s not in sa:
113 elif s not in sa:
114 debug(s, "remote added, get", r)
114 debug(s, "remote added, get", r)
115 mctx.sub(s).get(r)
115 mctx.sub(s).get(r)
116 sm[s] = r
116 sm[s] = r
117 elif r != sa[s]:
117 elif r != sa[s]:
118 if repo.ui.promptchoice(
118 if repo.ui.promptchoice(
119 _(' remote changed subrepository %s which local removed\n'
119 _(' remote changed subrepository %s which local removed\n'
120 'use (c)hanged version or (d)elete?') % s,
120 'use (c)hanged version or (d)elete?') % s,
121 (_('&Changed'), _('&Delete')), 0) == 0:
121 (_('&Changed'), _('&Delete')), 0) == 0:
122 debug(s, "prompt recreate", r)
122 debug(s, "prompt recreate", r)
123 wctx.sub(s).get(r)
123 wctx.sub(s).get(r)
124 sm[s] = r
124 sm[s] = r
125
125
126 # record merged .hgsubstate
126 # record merged .hgsubstate
127 writestate(repo, sm)
127 writestate(repo, sm)
128
128
129 def _abssource(repo, push=False):
129 def _abssource(repo, push=False):
130 if hasattr(repo, '_subparent'):
130 if hasattr(repo, '_subparent'):
131 source = repo._subsource
131 source = repo._subsource
132 if source.startswith('/') or '://' in source:
132 if source.startswith('/') or '://' in source:
133 return source
133 return source
134 parent = _abssource(repo._subparent)
134 parent = _abssource(repo._subparent)
135 if '://' in parent:
135 if '://' in parent:
136 if parent[-1] == '/':
136 if parent[-1] == '/':
137 parent = parent[:-1]
137 parent = parent[:-1]
138 return parent + '/' + source
138 return parent + '/' + source
139 return os.path.join(parent, repo._subsource)
139 return os.path.join(parent, repo._subsource)
140 if push and repo.ui.config('paths', 'default-push'):
140 if push and repo.ui.config('paths', 'default-push'):
141 return repo.ui.config('paths', 'default-push', repo.root)
141 return repo.ui.config('paths', 'default-push', repo.root)
142 return repo.ui.config('paths', 'default', repo.root)
142 return repo.ui.config('paths', 'default', repo.root)
143
143
144 def subrepo(ctx, path):
144 def subrepo(ctx, path):
145 # subrepo inherently violates our import layering rules
145 # subrepo inherently violates our import layering rules
146 # because it wants to make repo objects from deep inside the stack
146 # because it wants to make repo objects from deep inside the stack
147 # so we manually delay the circular imports to not break
147 # so we manually delay the circular imports to not break
148 # scripts that don't use our demand-loading
148 # scripts that don't use our demand-loading
149 global hg
149 global hg
150 import hg as h
150 import hg as h
151 hg = h
151 hg = h
152
152
153 util.path_auditor(ctx._repo.root)(path)
153 util.path_auditor(ctx._repo.root)(path)
154 state = ctx.substate.get(path, nullstate)
154 state = ctx.substate.get(path, nullstate)
155 if state[2] not in types:
155 if state[2] not in types:
156 raise util.Abort(_('unknown subrepo type %s') % state[2])
156 raise util.Abort(_('unknown subrepo type %s') % state[2])
157 return types[state[2]](ctx, path, state[:2])
157 return types[state[2]](ctx, path, state[:2])
158
158
159 # subrepo classes need to implement the following methods:
159 # subrepo classes need to implement the following methods:
160 # __init__(self, ctx, path, state)
160 # __init__(self, ctx, path, state)
161 # dirty(self): returns true if the dirstate of the subrepo
161 # dirty(self): returns true if the dirstate of the subrepo
162 # does not match current stored state
162 # does not match current stored state
163 # commit(self, text, user, date): commit the current changes
163 # commit(self, text, user, date): commit the current changes
164 # to the subrepo with the given log message. Use given
164 # to the subrepo with the given log message. Use given
165 # user and date if possible. Return the new state of the subrepo.
165 # user and date if possible. Return the new state of the subrepo.
166 # remove(self): remove the subrepo (should verify the dirstate
166 # remove(self): remove the subrepo (should verify the dirstate
167 # is not dirty first)
167 # is not dirty first)
168 # get(self, state): run whatever commands are needed to put the
168 # get(self, state): run whatever commands are needed to put the
169 # subrepo into this state
169 # subrepo into this state
170 # merge(self, state): merge currently-saved state with the new state.
170 # merge(self, state): merge currently-saved state with the new state.
171 # push(self, force): perform whatever action is analagous to 'hg push'
171 # push(self, force): perform whatever action is analagous to 'hg push'
172 # This may be a no-op on some systems.
172 # This may be a no-op on some systems.
173
173
174 class hgsubrepo(object):
174 class hgsubrepo(object):
175 def __init__(self, ctx, path, state):
175 def __init__(self, ctx, path, state):
176 self._path = path
176 self._path = path
177 self._state = state
177 self._state = state
178 r = ctx._repo
178 r = ctx._repo
179 root = r.wjoin(path)
179 root = r.wjoin(path)
180 if os.path.exists(os.path.join(root, '.hg')):
180 if os.path.exists(os.path.join(root, '.hg')):
181 self._repo = hg.repository(r.ui, root)
181 self._repo = hg.repository(r.ui, root)
182 else:
182 else:
183 util.makedirs(root)
183 util.makedirs(root)
184 self._repo = hg.repository(r.ui, root, create=True)
184 self._repo = hg.repository(r.ui, root, create=True)
185 f = file(os.path.join(root, '.hg', 'hgrc'), 'w')
185 f = file(os.path.join(root, '.hg', 'hgrc'), 'w')
186 f.write('[paths]\ndefault = %s\n' % os.path.join(
186 f.write('[paths]\ndefault = %s\n' % os.path.join(
187 _abssource(ctx._repo), path))
187 _abssource(ctx._repo), path))
188 f.close()
188 f.close()
189 self._repo._subparent = r
189 self._repo._subparent = r
190 self._repo._subsource = state[0]
190 self._repo._subsource = state[0]
191
191
192 def dirty(self):
192 def dirty(self):
193 r = self._state[1]
193 r = self._state[1]
194 if r == '':
194 if r == '':
195 return True
195 return True
196 w = self._repo[None]
196 w = self._repo[None]
197 if w.p1() != self._repo[r]: # version checked out changed
197 if w.p1() != self._repo[r]: # version checked out changed
198 return True
198 return True
199 return w.dirty() # working directory changed
199 return w.dirty() # working directory changed
200
200
201 def commit(self, text, user, date):
201 def commit(self, text, user, date):
202 self._repo.ui.debug("committing subrepo %s\n" % self._path)
202 self._repo.ui.debug("committing subrepo %s\n" % self._path)
203 n = self._repo.commit(text, user, date)
203 n = self._repo.commit(text, user, date)
204 if not n:
204 if not n:
205 return self._repo['.'].hex() # different version checked out
205 return self._repo['.'].hex() # different version checked out
206 return node.hex(n)
206 return node.hex(n)
207
207
208 def remove(self):
208 def remove(self):
209 # we can't fully delete the repository as it may contain
209 # we can't fully delete the repository as it may contain
210 # local-only history
210 # local-only history
211 self._repo.ui.note(_('removing subrepo %s\n') % self._path)
211 self._repo.ui.note(_('removing subrepo %s\n') % self._path)
212 hg.clean(self._repo, node.nullid, False)
212 hg.clean(self._repo, node.nullid, False)
213
213
214 def _get(self, state):
214 def _get(self, state):
215 source, revision, kind = state
215 source, revision, kind = state
216 try:
216 try:
217 self._repo.lookup(revision)
217 self._repo.lookup(revision)
218 except error.RepoError:
218 except error.RepoError:
219 self._repo._subsource = source
219 self._repo._subsource = source
220 self._repo.ui.status(_('pulling subrepo %s\n') % self._path)
220 self._repo.ui.status(_('pulling subrepo %s\n') % self._path)
221 srcurl = _abssource(self._repo)
221 srcurl = _abssource(self._repo)
222 other = hg.repository(self._repo.ui, srcurl)
222 other = hg.repository(self._repo.ui, srcurl)
223 self._repo.pull(other)
223 self._repo.pull(other)
224
224
225 def get(self, state):
225 def get(self, state):
226 self._get(state)
226 self._get(state)
227 source, revision, kind = state
227 source, revision, kind = state
228 self._repo.ui.debug("getting subrepo %s\n" % self._path)
228 self._repo.ui.debug("getting subrepo %s\n" % self._path)
229 hg.clean(self._repo, revision, False)
229 hg.clean(self._repo, revision, False)
230
230
231 def merge(self, state):
231 def merge(self, state):
232 self._get(state)
232 self._get(state)
233 cur = self._repo['.']
233 cur = self._repo['.']
234 dst = self._repo[state[1]]
234 dst = self._repo[state[1]]
235 anc = dst.ancestor(cur)
235 anc = dst.ancestor(cur)
236 if anc == cur:
236 if anc == cur:
237 self._repo.ui.debug("updating subrepo %s\n" % self._path)
237 self._repo.ui.debug("updating subrepo %s\n" % self._path)
238 hg.update(self._repo, state[1])
238 hg.update(self._repo, state[1])
239 elif anc == dst:
239 elif anc == dst:
240 self._repo.ui.debug("skipping subrepo %s\n" % self._path)
240 self._repo.ui.debug("skipping subrepo %s\n" % self._path)
241 else:
241 else:
242 self._repo.ui.debug("merging subrepo %s\n" % self._path)
242 self._repo.ui.debug("merging subrepo %s\n" % self._path)
243 hg.merge(self._repo, state[1], remind=False)
243 hg.merge(self._repo, state[1], remind=False)
244
244
245 def push(self, force):
245 def push(self, force):
246 # push subrepos depth-first for coherent ordering
246 # push subrepos depth-first for coherent ordering
247 c = self._repo['']
247 c = self._repo['']
248 subs = c.substate # only repos that are committed
248 subs = c.substate # only repos that are committed
249 for s in sorted(subs):
249 for s in sorted(subs):
250 c.sub(s).push(force)
250 c.sub(s).push(force)
251
251
252 self._repo.ui.status(_('pushing subrepo %s\n') % self._path)
252 self._repo.ui.status(_('pushing subrepo %s\n') % self._path)
253 dsturl = _abssource(self._repo, True)
253 dsturl = _abssource(self._repo, True)
254 other = hg.repository(self._repo.ui, dsturl)
254 other = hg.repository(self._repo.ui, dsturl)
255 self._repo.push(other, force)
255 self._repo.push(other, force)
256
256
class svnsubrepo(object):
    """A Subversion working copy tracked as a Mercurial subrepository.

    state is a (source URL, revision) pair as stored in .hgsubstate.
    All interaction is done by shelling out to the 'svn' command line
    client, which must therefore be available in PATH.
    """

    def __init__(self, ctx, path, state):
        # ctx: changectx the subrepo belongs to
        # path: repo-relative path of the working copy
        # state: (source URL, revision string) from .hgsubstate
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui

    def _svncommand(self, commands):
        """Run "svn <commands...> <path>" and return its stdout.

        Aborts if the command wrote anything to stderr.
        """
        cmd = ['svn'] + commands + [self._path]
        cmd = [util.shellquote(arg) for arg in cmd]
        cmd = util.quotecommand(' '.join(cmd))
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        env['LC_MESSAGES'] = 'C'
        write, read, err = util.popen3(cmd, env=env, newlines=True)
        retdata = read.read()
        err = err.read().strip()
        if err:
            raise util.Abort(err)
        return retdata

    def _wcrev(self):
        """Return the working copy revision as an int (0 if unknown)."""
        output = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        if not entries:
            return 0
        return int(entries[0].getAttribute('revision') or 0)

    def _wcchanged(self):
        """Return (changes, extchanges) where changes is True
        if the working directory was changed, and extchanges is
        True if any of these changes concern an external entry.
        """
        output = self._svncommand(['status', '--xml'])
        externals, changes = [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none')):
                changes.append(path)
        # A change below an external belongs to that external.
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True
        return bool(changes), False

    def dirty(self):
        """Return True if the working copy differs from the recorded state."""
        # Compare as strings: _wcrev() returns an int while the revision
        # recorded in state is a string (merge() applies int() to it), and
        # an int never compares equal to a str.
        if (str(self._wcrev()) == str(self._state[1])
            and not self._wcchanged()[0]):
            return False
        return True

    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged = self._wcchanged()
        if not changed:
            return self._wcrev()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        commitinfo = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search(r'Committed revision ([\d]+).', commitinfo)
        if not newrev:
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        # Sync the working copy so _wcrev() reflects the committed state.
        self._ui.status(self._svncommand(['update', '-r', newrev]))
        return newrev

    def remove(self):
        """Delete the working copy, unless it has local changes."""
        if self.dirty():
            # Apply % after _(): the original interpolated inside the
            # translated fragment, which both broke translation and raised
            # TypeError ('it has changes.\n' has no %s placeholder).
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)
        # _repo, not repo: the context stores the repository in _repo
        # (see __init__), so ctx.repo would raise AttributeError here.
        shutil.rmtree(self._ctx._repo.join(self._path))

    def get(self, state):
        """Check out state = (source URL, revision) into the subrepo path."""
        status = self._svncommand(['checkout', state[0],
                                   '--revision', state[1]])
        if not re.search(r'Checked out revision [\d]+.', status):
            raise util.Abort(status.splitlines()[-1])
        self._ui.status(status)

    def merge(self, state):
        """Fast-forward the working copy if the other revision is newer."""
        old = int(self._state[1])
        new = int(state[1])
        if new > old:
            self.get(state)

    def push(self, force):
        # nothing for svn: commits go straight to the central server
        pass
357
357
# Registry of supported subrepository implementations, keyed by kind.
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now