##// END OF EJS Templates
localrepo: factor _findtags() out of tags() (issue548)....
Greg Ward -
r9145:6b03f93b default
parent child Browse files
Show More
@@ -1,333 +1,330 b''
1 # Mercurial extension to provide the 'hg bookmark' command
1 # Mercurial extension to provide the 'hg bookmark' command
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 '''track a line of development with movable markers
8 '''track a line of development with movable markers
9
9
10 Bookmarks are local movable markers to changesets. Every bookmark points to a
10 Bookmarks are local movable markers to changesets. Every bookmark points to a
11 changeset identified by its hash. If you commit a changeset that is based on a
11 changeset identified by its hash. If you commit a changeset that is based on a
12 changeset that has a bookmark on it, the bookmark shifts to the new changeset.
12 changeset that has a bookmark on it, the bookmark shifts to the new changeset.
13
13
14 It is possible to use bookmark names in every revision lookup (e.g. hg merge,
14 It is possible to use bookmark names in every revision lookup (e.g. hg merge,
15 hg update).
15 hg update).
16
16
17 By default, when several bookmarks point to the same changeset, they will all
17 By default, when several bookmarks point to the same changeset, they will all
18 move forward together. It is possible to obtain a more git-like experience by
18 move forward together. It is possible to obtain a more git-like experience by
19 adding the following configuration option to your .hgrc:
19 adding the following configuration option to your .hgrc:
20
20
21 [bookmarks]
21 [bookmarks]
22 track.current = True
22 track.current = True
23
23
24 This will cause Mercurial to track the bookmark that you are currently using,
24 This will cause Mercurial to track the bookmark that you are currently using,
25 and only update it. This is similar to git's approach to branching.
25 and only update it. This is similar to git's approach to branching.
26 '''
26 '''
27
27
28 from mercurial.i18n import _
28 from mercurial.i18n import _
29 from mercurial.node import nullid, nullrev, hex, short
29 from mercurial.node import nullid, nullrev, hex, short
30 from mercurial import util, commands, localrepo, repair, extensions
30 from mercurial import util, commands, localrepo, repair, extensions
31 import os
31 import os
32
32
33 def parse(repo):
33 def parse(repo):
34 '''Parse .hg/bookmarks file and return a dictionary
34 '''Parse .hg/bookmarks file and return a dictionary
35
35
36 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
36 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
37 in the .hg/bookmarks file. They are read by the parse() method and
37 in the .hg/bookmarks file. They are read by the parse() method and
38 returned as a dictionary with name => hash values.
38 returned as a dictionary with name => hash values.
39
39
40 The parsed dictionary is cached until a write() operation is done.
40 The parsed dictionary is cached until a write() operation is done.
41 '''
41 '''
42 try:
42 try:
43 if repo._bookmarks:
43 if repo._bookmarks:
44 return repo._bookmarks
44 return repo._bookmarks
45 repo._bookmarks = {}
45 repo._bookmarks = {}
46 for line in repo.opener('bookmarks'):
46 for line in repo.opener('bookmarks'):
47 sha, refspec = line.strip().split(' ', 1)
47 sha, refspec = line.strip().split(' ', 1)
48 repo._bookmarks[refspec] = repo.lookup(sha)
48 repo._bookmarks[refspec] = repo.lookup(sha)
49 except:
49 except:
50 pass
50 pass
51 return repo._bookmarks
51 return repo._bookmarks
52
52
53 def write(repo, refs):
53 def write(repo, refs):
54 '''Write bookmarks
54 '''Write bookmarks
55
55
56 Write the given bookmark => hash dictionary to the .hg/bookmarks file
56 Write the given bookmark => hash dictionary to the .hg/bookmarks file
57 in a format equal to those of localtags.
57 in a format equal to those of localtags.
58
58
59 We also store a backup of the previous state in undo.bookmarks that
59 We also store a backup of the previous state in undo.bookmarks that
60 can be copied back on rollback.
60 can be copied back on rollback.
61 '''
61 '''
62 if os.path.exists(repo.join('bookmarks')):
62 if os.path.exists(repo.join('bookmarks')):
63 util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
63 util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
64 if current(repo) not in refs:
64 if current(repo) not in refs:
65 setcurrent(repo, None)
65 setcurrent(repo, None)
66 wlock = repo.wlock()
66 wlock = repo.wlock()
67 try:
67 try:
68 file = repo.opener('bookmarks', 'w', atomictemp=True)
68 file = repo.opener('bookmarks', 'w', atomictemp=True)
69 for refspec, node in refs.iteritems():
69 for refspec, node in refs.iteritems():
70 file.write("%s %s\n" % (hex(node), refspec))
70 file.write("%s %s\n" % (hex(node), refspec))
71 file.rename()
71 file.rename()
72 finally:
72 finally:
73 wlock.release()
73 wlock.release()
74
74
75 def current(repo):
75 def current(repo):
76 '''Get the current bookmark
76 '''Get the current bookmark
77
77
78 If we use gittishsh branches we have a current bookmark that
78 If we use gittishsh branches we have a current bookmark that
79 we are on. This function returns the name of the bookmark. It
79 we are on. This function returns the name of the bookmark. It
80 is stored in .hg/bookmarks.current
80 is stored in .hg/bookmarks.current
81 '''
81 '''
82 if repo._bookmarkcurrent:
82 if repo._bookmarkcurrent:
83 return repo._bookmarkcurrent
83 return repo._bookmarkcurrent
84 mark = None
84 mark = None
85 if os.path.exists(repo.join('bookmarks.current')):
85 if os.path.exists(repo.join('bookmarks.current')):
86 file = repo.opener('bookmarks.current')
86 file = repo.opener('bookmarks.current')
87 # No readline() in posixfile_nt, reading everything is cheap
87 # No readline() in posixfile_nt, reading everything is cheap
88 mark = (file.readlines() or [''])[0]
88 mark = (file.readlines() or [''])[0]
89 if mark == '':
89 if mark == '':
90 mark = None
90 mark = None
91 file.close()
91 file.close()
92 repo._bookmarkcurrent = mark
92 repo._bookmarkcurrent = mark
93 return mark
93 return mark
94
94
95 def setcurrent(repo, mark):
95 def setcurrent(repo, mark):
96 '''Set the name of the bookmark that we are currently on
96 '''Set the name of the bookmark that we are currently on
97
97
98 Set the name of the bookmark that we are on (hg update <bookmark>).
98 Set the name of the bookmark that we are on (hg update <bookmark>).
99 The name is recorded in .hg/bookmarks.current
99 The name is recorded in .hg/bookmarks.current
100 '''
100 '''
101 if current(repo) == mark:
101 if current(repo) == mark:
102 return
102 return
103
103
104 refs = parse(repo)
104 refs = parse(repo)
105
105
106 # do not update if we do update to a rev equal to the current bookmark
106 # do not update if we do update to a rev equal to the current bookmark
107 if (mark and mark not in refs and
107 if (mark and mark not in refs and
108 current(repo) and refs[current(repo)] == repo.changectx('.').node()):
108 current(repo) and refs[current(repo)] == repo.changectx('.').node()):
109 return
109 return
110 if mark not in refs:
110 if mark not in refs:
111 mark = ''
111 mark = ''
112 wlock = repo.wlock()
112 wlock = repo.wlock()
113 try:
113 try:
114 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
114 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
115 file.write(mark)
115 file.write(mark)
116 file.rename()
116 file.rename()
117 finally:
117 finally:
118 wlock.release()
118 wlock.release()
119 repo._bookmarkcurrent = mark
119 repo._bookmarkcurrent = mark
120
120
121 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
121 def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
122 '''track a line of development with movable markers
122 '''track a line of development with movable markers
123
123
124 Bookmarks are pointers to certain commits that move when committing.
124 Bookmarks are pointers to certain commits that move when committing.
125 Bookmarks are local. They can be renamed, copied and deleted. It is
125 Bookmarks are local. They can be renamed, copied and deleted. It is
126 possible to use bookmark names in 'hg merge' and 'hg update' to merge and
126 possible to use bookmark names in 'hg merge' and 'hg update' to merge and
127 update respectively to a given bookmark.
127 update respectively to a given bookmark.
128
128
129 You can use 'hg bookmark NAME' to set a bookmark on the working
129 You can use 'hg bookmark NAME' to set a bookmark on the working
130 directory's parent revision with the given name. If you specify a revision
130 directory's parent revision with the given name. If you specify a revision
131 using -r REV (where REV may be an existing bookmark), the bookmark is
131 using -r REV (where REV may be an existing bookmark), the bookmark is
132 assigned to that revision.
132 assigned to that revision.
133 '''
133 '''
134 hexfn = ui.debugflag and hex or short
134 hexfn = ui.debugflag and hex or short
135 marks = parse(repo)
135 marks = parse(repo)
136 cur = repo.changectx('.').node()
136 cur = repo.changectx('.').node()
137
137
138 if rename:
138 if rename:
139 if rename not in marks:
139 if rename not in marks:
140 raise util.Abort(_("a bookmark of this name does not exist"))
140 raise util.Abort(_("a bookmark of this name does not exist"))
141 if mark in marks and not force:
141 if mark in marks and not force:
142 raise util.Abort(_("a bookmark of the same name already exists"))
142 raise util.Abort(_("a bookmark of the same name already exists"))
143 if mark is None:
143 if mark is None:
144 raise util.Abort(_("new bookmark name required"))
144 raise util.Abort(_("new bookmark name required"))
145 marks[mark] = marks[rename]
145 marks[mark] = marks[rename]
146 del marks[rename]
146 del marks[rename]
147 if current(repo) == rename:
147 if current(repo) == rename:
148 setcurrent(repo, mark)
148 setcurrent(repo, mark)
149 write(repo, marks)
149 write(repo, marks)
150 return
150 return
151
151
152 if delete:
152 if delete:
153 if mark is None:
153 if mark is None:
154 raise util.Abort(_("bookmark name required"))
154 raise util.Abort(_("bookmark name required"))
155 if mark not in marks:
155 if mark not in marks:
156 raise util.Abort(_("a bookmark of this name does not exist"))
156 raise util.Abort(_("a bookmark of this name does not exist"))
157 if mark == current(repo):
157 if mark == current(repo):
158 setcurrent(repo, None)
158 setcurrent(repo, None)
159 del marks[mark]
159 del marks[mark]
160 write(repo, marks)
160 write(repo, marks)
161 return
161 return
162
162
163 if mark != None:
163 if mark != None:
164 if "\n" in mark:
164 if "\n" in mark:
165 raise util.Abort(_("bookmark name cannot contain newlines"))
165 raise util.Abort(_("bookmark name cannot contain newlines"))
166 mark = mark.strip()
166 mark = mark.strip()
167 if mark in marks and not force:
167 if mark in marks and not force:
168 raise util.Abort(_("a bookmark of the same name already exists"))
168 raise util.Abort(_("a bookmark of the same name already exists"))
169 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
169 if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
170 and not force):
170 and not force):
171 raise util.Abort(
171 raise util.Abort(
172 _("a bookmark cannot have the name of an existing branch"))
172 _("a bookmark cannot have the name of an existing branch"))
173 if rev:
173 if rev:
174 marks[mark] = repo.lookup(rev)
174 marks[mark] = repo.lookup(rev)
175 else:
175 else:
176 marks[mark] = repo.changectx('.').node()
176 marks[mark] = repo.changectx('.').node()
177 setcurrent(repo, mark)
177 setcurrent(repo, mark)
178 write(repo, marks)
178 write(repo, marks)
179 return
179 return
180
180
181 if mark is None:
181 if mark is None:
182 if rev:
182 if rev:
183 raise util.Abort(_("bookmark name required"))
183 raise util.Abort(_("bookmark name required"))
184 if len(marks) == 0:
184 if len(marks) == 0:
185 ui.status("no bookmarks set\n")
185 ui.status("no bookmarks set\n")
186 else:
186 else:
187 for bmark, n in marks.iteritems():
187 for bmark, n in marks.iteritems():
188 if ui.configbool('bookmarks', 'track.current'):
188 if ui.configbool('bookmarks', 'track.current'):
189 prefix = (bmark == current(repo) and n == cur) and '*' or ' '
189 prefix = (bmark == current(repo) and n == cur) and '*' or ' '
190 else:
190 else:
191 prefix = (n == cur) and '*' or ' '
191 prefix = (n == cur) and '*' or ' '
192
192
193 ui.write(" %s %-25s %d:%s\n" % (
193 ui.write(" %s %-25s %d:%s\n" % (
194 prefix, bmark, repo.changelog.rev(n), hexfn(n)))
194 prefix, bmark, repo.changelog.rev(n), hexfn(n)))
195 return
195 return
196
196
197 def _revstostrip(changelog, node):
197 def _revstostrip(changelog, node):
198 srev = changelog.rev(node)
198 srev = changelog.rev(node)
199 tostrip = [srev]
199 tostrip = [srev]
200 saveheads = []
200 saveheads = []
201 for r in xrange(srev, len(changelog)):
201 for r in xrange(srev, len(changelog)):
202 parents = changelog.parentrevs(r)
202 parents = changelog.parentrevs(r)
203 if parents[0] in tostrip or parents[1] in tostrip:
203 if parents[0] in tostrip or parents[1] in tostrip:
204 tostrip.append(r)
204 tostrip.append(r)
205 if parents[1] != nullrev:
205 if parents[1] != nullrev:
206 for p in parents:
206 for p in parents:
207 if p not in tostrip and p > srev:
207 if p not in tostrip and p > srev:
208 saveheads.append(p)
208 saveheads.append(p)
209 return [r for r in tostrip if r not in saveheads]
209 return [r for r in tostrip if r not in saveheads]
210
210
211 def strip(oldstrip, ui, repo, node, backup="all"):
211 def strip(oldstrip, ui, repo, node, backup="all"):
212 """Strip bookmarks if revisions are stripped using
212 """Strip bookmarks if revisions are stripped using
213 the mercurial.strip method. This usually happens during
213 the mercurial.strip method. This usually happens during
214 qpush and qpop"""
214 qpush and qpop"""
215 revisions = _revstostrip(repo.changelog, node)
215 revisions = _revstostrip(repo.changelog, node)
216 marks = parse(repo)
216 marks = parse(repo)
217 update = []
217 update = []
218 for mark, n in marks.iteritems():
218 for mark, n in marks.iteritems():
219 if repo.changelog.rev(n) in revisions:
219 if repo.changelog.rev(n) in revisions:
220 update.append(mark)
220 update.append(mark)
221 oldstrip(ui, repo, node, backup)
221 oldstrip(ui, repo, node, backup)
222 if len(update) > 0:
222 if len(update) > 0:
223 for m in update:
223 for m in update:
224 marks[m] = repo.changectx('.').node()
224 marks[m] = repo.changectx('.').node()
225 write(repo, marks)
225 write(repo, marks)
226
226
227 def reposetup(ui, repo):
227 def reposetup(ui, repo):
228 if not isinstance(repo, localrepo.localrepository):
228 if not isinstance(repo, localrepo.localrepository):
229 return
229 return
230
230
231 # init a bookmark cache as otherwise we would get a infinite reading
231 # init a bookmark cache as otherwise we would get a infinite reading
232 # in lookup()
232 # in lookup()
233 repo._bookmarks = None
233 repo._bookmarks = None
234 repo._bookmarkcurrent = None
234 repo._bookmarkcurrent = None
235
235
236 class bookmark_repo(repo.__class__):
236 class bookmark_repo(repo.__class__):
237 def rollback(self):
237 def rollback(self):
238 if os.path.exists(self.join('undo.bookmarks')):
238 if os.path.exists(self.join('undo.bookmarks')):
239 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
239 util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
240 return super(bookmark_repo, self).rollback()
240 return super(bookmark_repo, self).rollback()
241
241
242 def lookup(self, key):
242 def lookup(self, key):
243 if self._bookmarks is None:
243 if self._bookmarks is None:
244 self._bookmarks = parse(self)
244 self._bookmarks = parse(self)
245 if key in self._bookmarks:
245 if key in self._bookmarks:
246 key = self._bookmarks[key]
246 key = self._bookmarks[key]
247 return super(bookmark_repo, self).lookup(key)
247 return super(bookmark_repo, self).lookup(key)
248
248
249 def commit(self, *k, **kw):
249 def commit(self, *k, **kw):
250 """Add a revision to the repository and
250 """Add a revision to the repository and
251 move the bookmark"""
251 move the bookmark"""
252 wlock = self.wlock() # do both commit and bookmark with lock held
252 wlock = self.wlock() # do both commit and bookmark with lock held
253 try:
253 try:
254 node = super(bookmark_repo, self).commit(*k, **kw)
254 node = super(bookmark_repo, self).commit(*k, **kw)
255 if node is None:
255 if node is None:
256 return None
256 return None
257 parents = self.changelog.parents(node)
257 parents = self.changelog.parents(node)
258 if parents[1] == nullid:
258 if parents[1] == nullid:
259 parents = (parents[0],)
259 parents = (parents[0],)
260 marks = parse(self)
260 marks = parse(self)
261 update = False
261 update = False
262 for mark, n in marks.items():
262 for mark, n in marks.items():
263 if ui.configbool('bookmarks', 'track.current'):
263 if ui.configbool('bookmarks', 'track.current'):
264 if mark == current(self) and n in parents:
264 if mark == current(self) and n in parents:
265 marks[mark] = node
265 marks[mark] = node
266 update = True
266 update = True
267 else:
267 else:
268 if n in parents:
268 if n in parents:
269 marks[mark] = node
269 marks[mark] = node
270 update = True
270 update = True
271 if update:
271 if update:
272 write(self, marks)
272 write(self, marks)
273 return node
273 return node
274 finally:
274 finally:
275 wlock.release()
275 wlock.release()
276
276
277 def addchangegroup(self, source, srctype, url, emptyok=False):
277 def addchangegroup(self, source, srctype, url, emptyok=False):
278 parents = self.dirstate.parents()
278 parents = self.dirstate.parents()
279
279
280 result = super(bookmark_repo, self).addchangegroup(
280 result = super(bookmark_repo, self).addchangegroup(
281 source, srctype, url, emptyok)
281 source, srctype, url, emptyok)
282 if result > 1:
282 if result > 1:
283 # We have more heads than before
283 # We have more heads than before
284 return result
284 return result
285 node = self.changelog.tip()
285 node = self.changelog.tip()
286 marks = parse(self)
286 marks = parse(self)
287 update = False
287 update = False
288 for mark, n in marks.items():
288 for mark, n in marks.items():
289 if n in parents:
289 if n in parents:
290 marks[mark] = node
290 marks[mark] = node
291 update = True
291 update = True
292 if update:
292 if update:
293 write(self, marks)
293 write(self, marks)
294 return result
294 return result
295
295
296 def tags(self):
296 def _findtags(self):
297 """Merge bookmarks with normal tags"""
297 """Merge bookmarks with normal tags"""
298 if self.tagscache:
298 (tags, tagtypes) = super(bookmark_repo, self)._findtags()
299 return self.tagscache
299 tags.update(parse(self))
300
300 return (tags, tagtypes)
301 tagscache = super(bookmark_repo, self).tags()
302 tagscache.update(parse(self))
303 return tagscache
304
301
305 repo.__class__ = bookmark_repo
302 repo.__class__ = bookmark_repo
306
303
307 def uisetup(ui):
304 def uisetup(ui):
308 extensions.wrapfunction(repair, "strip", strip)
305 extensions.wrapfunction(repair, "strip", strip)
309 if ui.configbool('bookmarks', 'track.current'):
306 if ui.configbool('bookmarks', 'track.current'):
310 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
307 extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
311
308
312 def updatecurbookmark(orig, ui, repo, *args, **opts):
309 def updatecurbookmark(orig, ui, repo, *args, **opts):
313 '''Set the current bookmark
310 '''Set the current bookmark
314
311
315 If the user updates to a bookmark we update the .hg/bookmarks.current
312 If the user updates to a bookmark we update the .hg/bookmarks.current
316 file.
313 file.
317 '''
314 '''
318 res = orig(ui, repo, *args, **opts)
315 res = orig(ui, repo, *args, **opts)
319 rev = opts['rev']
316 rev = opts['rev']
320 if not rev and len(args) > 0:
317 if not rev and len(args) > 0:
321 rev = args[0]
318 rev = args[0]
322 setcurrent(repo, rev)
319 setcurrent(repo, rev)
323 return res
320 return res
324
321
325 cmdtable = {
322 cmdtable = {
326 "bookmarks":
323 "bookmarks":
327 (bookmark,
324 (bookmark,
328 [('f', 'force', False, _('force')),
325 [('f', 'force', False, _('force')),
329 ('r', 'rev', '', _('revision')),
326 ('r', 'rev', '', _('revision')),
330 ('d', 'delete', False, _('delete a given bookmark')),
327 ('d', 'delete', False, _('delete a given bookmark')),
331 ('m', 'rename', '', _('rename a given bookmark'))],
328 ('m', 'rename', '', _('rename a given bookmark'))],
332 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
329 _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
333 }
330 }
@@ -1,2618 +1,2617 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 '''manage a stack of patches
8 '''manage a stack of patches
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and applied
11 repository. It manages two stacks of patches - all known patches, and applied
12 patches (subset of known patches).
12 patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches directory.
14 Known patches are represented as patch files in the .hg/patches directory.
15 Applied patches are both patch files and changesets.
15 Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial.lock import release
34 from mercurial.lock import release
35 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import commands, cmdutil, hg, patch, util
36 from mercurial import repair, extensions, url, error
36 from mercurial import repair, extensions, url, error
37 import os, sys, re, errno
37 import os, sys, re, errno
38
38
39 commands.norepo += " qclone"
39 commands.norepo += " qclone"
40
40
41 # Patch names looks like unix-file names.
41 # Patch names looks like unix-file names.
42 # They must be joinable with queue directory and result in the patch path.
42 # They must be joinable with queue directory and result in the patch path.
43 normname = util.normpath
43 normname = util.normpath
44
44
45 class statusentry(object):
45 class statusentry(object):
46 def __init__(self, rev, name=None):
46 def __init__(self, rev, name=None):
47 if not name:
47 if not name:
48 fields = rev.split(':', 1)
48 fields = rev.split(':', 1)
49 if len(fields) == 2:
49 if len(fields) == 2:
50 self.rev, self.name = fields
50 self.rev, self.name = fields
51 else:
51 else:
52 self.rev, self.name = None, None
52 self.rev, self.name = None, None
53 else:
53 else:
54 self.rev, self.name = rev, name
54 self.rev, self.name = rev, name
55
55
56 def __str__(self):
56 def __str__(self):
57 return self.rev + ':' + self.name
57 return self.rev + ':' + self.name
58
58
59 class patchheader(object):
59 class patchheader(object):
60 def __init__(self, pf):
60 def __init__(self, pf):
61 def eatdiff(lines):
61 def eatdiff(lines):
62 while lines:
62 while lines:
63 l = lines[-1]
63 l = lines[-1]
64 if (l.startswith("diff -") or
64 if (l.startswith("diff -") or
65 l.startswith("Index:") or
65 l.startswith("Index:") or
66 l.startswith("===========")):
66 l.startswith("===========")):
67 del lines[-1]
67 del lines[-1]
68 else:
68 else:
69 break
69 break
70 def eatempty(lines):
70 def eatempty(lines):
71 while lines:
71 while lines:
72 l = lines[-1]
72 l = lines[-1]
73 if re.match('\s*$', l):
73 if re.match('\s*$', l):
74 del lines[-1]
74 del lines[-1]
75 else:
75 else:
76 break
76 break
77
77
78 message = []
78 message = []
79 comments = []
79 comments = []
80 user = None
80 user = None
81 date = None
81 date = None
82 format = None
82 format = None
83 subject = None
83 subject = None
84 diffstart = 0
84 diffstart = 0
85
85
86 for line in file(pf):
86 for line in file(pf):
87 line = line.rstrip()
87 line = line.rstrip()
88 if line.startswith('diff --git'):
88 if line.startswith('diff --git'):
89 diffstart = 2
89 diffstart = 2
90 break
90 break
91 if diffstart:
91 if diffstart:
92 if line.startswith('+++ '):
92 if line.startswith('+++ '):
93 diffstart = 2
93 diffstart = 2
94 break
94 break
95 if line.startswith("--- "):
95 if line.startswith("--- "):
96 diffstart = 1
96 diffstart = 1
97 continue
97 continue
98 elif format == "hgpatch":
98 elif format == "hgpatch":
99 # parse values when importing the result of an hg export
99 # parse values when importing the result of an hg export
100 if line.startswith("# User "):
100 if line.startswith("# User "):
101 user = line[7:]
101 user = line[7:]
102 elif line.startswith("# Date "):
102 elif line.startswith("# Date "):
103 date = line[7:]
103 date = line[7:]
104 elif not line.startswith("# ") and line:
104 elif not line.startswith("# ") and line:
105 message.append(line)
105 message.append(line)
106 format = None
106 format = None
107 elif line == '# HG changeset patch':
107 elif line == '# HG changeset patch':
108 format = "hgpatch"
108 format = "hgpatch"
109 elif (format != "tagdone" and (line.startswith("Subject: ") or
109 elif (format != "tagdone" and (line.startswith("Subject: ") or
110 line.startswith("subject: "))):
110 line.startswith("subject: "))):
111 subject = line[9:]
111 subject = line[9:]
112 format = "tag"
112 format = "tag"
113 elif (format != "tagdone" and (line.startswith("From: ") or
113 elif (format != "tagdone" and (line.startswith("From: ") or
114 line.startswith("from: "))):
114 line.startswith("from: "))):
115 user = line[6:]
115 user = line[6:]
116 format = "tag"
116 format = "tag"
117 elif format == "tag" and line == "":
117 elif format == "tag" and line == "":
118 # when looking for tags (subject: from: etc) they
118 # when looking for tags (subject: from: etc) they
119 # end once you find a blank line in the source
119 # end once you find a blank line in the source
120 format = "tagdone"
120 format = "tagdone"
121 elif message or line:
121 elif message or line:
122 message.append(line)
122 message.append(line)
123 comments.append(line)
123 comments.append(line)
124
124
125 eatdiff(message)
125 eatdiff(message)
126 eatdiff(comments)
126 eatdiff(comments)
127 eatempty(message)
127 eatempty(message)
128 eatempty(comments)
128 eatempty(comments)
129
129
130 # make sure message isn't empty
130 # make sure message isn't empty
131 if format and format.startswith("tag") and subject:
131 if format and format.startswith("tag") and subject:
132 message.insert(0, "")
132 message.insert(0, "")
133 message.insert(0, subject)
133 message.insert(0, subject)
134
134
135 self.message = message
135 self.message = message
136 self.comments = comments
136 self.comments = comments
137 self.user = user
137 self.user = user
138 self.date = date
138 self.date = date
139 self.haspatch = diffstart > 1
139 self.haspatch = diffstart > 1
140
140
141 def setuser(self, user):
141 def setuser(self, user):
142 if not self.updateheader(['From: ', '# User '], user):
142 if not self.updateheader(['From: ', '# User '], user):
143 try:
143 try:
144 patchheaderat = self.comments.index('# HG changeset patch')
144 patchheaderat = self.comments.index('# HG changeset patch')
145 self.comments.insert(patchheaderat + 1,'# User ' + user)
145 self.comments.insert(patchheaderat + 1,'# User ' + user)
146 except ValueError:
146 except ValueError:
147 self.comments = ['From: ' + user, ''] + self.comments
147 self.comments = ['From: ' + user, ''] + self.comments
148 self.user = user
148 self.user = user
149
149
150 def setdate(self, date):
150 def setdate(self, date):
151 if self.updateheader(['# Date '], date):
151 if self.updateheader(['# Date '], date):
152 self.date = date
152 self.date = date
153
153
154 def setmessage(self, message):
154 def setmessage(self, message):
155 if self.comments:
155 if self.comments:
156 self._delmsg()
156 self._delmsg()
157 self.message = [message]
157 self.message = [message]
158 self.comments += self.message
158 self.comments += self.message
159
159
160 def updateheader(self, prefixes, new):
160 def updateheader(self, prefixes, new):
161 '''Update all references to a field in the patch header.
161 '''Update all references to a field in the patch header.
162 Return whether the field is present.'''
162 Return whether the field is present.'''
163 res = False
163 res = False
164 for prefix in prefixes:
164 for prefix in prefixes:
165 for i in xrange(len(self.comments)):
165 for i in xrange(len(self.comments)):
166 if self.comments[i].startswith(prefix):
166 if self.comments[i].startswith(prefix):
167 self.comments[i] = prefix + new
167 self.comments[i] = prefix + new
168 res = True
168 res = True
169 break
169 break
170 return res
170 return res
171
171
172 def __str__(self):
172 def __str__(self):
173 if not self.comments:
173 if not self.comments:
174 return ''
174 return ''
175 return '\n'.join(self.comments) + '\n\n'
175 return '\n'.join(self.comments) + '\n\n'
176
176
    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    # drop the subject line; the first two message lines
                    # (subject + blank) are represented by it
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        # walk comments in step with the remaining message lines and
        # delete each comment line matching the next message line
        # NOTE(review): assumes every message line occurs in
        # self.comments in order; otherwise the while loop raises
        # IndexError -- confirm callers guarantee this
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]
193
193
194 class queue(object):
194 class queue(object):
195 def __init__(self, ui, path, patchdir=None):
195 def __init__(self, ui, path, patchdir=None):
196 self.basepath = path
196 self.basepath = path
197 self.path = patchdir or os.path.join(path, "patches")
197 self.path = patchdir or os.path.join(path, "patches")
198 self.opener = util.opener(self.path)
198 self.opener = util.opener(self.path)
199 self.ui = ui
199 self.ui = ui
200 self.applied_dirty = 0
200 self.applied_dirty = 0
201 self.series_dirty = 0
201 self.series_dirty = 0
202 self.series_path = "series"
202 self.series_path = "series"
203 self.status_path = "status"
203 self.status_path = "status"
204 self.guards_path = "guards"
204 self.guards_path = "guards"
205 self.active_guards = None
205 self.active_guards = None
206 self.guards_dirty = False
206 self.guards_dirty = False
207 self._diffopts = None
207 self._diffopts = None
208
208
209 @util.propertycache
209 @util.propertycache
210 def applied(self):
210 def applied(self):
211 if os.path.exists(self.join(self.status_path)):
211 if os.path.exists(self.join(self.status_path)):
212 lines = self.opener(self.status_path).read().splitlines()
212 lines = self.opener(self.status_path).read().splitlines()
213 return [statusentry(l) for l in lines]
213 return [statusentry(l) for l in lines]
214 return []
214 return []
215
215
216 @util.propertycache
216 @util.propertycache
217 def full_series(self):
217 def full_series(self):
218 if os.path.exists(self.join(self.series_path)):
218 if os.path.exists(self.join(self.series_path)):
219 return self.opener(self.series_path).read().splitlines()
219 return self.opener(self.series_path).read().splitlines()
220 return []
220 return []
221
221
    @util.propertycache
    def series(self):
        # parse_series() assigns self.series directly, so the instance
        # attribute it created is what gets returned (and cached)
        self.parse_series()
        return self.series
226
226
    @util.propertycache
    def series_guards(self):
        # parse_series() assigns self.series_guards directly; return the
        # freshly parsed guard lists (parallel to self.series)
        self.parse_series()
        return self.series_guards
231
231
232 def invalidate(self):
232 def invalidate(self):
233 for a in 'applied full_series series series_guards'.split():
233 for a in 'applied full_series series series_guards'.split():
234 if a in self.__dict__:
234 if a in self.__dict__:
235 delattr(self, a)
235 delattr(self, a)
236 self.applied_dirty = 0
236 self.applied_dirty = 0
237 self.series_dirty = 0
237 self.series_dirty = 0
238 self.guards_dirty = False
238 self.guards_dirty = False
239 self.active_guards = None
239 self.active_guards = None
240
240
241 def diffopts(self):
241 def diffopts(self):
242 if self._diffopts is None:
242 if self._diffopts is None:
243 self._diffopts = patch.diffopts(self.ui)
243 self._diffopts = patch.diffopts(self.ui)
244 return self._diffopts
244 return self._diffopts
245
245
246 def join(self, *p):
246 def join(self, *p):
247 return os.path.join(self.path, *p)
247 return os.path.join(self.path, *p)
248
248
249 def find_series(self, patch):
249 def find_series(self, patch):
250 pre = re.compile("(\s*)([^#]+)")
250 pre = re.compile("(\s*)([^#]+)")
251 index = 0
251 index = 0
252 for l in self.full_series:
252 for l in self.full_series:
253 m = pre.match(l)
253 m = pre.match(l)
254 if m:
254 if m:
255 s = m.group(2)
255 s = m.group(2)
256 s = s.rstrip()
256 s = s.rstrip()
257 if s == patch:
257 if s == patch:
258 return index
258 return index
259 index += 1
259 index += 1
260 return None
260 return None
261
261
    # matches one '#+guard' / '#-guard' annotation in a series line
    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
263
263
    def parse_series(self):
        '''Parse full_series into self.series (patch names) and
        self.series_guards (one guard list per patch, parallel lists).

        Raises util.Abort when a patch name appears twice.'''
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
284
284
285 def check_guard(self, guard):
285 def check_guard(self, guard):
286 if not guard:
286 if not guard:
287 return _('guard cannot be an empty string')
287 return _('guard cannot be an empty string')
288 bad_chars = '# \t\r\n\f'
288 bad_chars = '# \t\r\n\f'
289 first = guard[0]
289 first = guard[0]
290 if first in '-+':
290 if first in '-+':
291 return (_('guard %r starts with invalid character: %r') %
291 return (_('guard %r starts with invalid character: %r') %
292 (guard, first))
292 (guard, first))
293 for c in bad_chars:
293 for c in bad_chars:
294 if c in guard:
294 if c in guard:
295 return _('invalid character in guard %r: %r') % (guard, c)
295 return _('invalid character in guard %r: %r') % (guard, c)
296
296
297 def set_active(self, guards):
297 def set_active(self, guards):
298 for guard in guards:
298 for guard in guards:
299 bad = self.check_guard(guard)
299 bad = self.check_guard(guard)
300 if bad:
300 if bad:
301 raise util.Abort(bad)
301 raise util.Abort(bad)
302 guards = sorted(set(guards))
302 guards = sorted(set(guards))
303 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
303 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
304 self.active_guards = guards
304 self.active_guards = guards
305 self.guards_dirty = True
305 self.guards_dirty = True
306
306
307 def active(self):
307 def active(self):
308 if self.active_guards is None:
308 if self.active_guards is None:
309 self.active_guards = []
309 self.active_guards = []
310 try:
310 try:
311 guards = self.opener(self.guards_path).read().split()
311 guards = self.opener(self.guards_path).read().split()
312 except IOError, err:
312 except IOError, err:
313 if err.errno != errno.ENOENT: raise
313 if err.errno != errno.ENOENT: raise
314 guards = []
314 guards = []
315 for i, guard in enumerate(guards):
315 for i, guard in enumerate(guards):
316 bad = self.check_guard(guard)
316 bad = self.check_guard(guard)
317 if bad:
317 if bad:
318 self.ui.warn('%s:%d: %s\n' %
318 self.ui.warn('%s:%d: %s\n' %
319 (self.join(self.guards_path), i + 1, bad))
319 (self.join(self.guards_path), i + 1, bad))
320 else:
320 else:
321 self.active_guards.append(guard)
321 self.active_guards.append(guard)
322 return self.active_guards
322 return self.active_guards
323
323
324 def set_guards(self, idx, guards):
324 def set_guards(self, idx, guards):
325 for g in guards:
325 for g in guards:
326 if len(g) < 2:
326 if len(g) < 2:
327 raise util.Abort(_('guard %r too short') % g)
327 raise util.Abort(_('guard %r too short') % g)
328 if g[0] not in '-+':
328 if g[0] not in '-+':
329 raise util.Abort(_('guard %r starts with invalid char') % g)
329 raise util.Abort(_('guard %r starts with invalid char') % g)
330 bad = self.check_guard(g[1:])
330 bad = self.check_guard(g[1:])
331 if bad:
331 if bad:
332 raise util.Abort(bad)
332 raise util.Abort(bad)
333 drop = self.guard_re.sub('', self.full_series[idx])
333 drop = self.guard_re.sub('', self.full_series[idx])
334 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
334 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
335 self.parse_series()
335 self.parse_series()
336 self.series_dirty = True
336 self.series_dirty = True
337
337
338 def pushable(self, idx):
338 def pushable(self, idx):
339 if isinstance(idx, str):
339 if isinstance(idx, str):
340 idx = self.series.index(idx)
340 idx = self.series.index(idx)
341 patchguards = self.series_guards[idx]
341 patchguards = self.series_guards[idx]
342 if not patchguards:
342 if not patchguards:
343 return True, None
343 return True, None
344 guards = self.active()
344 guards = self.active()
345 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
345 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
346 if exactneg:
346 if exactneg:
347 return False, exactneg[0]
347 return False, exactneg[0]
348 pos = [g for g in patchguards if g[0] == '+']
348 pos = [g for g in patchguards if g[0] == '+']
349 exactpos = [g for g in pos if g[1:] in guards]
349 exactpos = [g for g in pos if g[1:] in guards]
350 if pos:
350 if pos:
351 if exactpos:
351 if exactpos:
352 return True, exactpos[0]
352 return True, exactpos[0]
353 return False, pos
353 return False, pos
354 return True, ''
354 return True, ''
355
355
    def explain_pushable(self, idx, all_patches=False):
        '''Print why the patch at *idx* is pushed or skipped.

        With all_patches the explanation goes to ui.write and allowed
        patches are reported too; otherwise only skips are warned about
        (and only in verbose mode).'''
        # route output: ui.write when listing all patches, ui.warn else
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    # no guards at all on this patch
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
380
380
381 def save_dirty(self):
381 def save_dirty(self):
382 def write_list(items, path):
382 def write_list(items, path):
383 fp = self.opener(path, 'w')
383 fp = self.opener(path, 'w')
384 for i in items:
384 for i in items:
385 fp.write("%s\n" % i)
385 fp.write("%s\n" % i)
386 fp.close()
386 fp.close()
387 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
387 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
388 if self.series_dirty: write_list(self.full_series, self.series_path)
388 if self.series_dirty: write_list(self.full_series, self.series_path)
389 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
389 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
390
390
391 def removeundo(self, repo):
391 def removeundo(self, repo):
392 undo = repo.sjoin('undo')
392 undo = repo.sjoin('undo')
393 if not os.path.exists(undo):
393 if not os.path.exists(undo):
394 return
394 return
395 try:
395 try:
396 os.unlink(undo)
396 os.unlink(undo)
397 except OSError, inst:
397 except OSError, inst:
398 self.ui.warn(_('error removing undo: %s\n') % str(inst))
398 self.ui.warn(_('error removing undo: %s\n') % str(inst))
399
399
400 def printdiff(self, repo, node1, node2=None, files=None,
400 def printdiff(self, repo, node1, node2=None, files=None,
401 fp=None, changes=None, opts={}):
401 fp=None, changes=None, opts={}):
402 m = cmdutil.match(repo, files, opts)
402 m = cmdutil.match(repo, files, opts)
403 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
403 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
404 write = fp is None and repo.ui.write or fp.write
404 write = fp is None and repo.ui.write or fp.write
405 for chunk in chunks:
405 for chunk in chunks:
406 write(chunk)
406 write(chunk)
407
407
    def mergeone(self, repo, mergeq, head, patch, rev):
        '''Bring one patch from *mergeq* into this queue on top of
        *head*, merging with *rev* when a plain apply fails.

        Returns (err, node) where node is the resulting changeset.'''
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, n, update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        # commit the merge, reusing the original description and user
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch))
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit -- consider narrowing to Exception
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # regenerate the patch file from the merged result
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
445
445
    def qparents(self, repo, rev=None):
        '''Return the queue parent of *rev*, or of the working directory
        when rev is None -- i.e. the parent that belongs to the patch
        queue (None when nothing is applied on a merge working dir).'''
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                # not a merge: the single parent is the answer
                return p1
            if len(self.applied) == 0:
                return None
            # working dir is a merge: the queue parent is the last
            # applied patch (statusentry.rev is a hex string)
            return bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != nullid:
            # merge node: pick whichever parent is an applied patch;
            # comparison is done on hex strings, matching statusentry.rev
            arevs = [ x.rev for x in self.applied ]
            p0 = hex(pp[0])
            p1 = hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
464
464
    def mergepatch(self, repo, mergeq, series):
        '''Merge the patches of *series* from *mergeq* into this queue.

        Returns (err, head) as produced by mergeone; a non-zero err
        stops the loop.'''
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            # NOTE(review): lookup(strict=True) appears to raise on
            # unknown names, which would make this warning unreachable
            # -- confirm against mergeq.lookup
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
            if head:
                self.applied.append(statusentry(hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
503
503
504 def patch(self, repo, patchfile):
504 def patch(self, repo, patchfile):
505 '''Apply patchfile to the working directory.
505 '''Apply patchfile to the working directory.
506 patchfile: name of patch file'''
506 patchfile: name of patch file'''
507 files = {}
507 files = {}
508 try:
508 try:
509 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
509 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
510 files=files, eolmode=None)
510 files=files, eolmode=None)
511 except Exception, inst:
511 except Exception, inst:
512 self.ui.note(str(inst) + '\n')
512 self.ui.note(str(inst) + '\n')
513 if not self.ui.verbose:
513 if not self.ui.verbose:
514 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
514 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
515 return (False, files, False)
515 return (False, files, False)
516
516
517 return (True, files, fuzz)
517 return (True, files, fuzz)
518
518
519 def apply(self, repo, series, list=False, update_status=True,
519 def apply(self, repo, series, list=False, update_status=True,
520 strict=False, patchdir=None, merge=None, all_files={}):
520 strict=False, patchdir=None, merge=None, all_files={}):
521 wlock = lock = tr = None
521 wlock = lock = tr = None
522 try:
522 try:
523 wlock = repo.wlock()
523 wlock = repo.wlock()
524 lock = repo.lock()
524 lock = repo.lock()
525 tr = repo.transaction()
525 tr = repo.transaction()
526 try:
526 try:
527 ret = self._apply(repo, series, list, update_status,
527 ret = self._apply(repo, series, list, update_status,
528 strict, patchdir, merge, all_files=all_files)
528 strict, patchdir, merge, all_files=all_files)
529 tr.close()
529 tr.close()
530 self.save_dirty()
530 self.save_dirty()
531 return ret
531 return ret
532 except:
532 except:
533 try:
533 try:
534 tr.abort()
534 tr.abort()
535 finally:
535 finally:
536 repo.invalidate()
536 repo.invalidate()
537 repo.dirstate.invalidate()
537 repo.dirstate.invalidate()
538 raise
538 raise
539 finally:
539 finally:
540 del tr
540 del tr
541 release(lock, wlock)
541 release(lock, wlock)
542 self.removeundo(repo)
542 self.removeundo(repo)
543
543
544 def _apply(self, repo, series, list=False, update_status=True,
544 def _apply(self, repo, series, list=False, update_status=True,
545 strict=False, patchdir=None, merge=None, all_files={}):
545 strict=False, patchdir=None, merge=None, all_files={}):
546 '''returns (error, hash)
546 '''returns (error, hash)
547 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
547 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
548 # TODO unify with commands.py
548 # TODO unify with commands.py
549 if not patchdir:
549 if not patchdir:
550 patchdir = self.path
550 patchdir = self.path
551 err = 0
551 err = 0
552 n = None
552 n = None
553 for patchname in series:
553 for patchname in series:
554 pushable, reason = self.pushable(patchname)
554 pushable, reason = self.pushable(patchname)
555 if not pushable:
555 if not pushable:
556 self.explain_pushable(patchname, all_patches=True)
556 self.explain_pushable(patchname, all_patches=True)
557 continue
557 continue
558 self.ui.status(_("applying %s\n") % patchname)
558 self.ui.status(_("applying %s\n") % patchname)
559 pf = os.path.join(patchdir, patchname)
559 pf = os.path.join(patchdir, patchname)
560
560
561 try:
561 try:
562 ph = patchheader(self.join(patchname))
562 ph = patchheader(self.join(patchname))
563 except:
563 except:
564 self.ui.warn(_("unable to read %s\n") % patchname)
564 self.ui.warn(_("unable to read %s\n") % patchname)
565 err = 1
565 err = 1
566 break
566 break
567
567
568 message = ph.message
568 message = ph.message
569 if not message:
569 if not message:
570 message = _("imported patch %s\n") % patchname
570 message = _("imported patch %s\n") % patchname
571 else:
571 else:
572 if list:
572 if list:
573 message.append(_("\nimported patch %s") % patchname)
573 message.append(_("\nimported patch %s") % patchname)
574 message = '\n'.join(message)
574 message = '\n'.join(message)
575
575
576 if ph.haspatch:
576 if ph.haspatch:
577 (patcherr, files, fuzz) = self.patch(repo, pf)
577 (patcherr, files, fuzz) = self.patch(repo, pf)
578 all_files.update(files)
578 all_files.update(files)
579 patcherr = not patcherr
579 patcherr = not patcherr
580 else:
580 else:
581 self.ui.warn(_("patch %s is empty\n") % patchname)
581 self.ui.warn(_("patch %s is empty\n") % patchname)
582 patcherr, files, fuzz = 0, [], 0
582 patcherr, files, fuzz = 0, [], 0
583
583
584 if merge and files:
584 if merge and files:
585 # Mark as removed/merged and update dirstate parent info
585 # Mark as removed/merged and update dirstate parent info
586 removed = []
586 removed = []
587 merged = []
587 merged = []
588 for f in files:
588 for f in files:
589 if os.path.exists(repo.wjoin(f)):
589 if os.path.exists(repo.wjoin(f)):
590 merged.append(f)
590 merged.append(f)
591 else:
591 else:
592 removed.append(f)
592 removed.append(f)
593 for f in removed:
593 for f in removed:
594 repo.dirstate.remove(f)
594 repo.dirstate.remove(f)
595 for f in merged:
595 for f in merged:
596 repo.dirstate.merge(f)
596 repo.dirstate.merge(f)
597 p1, p2 = repo.dirstate.parents()
597 p1, p2 = repo.dirstate.parents()
598 repo.dirstate.setparents(p1, merge)
598 repo.dirstate.setparents(p1, merge)
599
599
600 files = patch.updatedir(self.ui, repo, files)
600 files = patch.updatedir(self.ui, repo, files)
601 match = cmdutil.matchfiles(repo, files or [])
601 match = cmdutil.matchfiles(repo, files or [])
602 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
602 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
603
603
604 if n is None:
604 if n is None:
605 raise util.Abort(_("repo commit failed"))
605 raise util.Abort(_("repo commit failed"))
606
606
607 if update_status:
607 if update_status:
608 self.applied.append(statusentry(hex(n), patchname))
608 self.applied.append(statusentry(hex(n), patchname))
609
609
610 if patcherr:
610 if patcherr:
611 self.ui.warn(_("patch failed, rejects left in working dir\n"))
611 self.ui.warn(_("patch failed, rejects left in working dir\n"))
612 err = 2
612 err = 2
613 break
613 break
614
614
615 if fuzz and strict:
615 if fuzz and strict:
616 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
616 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
617 err = 3
617 err = 3
618 break
618 break
619 return (err, n)
619 return (err, n)
620
620
621 def _cleanup(self, patches, numrevs, keep=False):
621 def _cleanup(self, patches, numrevs, keep=False):
622 if not keep:
622 if not keep:
623 r = self.qrepo()
623 r = self.qrepo()
624 if r:
624 if r:
625 r.remove(patches, True)
625 r.remove(patches, True)
626 else:
626 else:
627 for p in patches:
627 for p in patches:
628 os.unlink(self.join(p))
628 os.unlink(self.join(p))
629
629
630 if numrevs:
630 if numrevs:
631 del self.applied[:numrevs]
631 del self.applied[:numrevs]
632 self.applied_dirty = 1
632 self.applied_dirty = 1
633
633
634 for i in sorted([self.find_series(p) for p in patches], reverse=True):
634 for i in sorted([self.find_series(p) for p in patches], reverse=True):
635 del self.full_series[i]
635 del self.full_series[i]
636 self.parse_series()
636 self.parse_series()
637 self.series_dirty = 1
637 self.series_dirty = 1
638
638
    def _revpatches(self, repo, revs):
        '''Map repository revisions to the applied patch names at the
        same positions, aborting when a rev is not queue-managed or
        sits above other applied patches.'''
        firstrev = repo[self.applied[0].rev].rev()
        patches = []
        for i, rev in enumerate(revs):

            if rev < firstrev:
                raise util.Abort(_('revision %d is not managed') % rev)

            # revs must line up with the applied stack from the bottom
            ctx = repo[rev]
            base = bin(self.applied[i].rev)
            if ctx.node() != base:
                msg = _('cannot delete revision %d above applied patches')
                raise util.Abort(msg % rev)

            # warn when the changeset still carries an auto-generated
            # message rather than a real commit message
            patch = self.applied[i].name
            for fmt in ('[mq]: %s', 'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _('patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches
662
662
663 def finish(self, repo, revs):
663 def finish(self, repo, revs):
664 patches = self._revpatches(repo, sorted(revs))
664 patches = self._revpatches(repo, sorted(revs))
665 self._cleanup(patches, len(patches))
665 self._cleanup(patches, len(patches))
666
666
    def delete(self, repo, patches, opts):
        '''Implement qdelete: forget the named unapplied *patches*
        and/or the patches matching opts['rev'].'''
        if not patches and not opts.get('rev'):
            raise util.Abort(_('qdelete requires at least one revision or '
                               'patch name'))

        # validate the named patches: must exist and be unapplied
        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        # resolve opts['rev'] to patch names (ascending rev order)
        numrevs = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get('keep'))
694
694
695 def check_toppatch(self, repo):
695 def check_toppatch(self, repo):
696 if len(self.applied) > 0:
696 if len(self.applied) > 0:
697 top = bin(self.applied[-1].rev)
697 top = bin(self.applied[-1].rev)
698 pp = repo.dirstate.parents()
698 pp = repo.dirstate.parents()
699 if top not in pp:
699 if top not in pp:
700 raise util.Abort(_("working directory revision is not qtip"))
700 raise util.Abort(_("working directory revision is not qtip"))
701 return top
701 return top
702 return None
702 return None
703 def check_localchanges(self, repo, force=False, refresh=True):
703 def check_localchanges(self, repo, force=False, refresh=True):
704 m, a, r, d = repo.status()[:4]
704 m, a, r, d = repo.status()[:4]
705 if m or a or r or d:
705 if m or a or r or d:
706 if not force:
706 if not force:
707 if refresh:
707 if refresh:
708 raise util.Abort(_("local changes found, refresh first"))
708 raise util.Abort(_("local changes found, refresh first"))
709 else:
709 else:
710 raise util.Abort(_("local changes found"))
710 raise util.Abort(_("local changes found"))
711 return m, a, r, d
711 return m, a, r, d
712
712
713 _reserved = ('series', 'status', 'guards')
713 _reserved = ('series', 'status', 'guards')
714 def check_reserved_name(self, name):
714 def check_reserved_name(self, name):
715 if (name in self._reserved or name.startswith('.hg')
715 if (name in self._reserved or name.startswith('.hg')
716 or name.startswith('.mq')):
716 or name.startswith('.mq')):
717 raise util.Abort(_('"%s" cannot be used as the name of a patch')
717 raise util.Abort(_('"%s" cannot be used as the name of a patch')
718 % name)
718 % name)
719
719
720 def new(self, repo, patchfn, *pats, **opts):
720 def new(self, repo, patchfn, *pats, **opts):
721 """options:
721 """options:
722 msg: a string or a no-argument function returning a string
722 msg: a string or a no-argument function returning a string
723 """
723 """
724 msg = opts.get('msg')
724 msg = opts.get('msg')
725 force = opts.get('force')
725 force = opts.get('force')
726 user = opts.get('user')
726 user = opts.get('user')
727 date = opts.get('date')
727 date = opts.get('date')
728 if date:
728 if date:
729 date = util.parsedate(date)
729 date = util.parsedate(date)
730 self.check_reserved_name(patchfn)
730 self.check_reserved_name(patchfn)
731 if os.path.exists(self.join(patchfn)):
731 if os.path.exists(self.join(patchfn)):
732 raise util.Abort(_('patch "%s" already exists') % patchfn)
732 raise util.Abort(_('patch "%s" already exists') % patchfn)
733 if opts.get('include') or opts.get('exclude') or pats:
733 if opts.get('include') or opts.get('exclude') or pats:
734 match = cmdutil.match(repo, pats, opts)
734 match = cmdutil.match(repo, pats, opts)
735 # detect missing files in pats
735 # detect missing files in pats
736 def badfn(f, msg):
736 def badfn(f, msg):
737 raise util.Abort('%s: %s' % (f, msg))
737 raise util.Abort('%s: %s' % (f, msg))
738 match.bad = badfn
738 match.bad = badfn
739 m, a, r, d = repo.status(match=match)[:4]
739 m, a, r, d = repo.status(match=match)[:4]
740 else:
740 else:
741 m, a, r, d = self.check_localchanges(repo, force)
741 m, a, r, d = self.check_localchanges(repo, force)
742 match = cmdutil.matchfiles(repo, m + a + r)
742 match = cmdutil.matchfiles(repo, m + a + r)
743 commitfiles = m + a + r
743 commitfiles = m + a + r
744 self.check_toppatch(repo)
744 self.check_toppatch(repo)
745 insert = self.full_series_end()
745 insert = self.full_series_end()
746 wlock = repo.wlock()
746 wlock = repo.wlock()
747 try:
747 try:
748 # if patch file write fails, abort early
748 # if patch file write fails, abort early
749 p = self.opener(patchfn, "w")
749 p = self.opener(patchfn, "w")
750 try:
750 try:
751 if date:
751 if date:
752 p.write("# HG changeset patch\n")
752 p.write("# HG changeset patch\n")
753 if user:
753 if user:
754 p.write("# User " + user + "\n")
754 p.write("# User " + user + "\n")
755 p.write("# Date %d %d\n\n" % date)
755 p.write("# Date %d %d\n\n" % date)
756 elif user:
756 elif user:
757 p.write("From: " + user + "\n\n")
757 p.write("From: " + user + "\n\n")
758
758
759 if hasattr(msg, '__call__'):
759 if hasattr(msg, '__call__'):
760 msg = msg()
760 msg = msg()
761 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
761 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
762 n = repo.commit(commitmsg, user, date, match=match, force=True)
762 n = repo.commit(commitmsg, user, date, match=match, force=True)
763 if n is None:
763 if n is None:
764 raise util.Abort(_("repo commit failed"))
764 raise util.Abort(_("repo commit failed"))
765 try:
765 try:
766 self.full_series[insert:insert] = [patchfn]
766 self.full_series[insert:insert] = [patchfn]
767 self.applied.append(statusentry(hex(n), patchfn))
767 self.applied.append(statusentry(hex(n), patchfn))
768 self.parse_series()
768 self.parse_series()
769 self.series_dirty = 1
769 self.series_dirty = 1
770 self.applied_dirty = 1
770 self.applied_dirty = 1
771 if msg:
771 if msg:
772 msg = msg + "\n\n"
772 msg = msg + "\n\n"
773 p.write(msg)
773 p.write(msg)
774 if commitfiles:
774 if commitfiles:
775 diffopts = self.diffopts()
775 diffopts = self.diffopts()
776 if opts.get('git'): diffopts.git = True
776 if opts.get('git'): diffopts.git = True
777 parent = self.qparents(repo, n)
777 parent = self.qparents(repo, n)
778 chunks = patch.diff(repo, node1=parent, node2=n,
778 chunks = patch.diff(repo, node1=parent, node2=n,
779 match=match, opts=diffopts)
779 match=match, opts=diffopts)
780 for chunk in chunks:
780 for chunk in chunks:
781 p.write(chunk)
781 p.write(chunk)
782 p.close()
782 p.close()
783 wlock.release()
783 wlock.release()
784 wlock = None
784 wlock = None
785 r = self.qrepo()
785 r = self.qrepo()
786 if r: r.add([patchfn])
786 if r: r.add([patchfn])
787 except:
787 except:
788 repo.rollback()
788 repo.rollback()
789 raise
789 raise
790 except Exception:
790 except Exception:
791 patchpath = self.join(patchfn)
791 patchpath = self.join(patchfn)
792 try:
792 try:
793 os.unlink(patchpath)
793 os.unlink(patchpath)
794 except:
794 except:
795 self.ui.warn(_('error unlinking %s\n') % patchpath)
795 self.ui.warn(_('error unlinking %s\n') % patchpath)
796 raise
796 raise
797 self.removeundo(repo)
797 self.removeundo(repo)
798 finally:
798 finally:
799 release(wlock)
799 release(wlock)
800
800
801 def strip(self, repo, rev, update=True, backup="all", force=None):
801 def strip(self, repo, rev, update=True, backup="all", force=None):
802 wlock = lock = None
802 wlock = lock = None
803 try:
803 try:
804 wlock = repo.wlock()
804 wlock = repo.wlock()
805 lock = repo.lock()
805 lock = repo.lock()
806
806
807 if update:
807 if update:
808 self.check_localchanges(repo, force=force, refresh=False)
808 self.check_localchanges(repo, force=force, refresh=False)
809 urev = self.qparents(repo, rev)
809 urev = self.qparents(repo, rev)
810 hg.clean(repo, urev)
810 hg.clean(repo, urev)
811 repo.dirstate.write()
811 repo.dirstate.write()
812
812
813 self.removeundo(repo)
813 self.removeundo(repo)
814 repair.strip(self.ui, repo, rev, backup)
814 repair.strip(self.ui, repo, rev, backup)
815 # strip may have unbundled a set of backed up revisions after
815 # strip may have unbundled a set of backed up revisions after
816 # the actual strip
816 # the actual strip
817 self.removeundo(repo)
817 self.removeundo(repo)
818 finally:
818 finally:
819 release(lock, wlock)
819 release(lock, wlock)
820
820
821 def isapplied(self, patch):
821 def isapplied(self, patch):
822 """returns (index, rev, patch)"""
822 """returns (index, rev, patch)"""
823 for i, a in enumerate(self.applied):
823 for i, a in enumerate(self.applied):
824 if a.name == patch:
824 if a.name == patch:
825 return (i, a.rev, a.name)
825 return (i, a.rev, a.name)
826 return None
826 return None
827
827
828 # if the exact patch name does not exist, we try a few
828 # if the exact patch name does not exist, we try a few
829 # variations. If strict is passed, we try only #1
829 # variations. If strict is passed, we try only #1
830 #
830 #
831 # 1) a number to indicate an offset in the series file
831 # 1) a number to indicate an offset in the series file
832 # 2) a unique substring of the patch name was given
832 # 2) a unique substring of the patch name was given
833 # 3) patchname[-+]num to indicate an offset in the series file
833 # 3) patchname[-+]num to indicate an offset in the series file
834 def lookup(self, patch, strict=False):
834 def lookup(self, patch, strict=False):
835 patch = patch and str(patch)
835 patch = patch and str(patch)
836
836
837 def partial_name(s):
837 def partial_name(s):
838 if s in self.series:
838 if s in self.series:
839 return s
839 return s
840 matches = [x for x in self.series if s in x]
840 matches = [x for x in self.series if s in x]
841 if len(matches) > 1:
841 if len(matches) > 1:
842 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
842 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
843 for m in matches:
843 for m in matches:
844 self.ui.warn(' %s\n' % m)
844 self.ui.warn(' %s\n' % m)
845 return None
845 return None
846 if matches:
846 if matches:
847 return matches[0]
847 return matches[0]
848 if len(self.series) > 0 and len(self.applied) > 0:
848 if len(self.series) > 0 and len(self.applied) > 0:
849 if s == 'qtip':
849 if s == 'qtip':
850 return self.series[self.series_end(True)-1]
850 return self.series[self.series_end(True)-1]
851 if s == 'qbase':
851 if s == 'qbase':
852 return self.series[0]
852 return self.series[0]
853 return None
853 return None
854
854
855 if patch is None:
855 if patch is None:
856 return None
856 return None
857 if patch in self.series:
857 if patch in self.series:
858 return patch
858 return patch
859
859
860 if not os.path.isfile(self.join(patch)):
860 if not os.path.isfile(self.join(patch)):
861 try:
861 try:
862 sno = int(patch)
862 sno = int(patch)
863 except(ValueError, OverflowError):
863 except(ValueError, OverflowError):
864 pass
864 pass
865 else:
865 else:
866 if -len(self.series) <= sno < len(self.series):
866 if -len(self.series) <= sno < len(self.series):
867 return self.series[sno]
867 return self.series[sno]
868
868
869 if not strict:
869 if not strict:
870 res = partial_name(patch)
870 res = partial_name(patch)
871 if res:
871 if res:
872 return res
872 return res
873 minus = patch.rfind('-')
873 minus = patch.rfind('-')
874 if minus >= 0:
874 if minus >= 0:
875 res = partial_name(patch[:minus])
875 res = partial_name(patch[:minus])
876 if res:
876 if res:
877 i = self.series.index(res)
877 i = self.series.index(res)
878 try:
878 try:
879 off = int(patch[minus+1:] or 1)
879 off = int(patch[minus+1:] or 1)
880 except(ValueError, OverflowError):
880 except(ValueError, OverflowError):
881 pass
881 pass
882 else:
882 else:
883 if i - off >= 0:
883 if i - off >= 0:
884 return self.series[i - off]
884 return self.series[i - off]
885 plus = patch.rfind('+')
885 plus = patch.rfind('+')
886 if plus >= 0:
886 if plus >= 0:
887 res = partial_name(patch[:plus])
887 res = partial_name(patch[:plus])
888 if res:
888 if res:
889 i = self.series.index(res)
889 i = self.series.index(res)
890 try:
890 try:
891 off = int(patch[plus+1:] or 1)
891 off = int(patch[plus+1:] or 1)
892 except(ValueError, OverflowError):
892 except(ValueError, OverflowError):
893 pass
893 pass
894 else:
894 else:
895 if i + off < len(self.series):
895 if i + off < len(self.series):
896 return self.series[i + off]
896 return self.series[i + off]
897 raise util.Abort(_("patch %s not in series") % patch)
897 raise util.Abort(_("patch %s not in series") % patch)
898
898
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False):
        """Apply unapplied patches from the series up to `patch`.

        With no target, applies the next unapplied patch (all of them
        when `all` is set).  Returns the status from apply()/mergepatch()
        on success, 1 on error conditions (guarded patch, series fully
        applied), 0 for no-op cases.  On an exception during apply, the
        working directory is cleaned up before re-raising.
        """
        wlock = repo.wlock()
        try:
            if repo.dirstate.parents()[0] not in repo.heads():
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info:
                    if info[0] < len(self.applied) - 1:
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return
                # refuse to push a patch whose guards are not satisfied
                pushable, reason = self.pushable(patch)
                if not pushable:
                    if reason:
                        reason = _('guarded by %r') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.series_end()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            if start > 0:
                self.check_toppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            # the slice of series entries that will actually be applied
            s = self.series[start:end]
            all_files = {}
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                # revert the working directory to its pre-push state
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.parents()[0]
                hg.revert(repo, node, None)
                unknown = repo.status(unknown=True)[4]
                # only remove unknown files that we know we touched or
                # created while patching
                for f in unknown:
                    if f in all_files:
                        util.unlink(repo.wjoin(f))
                self.ui.warn(_('done\n'))
                raise

            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
    def pop(self, repo, patch=None, force=False, update=True, all=False):
        """Unapply applied patches down to (and including the patches
        above) `patch`, the topmost patch by default, or all of them
        when `all` is set.

        When `update` is in effect the working directory is moved to
        the remaining patch parent via a simplified in-place update,
        then the popped revisions are stripped from the repository.
        """
        def getfile(f, rev, flags):
            # write file `f` at filelog revision `rev` into the
            # working directory
            t = repo.file(f).read(rev)
            repo.wwrite(f, t, flags)

        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if len(self.applied) == 0:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            # first applied-stack index that will be popped
            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                # force an update anyway if a dirstate parent is one of
                # the applied patch revisions
                parents = repo.dirstate.parents()
                rr = [ bin(x.rev) for x in self.applied ]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                # skip the update when none of the popped revisions is a
                # working directory parent
                parents = [p.hex() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.rev in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.check_localchanges(repo)

            self.applied_dirty = 1
            end = len(self.applied)
            rev = bin(self.applied[start].rev)
            if update:
                top = self.check_toppatch(repo)

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                changes = repo.changelog.read(qp)
                mmap = repo.manifest.read(changes[0])
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in m:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in r:
                    getfile(f, mmap[f], mmap.flags(f))
                for f in m + r:
                    repo.dirstate.normal(f)
                for f in a:
                    # files added by the popped patches disappear from
                    # the working directory
                    try:
                        os.unlink(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                    except: pass
                    repo.dirstate.forget(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, rev, update=False, backup='strip')
            if len(self.applied):
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
1094 def diff(self, repo, pats, opts):
1094 def diff(self, repo, pats, opts):
1095 top = self.check_toppatch(repo)
1095 top = self.check_toppatch(repo)
1096 if not top:
1096 if not top:
1097 self.ui.write(_("no patches applied\n"))
1097 self.ui.write(_("no patches applied\n"))
1098 return
1098 return
1099 qp = self.qparents(repo, top)
1099 qp = self.qparents(repo, top)
1100 self._diffopts = patch.diffopts(self.ui, opts)
1100 self._diffopts = patch.diffopts(self.ui, opts)
1101 self.printdiff(repo, qp, files=pats, opts=opts)
1101 self.printdiff(repo, qp, files=pats, opts=opts)
1102
1102
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the topmost applied patch from the current working
        directory state, optionally updating its message/user/date.

        When the top patch is also the repository tip, the revision is
        stripped and re-committed in place (fast path); otherwise the
        patch file is regenerated with printdiff and the patch is
        re-applied via pop(force=True)/push(force=True).
        """
        if len(self.applied) == 0:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()
        try:
            self.check_toppatch(repo)
            (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
            top = bin(top)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))
            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            # parsed header (message/user/date comments) of the patch file
            ph = patchheader(self.join(patchfn))

            patchf = self.opener(patchfn, 'r')

            # if the patch was a git patch, refresh it as a git patch
            for line in patchf:
                if line.startswith('diff --git'):
                    self.diffopts().git = True
                    break

            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            patchf.seek(0)
            patchf.truncate()

            comments = str(ph)
            if comments:
                patchf.write(comments)

            if opts.get('git'):
                self.diffopts().git = True
            tip = repo.changelog.tip()
            if top == tip:
                # if the top of our patch queue is also the tip, there is an
                # optimization here. We update the dirstate in place and strip
                # off the tip commit. Then just commit the current directory
                # tree. We can also send repo.commit the list of files
                # changed to speed up the diff
                #
                # in short mode, we only diff the files included in the
                # patch already plus specified files
                #
                # this should really read:
                #   mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
                # but we do it backwards to take advantage of manifest/chlog
                # caching against the next repo.status call
                #
                mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
                changes = repo.changelog.read(tip)
                man = repo.manifest.read(changes[0])
                aaa = aa[:]
                matchfn = cmdutil.match(repo, pats, opts)
                if opts.get('short'):
                    # if amending a patch, we start with existing
                    # files plus specified files - unfiltered
                    match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                    # filter with inc/exl options
                    matchfn = cmdutil.match(repo, opts=opts)
                else:
                    match = cmdutil.matchall(repo)
                m, a, r, d = repo.status(match=match)[:4]

                # we might end up with files that were added between
                # tip and the dirstate parent, but then changed in the
                # local dirstate. in this case, we want them to only
                # show up in the added section
                for x in m:
                    if x not in aa:
                        mm.append(x)
                # we might end up with files added by the local dirstate that
                # were deleted by the patch. In this case, they should only
                # show up in the changed section.
                for x in a:
                    if x in dd:
                        del dd[dd.index(x)]
                        mm.append(x)
                    else:
                        aa.append(x)
                # make sure any files deleted in the local dirstate
                # are not in the add or change column of the patch
                forget = []
                for x in d + r:
                    if x in aa:
                        del aa[aa.index(x)]
                        forget.append(x)
                        continue
                    elif x in mm:
                        del mm[mm.index(x)]
                    dd.append(x)

                # de-duplicate the merged file lists
                m = list(set(mm))
                r = list(set(dd))
                a = list(set(aa))
                c = [filter(matchfn, l) for l in (m, a, r)]
                match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
                chunks = patch.diff(repo, patchparent, match=match,
                                    changes=c, opts=self.diffopts())
                for chunk in chunks:
                    patchf.write(chunk)

                try:
                    if self.diffopts().git:
                        copies = {}
                        for dst in a:
                            src = repo.dirstate.copied(dst)
                            # during qfold, the source file for copies may
                            # be removed. Treat this as a simple add.
                            if src is not None and src in repo.dirstate:
                                copies.setdefault(src, []).append(dst)
                            repo.dirstate.add(dst)
                        # remember the copies between patchparent and tip
                        for dst in aaa:
                            f = repo.file(dst)
                            src = f.renamed(man[dst])
                            if src:
                                copies.setdefault(src[0], []).extend(copies.get(dst, []))
                                if dst in a:
                                    copies[src[0]].append(dst)
                            # we can't copy a file created by the patch itself
                            if dst in copies:
                                del copies[dst]
                        for src, dsts in copies.iteritems():
                            for dst in dsts:
                                repo.dirstate.copy(src, dst)
                    else:
                        for dst in a:
                            repo.dirstate.add(dst)
                        # Drop useless copy information
                        for f in list(repo.dirstate.copies()):
                            repo.dirstate.copy(None, f)
                    for f in r:
                        repo.dirstate.remove(f)
                    # if the patch excludes a modified file, mark that
                    # file with mtime=0 so status can see it.
                    mm = []
                    for i in xrange(len(m)-1, -1, -1):
                        if not matchfn(m[i]):
                            mm.append(m[i])
                            del m[i]
                    for f in m:
                        repo.dirstate.normal(f)
                    for f in mm:
                        repo.dirstate.normallookup(f)
                    for f in forget:
                        repo.dirstate.forget(f)

                    if not msg:
                        if not ph.message:
                            message = "[mq]: %s\n" % patchfn
                        else:
                            message = "\n".join(ph.message)
                    else:
                        message = msg

                    user = ph.user or changes[1]

                    # assumes strip can roll itself back if interrupted
                    repo.dirstate.setparents(*cparents)
                    self.applied.pop()
                    self.applied_dirty = 1
                    self.strip(repo, top, update=False,
                               backup='strip')
                except:
                    repo.dirstate.invalidate()
                    raise

                try:
                    # might be nice to attempt to roll back strip after this
                    patchf.rename()
                    n = repo.commit(message, user, ph.date, match=match,
                                    force=True)
                    self.applied.append(statusentry(hex(n), patchfn))
                except:
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                    self.save_dirty()
                    self.ui.warn(_('refresh interrupted while patch was popped! '
                                   '(revert --all, qpush to recover)\n'))
                    raise
            else:
                # slow path: regenerate the patch file and re-apply it
                self.printdiff(repo, patchparent, fp=patchf)
                patchf.rename()
                added = repo.status()[1]
                for a in added:
                    f = repo.wjoin(a)
                    try:
                        os.unlink(f)
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    try: os.removedirs(os.path.dirname(f))
                    except: pass
                    # forget the file copies in the dirstate
                    # push should readd the files later on
                    repo.dirstate.forget(a)
                self.pop(repo, force=True)
                self.push(repo, force=True)
        finally:
            wlock.release()
1317 wlock.release()
1318 self.removeundo(repo)
1318 self.removeundo(repo)
1319
1319
1320 def init(self, repo, create=False):
1320 def init(self, repo, create=False):
1321 if not create and os.path.isdir(self.path):
1321 if not create and os.path.isdir(self.path):
1322 raise util.Abort(_("patch queue directory already exists"))
1322 raise util.Abort(_("patch queue directory already exists"))
1323 try:
1323 try:
1324 os.mkdir(self.path)
1324 os.mkdir(self.path)
1325 except OSError, inst:
1325 except OSError, inst:
1326 if inst.errno != errno.EEXIST or not create:
1326 if inst.errno != errno.EEXIST or not create:
1327 raise
1327 raise
1328 if create:
1328 if create:
1329 return self.qrepo(create=True)
1329 return self.qrepo(create=True)
1330
1330
def unapplied(self, repo, patch=None):
    """Return [(index, name)] for the pushable patches not yet applied.

    Scanning starts right after the named patch, or at the first
    unapplied position when no patch is given. Guarded (non-pushable)
    patches are skipped without explanation output.
    """
    if patch and patch not in self.series:
        raise util.Abort(_("patch %s is not in series file") % patch)
    if patch:
        first = self.series.index(patch) + 1
    else:
        first = self.series_end()
    result = []
    idx = first
    while idx < len(self.series):
        ok, reason = self.pushable(idx)
        if ok:
            result.append((idx, self.series[idx]))
            self.explain_pushable(idx)
        idx += 1
    return result
1345
1345
def qseries(self, repo, missing=None, start=0, length=None, status=None,
            summary=False):
    """Write the patch series (or stray patch files) to the ui.

    Without missing, prints self.series[start:start+length]; in verbose
    mode each line is prefixed with the series index and a status letter
    ('A' applied, 'U' pushable, 'G' otherwise — presumably guarded, per
    self.pushable), and in non-verbose mode entries whose status differs
    from the requested one are filtered out. With missing, instead walks
    the patch directory and prints files that are not in the series
    (prefixed 'D ' when verbose). With summary, the first line of each
    patch's header message is appended.
    """
    def displayname(pfx, patchname):
        # render one output line: prefix + name (+ first message line)
        if summary:
            ph = patchheader(self.join(patchname))
            msg = ph.message
            msg = msg and ': ' + msg[0] or ': '
        else:
            msg = ''
        msg = "%s%s%s" % (pfx, patchname, msg)
        if self.ui.interactive():
            # keep each entry on a single terminal line
            msg = util.ellipsis(msg, util.termwidth())
        self.ui.write(msg + '\n')

    applied = set([p.name for p in self.applied])
    if length is None:
        length = len(self.series) - start
    if not missing:
        if self.ui.verbose:
            # width of the largest index to be printed, for alignment
            idxwidth = len(str(start+length - 1))
        for i in xrange(start, start+length):
            patch = self.series[i]
            if patch in applied:
                stat = 'A'
            elif self.pushable(i)[0]:
                stat = 'U'
            else:
                stat = 'G'
            pfx = ''
            if self.ui.verbose:
                pfx = '%*d %s ' % (idxwidth, i, stat)
            elif status and status != stat:
                # non-verbose: show only entries matching the requested status
                continue
            displayname(pfx, patch)
    else:
        # list files in the patch directory that are not managed:
        # not in the series, not mq bookkeeping files, not dotfiles
        msng_list = []
        for root, dirs, files in os.walk(self.path):
            d = root[len(self.path) + 1:]
            for f in files:
                fl = os.path.join(d, f)
                if (fl not in self.series and
                    fl not in (self.status_path, self.series_path,
                               self.guards_path)
                    and not fl.startswith('.')):
                    msng_list.append(fl)
        for x in sorted(msng_list):
            pfx = self.ui.verbose and ('D ') or ''
            displayname(pfx, x)
1394
1394
def issaveline(self, l):
    """Return True if status entry l marks a qsave checkpoint.

    The original returned True or fell through to an implicit None;
    returning the comparison directly yields an explicit bool either
    way, which is backward compatible for the boolean contexts this
    method is used in.
    """
    return l.name == '.hg.patches.save.line'
1398
1398
def qrepo(self, create=False):
    """Return the versioned patch repository, or None when the patch
    directory is not itself a Mercurial repository (and create is
    not requested)."""
    versioned = os.path.isdir(self.join(".hg"))
    if not (create or versioned):
        return None
    return hg.repository(self.ui, path=self.path, create=create)
1402
1402
def restore(self, repo, rev, delete=None, qupdate=None):
    """Restore queue state recorded by a qsave changeset.

    Parses the description of changeset rev: lines after a
    'Patch Data:' marker are statusentry lines (entries with a rev are
    applied patches, the rest form the series), and a 'Dirstate:' line
    carries the saved queue-repository parents. With delete, also
    strips the save changeset itself (only when it is a head). With
    qupdate (and saved parents present), updates the queue repository
    to the first saved parent. Returns 1 on failure.
    """
    c = repo.changelog.read(rev)
    desc = c[4].strip()
    lines = desc.splitlines()
    i = 0
    datastart = None
    series = []
    applied = []
    qpp = None
    for i, line in enumerate(lines):
        if line == 'Patch Data:':
            datastart = i + 1
        elif line.startswith('Dirstate:'):
            # 'Dirstate: <hex1> <hex2>' -> saved queue repo parents
            l = line.rstrip()
            l = l[10:].split(' ')
            qpp = [ bin(x) for x in l ]
        elif datastart != None:
            # everything after the marker is patch data
            l = line.rstrip()
            se = statusentry(l)
            file_ = se.name
            if se.rev:
                applied.append(se)
            else:
                series.append(file_)
    if datastart is None:
        self.ui.warn(_("No saved patch data found\n"))
        return 1
    self.ui.warn(_("restoring status: %s\n") % lines[0])
    self.full_series = series
    self.applied = applied
    self.parse_series()
    self.series_dirty = 1
    self.applied_dirty = 1
    heads = repo.changelog.heads()
    if delete:
        if rev not in heads:
            # stripping a non-head would remove its descendants too
            self.ui.warn(_("save entry has children, leaving it alone\n"))
        else:
            self.ui.warn(_("removing save entry %s\n") % short(rev))
            pp = repo.dirstate.parents()
            # only update the working dir if it sat on the save entry
            if rev in pp:
                update = True
            else:
                update = False
            self.strip(repo, rev, update=update, backup='strip')
    if qpp:
        self.ui.warn(_("saved queue repository parents: %s %s\n") %
                     (short(qpp[0]), short(qpp[1])))
        if qupdate:
            self.ui.status(_("queue directory updating\n"))
            r = self.qrepo()
            if not r:
                self.ui.warn(_("Unable to load queue repository\n"))
                return 1
            hg.clean(r, qpp[0])
1458
1458
def save(self, repo, msg=None):
    """Commit a save entry recording the current queue state.

    The commit message embeds the queue-repo dirstate parents (if a
    queue repo exists) and, after a 'Patch Data:' marker, one line per
    applied patch plus ':'-prefixed full-series entries — exactly the
    format restore() parses. The new changeset is appended to applied
    as the '.hg.patches.save.line' sentinel. Returns 1 on failure.
    """
    if len(self.applied) == 0:
        self.ui.warn(_("save: no patches applied, exiting\n"))
        return 1
    if self.issaveline(self.applied[-1]):
        # the previous save has not been consumed yet
        self.ui.warn(_("status is already saved\n"))
        return 1

    # ':'-prefix distinguishes series entries from applied ones
    ar = [ ':' + x for x in self.full_series ]
    if not msg:
        msg = _("hg patches saved state")
    else:
        msg = "hg patches: " + msg.rstrip('\r\n')
    r = self.qrepo()
    if r:
        pp = r.dirstate.parents()
        msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
    msg += "\n\nPatch Data:\n"
    text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
               "\n".join(ar) + '\n' or "")
    n = repo.commit(text, force=True)
    if not n:
        self.ui.warn(_("repo commit failed\n"))
        return 1
    self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
    self.applied_dirty = 1
    self.removeundo(repo)
1486
1486
def full_series_end(self):
    """Return the full_series index just past the last applied patch.

    0 when nothing is applied; the series length when the last applied
    patch cannot be located in the series file.
    """
    if not self.applied:
        return 0
    last = self.applied[-1].name
    pos = self.find_series(last)
    if pos is None:
        return len(self.full_series)
    return pos + 1
1495
1495
def series_end(self, all_patches=False):
    """If all_patches is False, return the index of the next pushable patch
    in the series, or the series length. If all_patches is True, return the
    index of the first patch past the last applied one.

    Returns 0 when the last applied patch is not in the series at all.
    """
    def advance(first):
        # skip non-pushable (guarded) patches unless every patch counts
        if all_patches:
            return first
        idx = first
        while idx < len(self.series):
            ok, reason = self.pushable(idx)
            if ok:
                return idx
            self.explain_pushable(idx)
            idx += 1
        return idx

    if not self.applied:
        return advance(0)
    last = self.applied[-1].name
    try:
        pos = self.series.index(last)
    except ValueError:
        return 0
    return advance(pos + 1)
1521
1521
def appliedname(self, index):
    """Display name of the applied patch at index.

    Verbose mode prefixes the patch's series index ('IDX NAME').
    """
    name = self.applied[index].name
    if not self.ui.verbose:
        return name
    return "%d %s" % (self.series.index(name), name)
1529
1529
def qimport(self, repo, files, patchname=None, rev=None, existing=None,
            force=None, git=False):
    """Bring patches under mq control.

    Two modes: with rev, export existing changesets as patches and mark
    them applied (they must form a linear path — to qbase when patches
    are applied, otherwise to a head); with files, copy patch files (or
    stdin via '-', or register already-present files with existing)
    into the patch directory and insert them after the last applied
    patch. patchname forces the name for a single import; force allows
    overwriting; git selects git-format diffs for rev exports.
    """
    def checkseries(patchname):
        if patchname in self.series:
            raise util.Abort(_('patch %s is already in the series file')
                             % patchname)
    def checkfile(patchname):
        if not force and os.path.exists(self.join(patchname)):
            raise util.Abort(_('patch "%s" already exists')
                             % patchname)

    if rev:
        if files:
            raise util.Abort(_('option "-r" not valid when importing '
                               'files'))
        rev = cmdutil.revrange(repo, rev)
        # process newest-first so each revision's parent check can chain
        rev.sort(reverse=True)
    if (len(files) > 1 or len(rev) > 1) and patchname:
        raise util.Abort(_('option "-n" not valid when importing multiple '
                           'patches'))
    i = 0
    added = []
    if rev:
        # If mq patches are applied, we can only import revisions
        # that form a linear path to qbase.
        # Otherwise, they should form a linear path to a head.
        heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
        if len(heads) > 1:
            raise util.Abort(_('revision %d is the root of more than one '
                               'branch') % rev[-1])
        if self.applied:
            base = hex(repo.changelog.node(rev[0]))
            if base in [n.rev for n in self.applied]:
                raise util.Abort(_('revision %d is already managed')
                                 % rev[0])
            if heads != [bin(self.applied[-1].rev)]:
                raise util.Abort(_('revision %d is not the parent of '
                                   'the queue') % rev[0])
            base = repo.changelog.rev(bin(self.applied[0].rev))
            lastparent = repo.changelog.parentrevs(base)[0]
        else:
            if heads != [repo.changelog.node(rev[0])]:
                raise util.Abort(_('revision %d has unmanaged children')
                                 % rev[0])
            lastparent = None

        if git:
            self.diffopts().git = True

        for r in rev:
            p1, p2 = repo.changelog.parentrevs(r)
            n = repo.changelog.node(r)
            if p2 != nullrev:
                # merge changesets cannot be represented as a single patch
                raise util.Abort(_('cannot import merge revision %d') % r)
            if lastparent and lastparent != r:
                raise util.Abort(_('revision %d is not the parent of %d')
                                 % (r, lastparent))
            lastparent = p1

            if not patchname:
                patchname = normname('%d.diff' % r)
            self.check_reserved_name(patchname)
            checkseries(patchname)
            checkfile(patchname)
            # newest-first iteration: each patch goes to the front
            self.full_series.insert(0, patchname)

            patchf = self.opener(patchname, "w")
            patch.export(repo, [n], fp=patchf, opts=self.diffopts())
            patchf.close()

            se = statusentry(hex(n), patchname)
            self.applied.insert(0, se)

            added.append(patchname)
            # an explicit -n name only applies to the first revision
            patchname = None
        self.parse_series()
        self.applied_dirty = 1

    for filename in files:
        if existing:
            # register a file already present in the patch directory
            if filename == '-':
                raise util.Abort(_('-e is incompatible with import from -'))
            if not patchname:
                patchname = normname(filename)
            self.check_reserved_name(patchname)
            if not os.path.isfile(self.join(patchname)):
                raise util.Abort(_("patch %s does not exist") % patchname)
        else:
            # copy the patch contents from stdin or a (possibly remote) URL
            try:
                if filename == '-':
                    if not patchname:
                        raise util.Abort(_('need --name to import a patch from -'))
                    text = sys.stdin.read()
                else:
                    text = url.open(self.ui, filename).read()
            except (OSError, IOError):
                raise util.Abort(_("unable to read %s") % filename)
            if not patchname:
                patchname = normname(os.path.basename(filename))
            self.check_reserved_name(patchname)
            checkfile(patchname)
            patchf = self.opener(patchname, "w")
            patchf.write(text)
        if not force:
            checkseries(patchname)
        if patchname not in self.series:
            # insert after the last applied patch, offset by patches
            # added earlier in this call
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
        self.parse_series()
        self.ui.warn(_("adding %s to series file\n") % patchname)
        i += 1
        added.append(patchname)
        patchname = None
    self.series_dirty = 1
    qrepo = self.qrepo()
    if qrepo:
        # track new patch files in the versioned queue repo, if any
        qrepo.add(added)
1647
1647
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the qfinish command."""
    # thin command wrapper: delegate to the queue object, then persist
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1660
1660
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    mq = repo.mq
    # list up to (and including) the named patch, or everything applied
    if not patch:
        end = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = mq.series.index(patch) + 1
    return mq.qseries(repo, length=end, status='A',
                      summary=opts.get('summary'))
1671
1671
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    mq = repo.mq
    # start right after the named patch, or after the last applied one
    if not patch:
        first = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        first = mq.series.index(patch) + 1
    mq.qseries(repo, start=first, status='U', summary=opts.get('summary'))
1682
1682
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied patch. If no
    patches have been applied, qimport prepends the patch to the series.

    The patch will have the same name as its source file unless you give it a
    new one with -n/--name.

    You can register an existing patch inside the patch directory with the
    -e/--existing flag.

    With -f/--force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with -r/--rev (e.g.
    qimport --rev tip -n patch will place tip under mq control). With
    -g/--git, patches imported with --rev will use the git diff format. See
    the diffs help topic for information on why this is important for
    preserving rename/copy information and permission changes.

    To import a patch from standard input, pass - as the patch file. When
    importing from standard input, a patch name must be specified using the
    --name flag.
    """
    mq = repo.mq
    mq.qimport(repo, filename,
               patchname=opts['name'],
               existing=opts['existing'],
               force=opts['force'],
               rev=opts['rev'],
               git=opts['git'])
    mq.save_dirty()

    # auto-push only applies to imported patch files, not to --rev imports
    if opts.get('push') and not opts.get('rev'):
        return mq.push(repo, None)
    return 0
1716
1716
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c/--create-repo is
    specified, qinit will create a separate nested repository for patches
    (qinit -c may also be run later to convert an unversioned patch repository
    into a versioned one). You can use qcommit to commit changes to this queue
    repository.
    """
    mq = repo.mq
    qrepo = mq.init(repo, create=opts['create_repo'])
    mq.save_dirty()
    if qrepo:
        # seed the versioned queue repo with an ignore file and an empty
        # series, then schedule both for commit
        if not os.path.exists(qrepo.wjoin('.hgignore')):
            fp = qrepo.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n'
                     '^\\.mq\n'
                     'syntax: glob\n'
                     'status\n'
                     'guards\n')
            fp.close()
        if not os.path.exists(qrepo.wjoin('series')):
            qrepo.wopener('series', 'w').close()
        qrepo.add(['.hgignore', 'series'])
        commands.add(ui, qrepo)
    return 0
1743
1743
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If source is
    remote, this command can not check if patches are applied in source, so
    cannot guarantee that patches are not applied in destination. If you clone
    remote repository, be sure before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by default. Use
    -p <url> to change.

    The patch directory must be a nested Mercurial repository, as would be
    created by qinit -c.
    '''
    def patchdir(repo):
        # default location of the nested patch repository
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    try:
        # fail early if there is no versioned patch repo to clone
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase = oldest applied patch; it will be stripped from the
            # destination below
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # remote dest can't strip, so clone only the revisions
                # below qbase plus any heads not descending from it
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        # remote source: best effort — ask it where qbase is
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # remove applied-patch changesets so the destination starts
            # with a clean (unapplied) queue
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1806
1806
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # wrapped in _() for i18n, consistent with the other abort
        # messages in this module
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1813
1813
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1818
1818
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # index one past the topmost applied patch, or 0 when none applied
    pos = q.series_end(True) if q.applied else 0
    if not pos:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=pos - 1, length=1, status='A',
                     summary=opts.get('summary'))
1829
1829
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    # series_end() == len(series) means everything is already applied
    if pos == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1838
1838
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    applied = len(q.applied)
    if not applied:
        ui.write(_("no patches applied\n"))
        return 1
    if applied == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # the patch just below the current top
    return q.qseries(repo, start=applied - 2, length=1, status='A',
                     summary=opts.get('summary'))
1851
1851
def setupheaderopts(ui, opts):
    """Fill in 'user'/'date' opts from --currentuser/--currentdate flags."""
    def fill(key, value):
        # only take the current value when the explicit option is unset
        # and the corresponding --current<key> flag was given
        if opts['current' + key] and not opts[key]:
            opts[key] = value
    fill('user', ui.username())
    fill('date', "%d %d" % util.makedate())
1858
1858
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if any).
    It will refuse to run if there are any outstanding changes unless
    -f/--force is specified, in which case the patch will be initialized with
    them. You may also use -I/--include, -X/--exclude, and/or a list of files
    after the patch name to add only changes to matching files to the new
    patch, leaving the rest as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and date,
    respectively. -U/--currentuser and -D/--currentdate set user to current
    user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as well as
    the commit message. If none is specified, the header is empty and the
    commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff format.
    Read the diffs help topic for more information on why this is important
    for preserving permission changes and copy/rename information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        # deferred so the editor only opens when the patch is created
        return ui.edit(msg, ui.username())
    q = repo.mq
    # fix: the original assigned opts['msg'] = msg here as well, a dead
    # store immediately overwritten by both branches below
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1893
1893
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    If -s/--short is specified, files currently included in the patch will be
    refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (-g/--git or [diff] git=1) to track copies and renames.
    See the diffs help topic for more information on the git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the top patch's existing header
        toppatch = q.applied[-1].name
        ph = patchheader(q.join(toppatch))
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
    setupheaderopts(ui, opts)
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret
1923
1923
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any changes which
    have been made in the working directory since the last refresh (thus
    showing what the current patch would become after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the last
    qrefresh, or 'hg export qtip' if you want to see changes made by the
    current patch without including changes made since the qrefresh.
    """
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1937
1937
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively applied
    to the current patch in the order given. If all the patches apply
    successfully, the current patch will be refreshed with the new cumulative
    patch, and the folded patches will be deleted. With -k/--keep, the folded
    patch files will not be removed afterwards.

    The header for each folded patch will be concatenated with the current
    patch header, separated by a line of '* * *'.
    """

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))
    q.check_localchanges(repo)

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # fix: warning previously lacked the trailing newline that
            # every other ui.warn message in this module carries
            # NOTE(review): despite the warning, the duplicate is still
            # appended below (no 'continue') — confirm whether that is
            # intentional before changing it
            ui.warn(_('Skipping already folded patch %s\n') % p)
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's message for later concatenation
            ph = patchheader(q.join(p))
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # build the cumulative message: current header, then each folded
        # message separated by '* * *'
        ph = patchheader(q.join(parent))
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
2000
2000
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    force = opts['force']
    # pop down to the target if it is already applied, otherwise push up
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=force)
    else:
        ret = q.push(repo, patch, force=force)
    q.save_dirty()
    return ret
2011
2011
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no guards is
    always pushed. A patch with a positive guard ("+foo") is pushed only if
    the qselect command has activated it. A patch with a negative guard
    ("-foo") is never pushed if the qselect command has activated it.

    With no arguments, print the currently active guards. With arguments, set
    guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
        hg qguard -- other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "name: guard guard ..." for series entry idx;
        # closes over q, which is bound just below
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        # -l/--list prints guards for every patch; it is exclusive with
        # positional args and with --none
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # No args, or the first arg looks like a guard ("+x"/"-x"): operate
    # on the topmost applied patch. Note '' in '-+' is True, so an empty
    # first argument also takes this branch — do not "simplify" this test.
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    # otherwise the first argument names the patch to operate on
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # remaining args (or --none) set the patch's guards
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        # no guard args: just print the patch's current guards
        status(q.series.index(q.lookup(patch)))
2055
2055
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    if not patch:
        # default to the topmost applied patch
        if not q.applied:
            ui.write('no patches applied\n')
            return 1
        patch = q.lookup('qtip')
    else:
        patch = q.lookup(patch)
    ph = patchheader(q.join(patch))

    ui.write('\n'.join(ph.message) + '\n')
2070
2070
def lastsavename(path):
    """Return (name, index) of the highest-numbered save of path.

    Scans path's directory for siblings matching "<base>.<number>" and
    returns the full path and number of the largest one, or (None, None)
    when there is no match.
    """
    directory, base = os.path.split(path)
    pattern = re.compile("%s.([0-9]+)" % base)
    best_name, best_index = None, None
    for entry in os.listdir(directory):
        m = pattern.match(entry)
        if not m:
            continue
        index = int(m.group(1))
        if best_index is None or index > best_index:
            best_name, best_index = entry, index
    if best_name:
        return (os.path.join(directory, best_name), best_index)
    return (None, None)

def savename(path):
    """Return the next unused save name, "<path>.<n+1>"."""
    index = lastsavename(path)[1]
    if index is None:
        index = 0
    return path + ".%d" % (index + 1)
2094
2094
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files will be
    lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # merging needs a saved queue to merge against: either the named
        # one, or the most recent save found next to the current queue
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2117
2117
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch name,
    keeps popping off patches until the named patch is at the top of the
    stack.
    """
    if opts['name']:
        # operate on a named (saved) queue; leave the working dir alone
        q = queue(ui, repo.join(""), repo.join(opts['name']))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
        localupdate = True
    ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
                all=opts['all'])
    q.save_dirty()
    return ret
2136
2136
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2."""

    q = repo.mq

    # with a single argument it is the new name; the current patch is
    # the one being renamed
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        # renaming into a directory keeps the original basename
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    if os.path.exists(absdest):
        raise util.Abort(_('%s already exists') % absdest)

    if name in q.series:
        raise util.Abort(_('A patch named %s already exists in the series file') % name)

    if ui.verbose:
        ui.write('renaming %s to %s\n' % (patch, name))
    # rewrite the series entry, preserving any #guard annotations
    i = q.find_series(patch)
    guards = q.guard_re.findall(q.full_series[i])
    q.full_series[i] = name + ''.join([' #' + g for g in guards])
    q.parse_series()
    q.series_dirty = 1

    # if the patch is applied, update the status entry to the new name
    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applied_dirty = 1

    util.rename(q.join(patch), absdest)
    # mirror the rename in the versioned patch repository, if any
    r = q.qrepo()
    if r:
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                # not yet committed: just re-add under the new name
                r.dirstate.forget(patch)
                r.dirstate.add(name)
            else:
                # committed: record as a copy + remove so history follows
                if r.dirstate[name] == 'r':
                    r.undelete([name])
                r.copy(patch, name)
                r.remove([patch], False)
        finally:
            wlock.release()

    q.save_dirty()
2196
2196
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision"""
    q = repo.mq
    q.restore(repo, repo.lookup(rev), delete=opts['delete'],
              qupdate=opts['update'])
    q.save_dirty()
    return 0
2205
2205
def save(ui, repo, **opts):
    """save current queue state"""
    q = repo.mq
    message = cmdutil.logmessage(opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.save_dirty()
    if opts['copy']:
        path = q.path
        if opts['name']:
            newpath = os.path.join(q.basepath, opts['name'])
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts['force']:
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts['empty']:
        try:
            os.unlink(q.join(q.status_path))
        except OSError:
            # fix: was a bare except; keep the best-effort unlink (the
            # status file may already be gone) without swallowing
            # unrelated exceptions like KeyboardInterrupt
            pass
    return 0
2235
2235
def strip(ui, repo, rev, **opts):
    """strip a revision and all its descendants from the repository

    If one of the working directory's parent revisions is stripped, the
    working directory will be updated to the parent of the stripped revision.
    """
    # choose which stripped changesets get backed up as bundles
    if opts['backup']:
        backup = 'strip'
    elif opts['nobackup']:
        backup = 'none'
    else:
        backup = 'all'

    rev = repo.lookup(rev)
    p = repo.dirstate.parents()
    cl = repo.changelog
    # only update the working directory when one of its parents is a
    # descendant of the stripped revision; the elif order matters because
    # each test assumes the previous ones failed
    update = True
    if p[0] == nullid:
        update = False
    elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
        update = False
    elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
        update = False

    repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
    return 0
2261
2261
2262 def select(ui, repo, *args, **opts):
2262 def select(ui, repo, *args, **opts):
2263 '''set or print guarded patches to push
2263 '''set or print guarded patches to push
2264
2264
2265 Use the qguard command to set or print guards on patch, then use qselect
2265 Use the qguard command to set or print guards on patch, then use qselect
2266 to tell mq which guards to use. A patch will be pushed if it has no guards
2266 to tell mq which guards to use. A patch will be pushed if it has no guards
2267 or any positive guards match the currently selected guard, but will not be
2267 or any positive guards match the currently selected guard, but will not be
2268 pushed if any negative guards match the current guard. For example:
2268 pushed if any negative guards match the current guard. For example:
2269
2269
2270 qguard foo.patch -stable (negative guard)
2270 qguard foo.patch -stable (negative guard)
2271 qguard bar.patch +stable (positive guard)
2271 qguard bar.patch +stable (positive guard)
2272 qselect stable
2272 qselect stable
2273
2273
2274 This activates the "stable" guard. mq will skip foo.patch (because it has
2274 This activates the "stable" guard. mq will skip foo.patch (because it has
2275 a negative match) but push bar.patch (because it has a positive match).
2275 a negative match) but push bar.patch (because it has a positive match).
2276
2276
2277 With no arguments, prints the currently active guards. With one argument,
2277 With no arguments, prints the currently active guards. With one argument,
2278 sets the active guard.
2278 sets the active guard.
2279
2279
2280 Use -n/--none to deactivate guards (no other arguments needed). When no
2280 Use -n/--none to deactivate guards (no other arguments needed). When no
2281 guards are active, patches with positive guards are skipped and patches
2281 guards are active, patches with positive guards are skipped and patches
2282 with negative guards are pushed.
2282 with negative guards are pushed.
2283
2283
2284 qselect can change the guards on applied patches. It does not pop guarded
2284 qselect can change the guards on applied patches. It does not pop guarded
2285 patches by default. Use --pop to pop back to the last applied patch that
2285 patches by default. Use --pop to pop back to the last applied patch that
2286 is not guarded. Use --reapply (which implies --pop) to push back to the
2286 is not guarded. Use --reapply (which implies --pop) to push back to the
2287 current patch afterwards, but skip guarded patches.
2287 current patch afterwards, but skip guarded patches.
2288
2288
2289 Use -s/--series to print a list of all guards in the series file (no other
2289 Use -s/--series to print a list of all guards in the series file (no other
2290 arguments needed). Use -v for more information.
2290 arguments needed). Use -v for more information.
2291 '''
2291 '''
2292
2292
2293 q = repo.mq
2293 q = repo.mq
2294 guards = q.active()
2294 guards = q.active()
2295 if args or opts['none']:
2295 if args or opts['none']:
2296 old_unapplied = q.unapplied(repo)
2296 old_unapplied = q.unapplied(repo)
2297 old_guarded = [i for i in xrange(len(q.applied)) if
2297 old_guarded = [i for i in xrange(len(q.applied)) if
2298 not q.pushable(i)[0]]
2298 not q.pushable(i)[0]]
2299 q.set_active(args)
2299 q.set_active(args)
2300 q.save_dirty()
2300 q.save_dirty()
2301 if not args:
2301 if not args:
2302 ui.status(_('guards deactivated\n'))
2302 ui.status(_('guards deactivated\n'))
2303 if not opts['pop'] and not opts['reapply']:
2303 if not opts['pop'] and not opts['reapply']:
2304 unapplied = q.unapplied(repo)
2304 unapplied = q.unapplied(repo)
2305 guarded = [i for i in xrange(len(q.applied))
2305 guarded = [i for i in xrange(len(q.applied))
2306 if not q.pushable(i)[0]]
2306 if not q.pushable(i)[0]]
2307 if len(unapplied) != len(old_unapplied):
2307 if len(unapplied) != len(old_unapplied):
2308 ui.status(_('number of unguarded, unapplied patches has '
2308 ui.status(_('number of unguarded, unapplied patches has '
2309 'changed from %d to %d\n') %
2309 'changed from %d to %d\n') %
2310 (len(old_unapplied), len(unapplied)))
2310 (len(old_unapplied), len(unapplied)))
2311 if len(guarded) != len(old_guarded):
2311 if len(guarded) != len(old_guarded):
2312 ui.status(_('number of guarded, applied patches has changed '
2312 ui.status(_('number of guarded, applied patches has changed '
2313 'from %d to %d\n') %
2313 'from %d to %d\n') %
2314 (len(old_guarded), len(guarded)))
2314 (len(old_guarded), len(guarded)))
2315 elif opts['series']:
2315 elif opts['series']:
2316 guards = {}
2316 guards = {}
2317 noguards = 0
2317 noguards = 0
2318 for gs in q.series_guards:
2318 for gs in q.series_guards:
2319 if not gs:
2319 if not gs:
2320 noguards += 1
2320 noguards += 1
2321 for g in gs:
2321 for g in gs:
2322 guards.setdefault(g, 0)
2322 guards.setdefault(g, 0)
2323 guards[g] += 1
2323 guards[g] += 1
2324 if ui.verbose:
2324 if ui.verbose:
2325 guards['NONE'] = noguards
2325 guards['NONE'] = noguards
2326 guards = guards.items()
2326 guards = guards.items()
2327 guards.sort(key=lambda x: x[0][1:])
2327 guards.sort(key=lambda x: x[0][1:])
2328 if guards:
2328 if guards:
2329 ui.note(_('guards in series file:\n'))
2329 ui.note(_('guards in series file:\n'))
2330 for guard, count in guards:
2330 for guard, count in guards:
2331 ui.note('%2d ' % count)
2331 ui.note('%2d ' % count)
2332 ui.write(guard, '\n')
2332 ui.write(guard, '\n')
2333 else:
2333 else:
2334 ui.note(_('no guards in series file\n'))
2334 ui.note(_('no guards in series file\n'))
2335 else:
2335 else:
2336 if guards:
2336 if guards:
2337 ui.note(_('active guards:\n'))
2337 ui.note(_('active guards:\n'))
2338 for g in guards:
2338 for g in guards:
2339 ui.write(g, '\n')
2339 ui.write(g, '\n')
2340 else:
2340 else:
2341 ui.write(_('no active guards\n'))
2341 ui.write(_('no active guards\n'))
2342 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2342 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2343 popped = False
2343 popped = False
2344 if opts['pop'] or opts['reapply']:
2344 if opts['pop'] or opts['reapply']:
2345 for i in xrange(len(q.applied)):
2345 for i in xrange(len(q.applied)):
2346 pushable, reason = q.pushable(i)
2346 pushable, reason = q.pushable(i)
2347 if not pushable:
2347 if not pushable:
2348 ui.status(_('popping guarded patches\n'))
2348 ui.status(_('popping guarded patches\n'))
2349 popped = True
2349 popped = True
2350 if i == 0:
2350 if i == 0:
2351 q.pop(repo, all=True)
2351 q.pop(repo, all=True)
2352 else:
2352 else:
2353 q.pop(repo, i-1)
2353 q.pop(repo, i-1)
2354 break
2354 break
2355 if popped:
2355 if popped:
2356 try:
2356 try:
2357 if reapply:
2357 if reapply:
2358 ui.status(_('reapplying unguarded patches\n'))
2358 ui.status(_('reapplying unguarded patches\n'))
2359 q.push(repo, reapply)
2359 q.push(repo, reapply)
2360 finally:
2360 finally:
2361 q.save_dirty()
2361 q.save_dirty()
2362
2362
2363 def finish(ui, repo, *revrange, **opts):
2363 def finish(ui, repo, *revrange, **opts):
2364 """move applied patches into repository history
2364 """move applied patches into repository history
2365
2365
2366 Finishes the specified revisions (corresponding to applied patches) by
2366 Finishes the specified revisions (corresponding to applied patches) by
2367 moving them out of mq control into regular repository history.
2367 moving them out of mq control into regular repository history.
2368
2368
2369 Accepts a revision range or the -a/--applied option. If --applied is
2369 Accepts a revision range or the -a/--applied option. If --applied is
2370 specified, all applied mq revisions are removed from mq control.
2370 specified, all applied mq revisions are removed from mq control.
2371 Otherwise, the given revisions must be at the base of the stack of applied
2371 Otherwise, the given revisions must be at the base of the stack of applied
2372 patches.
2372 patches.
2373
2373
2374 This can be especially useful if your changes have been applied to an
2374 This can be especially useful if your changes have been applied to an
2375 upstream repository, or if you are about to push your changes to upstream.
2375 upstream repository, or if you are about to push your changes to upstream.
2376 """
2376 """
2377 if not opts['applied'] and not revrange:
2377 if not opts['applied'] and not revrange:
2378 raise util.Abort(_('no revisions specified'))
2378 raise util.Abort(_('no revisions specified'))
2379 elif opts['applied']:
2379 elif opts['applied']:
2380 revrange = ('qbase:qtip',) + revrange
2380 revrange = ('qbase:qtip',) + revrange
2381
2381
2382 q = repo.mq
2382 q = repo.mq
2383 if not q.applied:
2383 if not q.applied:
2384 ui.status(_('no patches applied\n'))
2384 ui.status(_('no patches applied\n'))
2385 return 0
2385 return 0
2386
2386
2387 revs = cmdutil.revrange(repo, revrange)
2387 revs = cmdutil.revrange(repo, revrange)
2388 q.finish(repo, revs)
2388 q.finish(repo, revs)
2389 q.save_dirty()
2389 q.save_dirty()
2390 return 0
2390 return 0
2391
2391
2392 def reposetup(ui, repo):
2392 def reposetup(ui, repo):
2393 class mqrepo(repo.__class__):
2393 class mqrepo(repo.__class__):
2394 @util.propertycache
2394 @util.propertycache
2395 def mq(self):
2395 def mq(self):
2396 return queue(self.ui, self.join(""))
2396 return queue(self.ui, self.join(""))
2397
2397
2398 def abort_if_wdir_patched(self, errmsg, force=False):
2398 def abort_if_wdir_patched(self, errmsg, force=False):
2399 if self.mq.applied and not force:
2399 if self.mq.applied and not force:
2400 parent = hex(self.dirstate.parents()[0])
2400 parent = hex(self.dirstate.parents()[0])
2401 if parent in [s.rev for s in self.mq.applied]:
2401 if parent in [s.rev for s in self.mq.applied]:
2402 raise util.Abort(errmsg)
2402 raise util.Abort(errmsg)
2403
2403
2404 def commit(self, text="", user=None, date=None, match=None,
2404 def commit(self, text="", user=None, date=None, match=None,
2405 force=False, editor=False, extra={}):
2405 force=False, editor=False, extra={}):
2406 self.abort_if_wdir_patched(
2406 self.abort_if_wdir_patched(
2407 _('cannot commit over an applied mq patch'),
2407 _('cannot commit over an applied mq patch'),
2408 force)
2408 force)
2409
2409
2410 return super(mqrepo, self).commit(text, user, date, match, force,
2410 return super(mqrepo, self).commit(text, user, date, match, force,
2411 editor, extra)
2411 editor, extra)
2412
2412
2413 def push(self, remote, force=False, revs=None):
2413 def push(self, remote, force=False, revs=None):
2414 if self.mq.applied and not force and not revs:
2414 if self.mq.applied and not force and not revs:
2415 raise util.Abort(_('source has mq patches applied'))
2415 raise util.Abort(_('source has mq patches applied'))
2416 return super(mqrepo, self).push(remote, force, revs)
2416 return super(mqrepo, self).push(remote, force, revs)
2417
2417
2418 def tags(self):
2418 def _findtags(self):
2419 if self.tagscache:
2419 '''augment tags from base class with patch tags'''
2420 return self.tagscache
2420 result = super(mqrepo, self)._findtags()
2421
2422 tagscache = super(mqrepo, self).tags()
2423
2421
2424 q = self.mq
2422 q = self.mq
2425 if not q.applied:
2423 if not q.applied:
2426 return tagscache
2424 return result
2427
2425
2428 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2426 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2429
2427
2430 if mqtags[-1][0] not in self.changelog.nodemap:
2428 if mqtags[-1][0] not in self.changelog.nodemap:
2431 self.ui.warn(_('mq status file refers to unknown node %s\n')
2429 self.ui.warn(_('mq status file refers to unknown node %s\n')
2432 % short(mqtags[-1][0]))
2430 % short(mqtags[-1][0]))
2433 return tagscache
2431 return result
2434
2432
2435 mqtags.append((mqtags[-1][0], 'qtip'))
2433 mqtags.append((mqtags[-1][0], 'qtip'))
2436 mqtags.append((mqtags[0][0], 'qbase'))
2434 mqtags.append((mqtags[0][0], 'qbase'))
2437 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2435 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2436 tags = result[0]
2438 for patch in mqtags:
2437 for patch in mqtags:
2439 if patch[1] in tagscache:
2438 if patch[1] in tags:
2440 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2439 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2441 % patch[1])
2440 % patch[1])
2442 else:
2441 else:
2443 tagscache[patch[1]] = patch[0]
2442 tags[patch[1]] = patch[0]
2444
2443
2445 return tagscache
2444 return result
2446
2445
2447 def _branchtags(self, partial, lrev):
2446 def _branchtags(self, partial, lrev):
2448 q = self.mq
2447 q = self.mq
2449 if not q.applied:
2448 if not q.applied:
2450 return super(mqrepo, self)._branchtags(partial, lrev)
2449 return super(mqrepo, self)._branchtags(partial, lrev)
2451
2450
2452 cl = self.changelog
2451 cl = self.changelog
2453 qbasenode = bin(q.applied[0].rev)
2452 qbasenode = bin(q.applied[0].rev)
2454 if qbasenode not in cl.nodemap:
2453 if qbasenode not in cl.nodemap:
2455 self.ui.warn(_('mq status file refers to unknown node %s\n')
2454 self.ui.warn(_('mq status file refers to unknown node %s\n')
2456 % short(qbasenode))
2455 % short(qbasenode))
2457 return super(mqrepo, self)._branchtags(partial, lrev)
2456 return super(mqrepo, self)._branchtags(partial, lrev)
2458
2457
2459 qbase = cl.rev(qbasenode)
2458 qbase = cl.rev(qbasenode)
2460 start = lrev + 1
2459 start = lrev + 1
2461 if start < qbase:
2460 if start < qbase:
2462 # update the cache (excluding the patches) and save it
2461 # update the cache (excluding the patches) and save it
2463 self._updatebranchcache(partial, lrev+1, qbase)
2462 self._updatebranchcache(partial, lrev+1, qbase)
2464 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2463 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2465 start = qbase
2464 start = qbase
2466 # if start = qbase, the cache is as updated as it should be.
2465 # if start = qbase, the cache is as updated as it should be.
2467 # if start > qbase, the cache includes (part of) the patches.
2466 # if start > qbase, the cache includes (part of) the patches.
2468 # we might as well use it, but we won't save it.
2467 # we might as well use it, but we won't save it.
2469
2468
2470 # update the cache up to the tip
2469 # update the cache up to the tip
2471 self._updatebranchcache(partial, start, len(cl))
2470 self._updatebranchcache(partial, start, len(cl))
2472
2471
2473 return partial
2472 return partial
2474
2473
2475 if repo.local():
2474 if repo.local():
2476 repo.__class__ = mqrepo
2475 repo.__class__ = mqrepo
2477
2476
2478 def mqimport(orig, ui, repo, *args, **kwargs):
2477 def mqimport(orig, ui, repo, *args, **kwargs):
2479 if hasattr(repo, 'abort_if_wdir_patched'):
2478 if hasattr(repo, 'abort_if_wdir_patched'):
2480 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2479 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2481 kwargs.get('force'))
2480 kwargs.get('force'))
2482 return orig(ui, repo, *args, **kwargs)
2481 return orig(ui, repo, *args, **kwargs)
2483
2482
2484 def uisetup(ui):
2483 def uisetup(ui):
2485 extensions.wrapcommand(commands.table, 'import', mqimport)
2484 extensions.wrapcommand(commands.table, 'import', mqimport)
2486
2485
2487 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2486 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2488
2487
2489 cmdtable = {
2488 cmdtable = {
2490 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2489 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2491 "qclone":
2490 "qclone":
2492 (clone,
2491 (clone,
2493 [('', 'pull', None, _('use pull protocol to copy metadata')),
2492 [('', 'pull', None, _('use pull protocol to copy metadata')),
2494 ('U', 'noupdate', None, _('do not update the new working directories')),
2493 ('U', 'noupdate', None, _('do not update the new working directories')),
2495 ('', 'uncompressed', None,
2494 ('', 'uncompressed', None,
2496 _('use uncompressed transfer (fast over LAN)')),
2495 _('use uncompressed transfer (fast over LAN)')),
2497 ('p', 'patches', '', _('location of source patch repository')),
2496 ('p', 'patches', '', _('location of source patch repository')),
2498 ] + commands.remoteopts,
2497 ] + commands.remoteopts,
2499 _('hg qclone [OPTION]... SOURCE [DEST]')),
2498 _('hg qclone [OPTION]... SOURCE [DEST]')),
2500 "qcommit|qci":
2499 "qcommit|qci":
2501 (commit,
2500 (commit,
2502 commands.table["^commit|ci"][1],
2501 commands.table["^commit|ci"][1],
2503 _('hg qcommit [OPTION]... [FILE]...')),
2502 _('hg qcommit [OPTION]... [FILE]...')),
2504 "^qdiff":
2503 "^qdiff":
2505 (diff,
2504 (diff,
2506 commands.diffopts + commands.diffopts2 + commands.walkopts,
2505 commands.diffopts + commands.diffopts2 + commands.walkopts,
2507 _('hg qdiff [OPTION]... [FILE]...')),
2506 _('hg qdiff [OPTION]... [FILE]...')),
2508 "qdelete|qremove|qrm":
2507 "qdelete|qremove|qrm":
2509 (delete,
2508 (delete,
2510 [('k', 'keep', None, _('keep patch file')),
2509 [('k', 'keep', None, _('keep patch file')),
2511 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2510 ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
2512 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2511 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2513 'qfold':
2512 'qfold':
2514 (fold,
2513 (fold,
2515 [('e', 'edit', None, _('edit patch header')),
2514 [('e', 'edit', None, _('edit patch header')),
2516 ('k', 'keep', None, _('keep folded patch files')),
2515 ('k', 'keep', None, _('keep folded patch files')),
2517 ] + commands.commitopts,
2516 ] + commands.commitopts,
2518 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2517 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2519 'qgoto':
2518 'qgoto':
2520 (goto,
2519 (goto,
2521 [('f', 'force', None, _('overwrite any local changes'))],
2520 [('f', 'force', None, _('overwrite any local changes'))],
2522 _('hg qgoto [OPTION]... PATCH')),
2521 _('hg qgoto [OPTION]... PATCH')),
2523 'qguard':
2522 'qguard':
2524 (guard,
2523 (guard,
2525 [('l', 'list', None, _('list all patches and guards')),
2524 [('l', 'list', None, _('list all patches and guards')),
2526 ('n', 'none', None, _('drop all guards'))],
2525 ('n', 'none', None, _('drop all guards'))],
2527 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2526 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2528 'qheader': (header, [], _('hg qheader [PATCH]')),
2527 'qheader': (header, [], _('hg qheader [PATCH]')),
2529 "^qimport":
2528 "^qimport":
2530 (qimport,
2529 (qimport,
2531 [('e', 'existing', None, _('import file in patch directory')),
2530 [('e', 'existing', None, _('import file in patch directory')),
2532 ('n', 'name', '', _('name of patch file')),
2531 ('n', 'name', '', _('name of patch file')),
2533 ('f', 'force', None, _('overwrite existing files')),
2532 ('f', 'force', None, _('overwrite existing files')),
2534 ('r', 'rev', [], _('place existing revisions under mq control')),
2533 ('r', 'rev', [], _('place existing revisions under mq control')),
2535 ('g', 'git', None, _('use git extended diff format')),
2534 ('g', 'git', None, _('use git extended diff format')),
2536 ('P', 'push', None, _('qpush after importing'))],
2535 ('P', 'push', None, _('qpush after importing'))],
2537 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2536 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
2538 "^qinit":
2537 "^qinit":
2539 (init,
2538 (init,
2540 [('c', 'create-repo', None, _('create queue repository'))],
2539 [('c', 'create-repo', None, _('create queue repository'))],
2541 _('hg qinit [-c]')),
2540 _('hg qinit [-c]')),
2542 "qnew":
2541 "qnew":
2543 (new,
2542 (new,
2544 [('e', 'edit', None, _('edit commit message')),
2543 [('e', 'edit', None, _('edit commit message')),
2545 ('f', 'force', None, _('import uncommitted changes into patch')),
2544 ('f', 'force', None, _('import uncommitted changes into patch')),
2546 ('g', 'git', None, _('use git extended diff format')),
2545 ('g', 'git', None, _('use git extended diff format')),
2547 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2546 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2548 ('u', 'user', '', _('add "From: <given user>" to patch')),
2547 ('u', 'user', '', _('add "From: <given user>" to patch')),
2549 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2548 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2550 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2549 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2551 ] + commands.walkopts + commands.commitopts,
2550 ] + commands.walkopts + commands.commitopts,
2552 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2551 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2553 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2552 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2554 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2553 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2555 "^qpop":
2554 "^qpop":
2556 (pop,
2555 (pop,
2557 [('a', 'all', None, _('pop all patches')),
2556 [('a', 'all', None, _('pop all patches')),
2558 ('n', 'name', '', _('queue name to pop')),
2557 ('n', 'name', '', _('queue name to pop')),
2559 ('f', 'force', None, _('forget any local changes'))],
2558 ('f', 'force', None, _('forget any local changes'))],
2560 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2559 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2561 "^qpush":
2560 "^qpush":
2562 (push,
2561 (push,
2563 [('f', 'force', None, _('apply if the patch has rejects')),
2562 [('f', 'force', None, _('apply if the patch has rejects')),
2564 ('l', 'list', None, _('list patch name in commit text')),
2563 ('l', 'list', None, _('list patch name in commit text')),
2565 ('a', 'all', None, _('apply all patches')),
2564 ('a', 'all', None, _('apply all patches')),
2566 ('m', 'merge', None, _('merge from another queue')),
2565 ('m', 'merge', None, _('merge from another queue')),
2567 ('n', 'name', '', _('merge queue name'))],
2566 ('n', 'name', '', _('merge queue name'))],
2568 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2567 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2569 "^qrefresh":
2568 "^qrefresh":
2570 (refresh,
2569 (refresh,
2571 [('e', 'edit', None, _('edit commit message')),
2570 [('e', 'edit', None, _('edit commit message')),
2572 ('g', 'git', None, _('use git extended diff format')),
2571 ('g', 'git', None, _('use git extended diff format')),
2573 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2572 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2574 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2573 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2575 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2574 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2576 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2575 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2577 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2576 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2578 ] + commands.walkopts + commands.commitopts,
2577 ] + commands.walkopts + commands.commitopts,
2579 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2578 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2580 'qrename|qmv':
2579 'qrename|qmv':
2581 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2580 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2582 "qrestore":
2581 "qrestore":
2583 (restore,
2582 (restore,
2584 [('d', 'delete', None, _('delete save entry')),
2583 [('d', 'delete', None, _('delete save entry')),
2585 ('u', 'update', None, _('update queue working directory'))],
2584 ('u', 'update', None, _('update queue working directory'))],
2586 _('hg qrestore [-d] [-u] REV')),
2585 _('hg qrestore [-d] [-u] REV')),
2587 "qsave":
2586 "qsave":
2588 (save,
2587 (save,
2589 [('c', 'copy', None, _('copy patch directory')),
2588 [('c', 'copy', None, _('copy patch directory')),
2590 ('n', 'name', '', _('copy directory name')),
2589 ('n', 'name', '', _('copy directory name')),
2591 ('e', 'empty', None, _('clear queue status file')),
2590 ('e', 'empty', None, _('clear queue status file')),
2592 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2591 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2593 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2592 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2594 "qselect":
2593 "qselect":
2595 (select,
2594 (select,
2596 [('n', 'none', None, _('disable all guards')),
2595 [('n', 'none', None, _('disable all guards')),
2597 ('s', 'series', None, _('list all guards in series file')),
2596 ('s', 'series', None, _('list all guards in series file')),
2598 ('', 'pop', None, _('pop to before first guarded applied patch')),
2597 ('', 'pop', None, _('pop to before first guarded applied patch')),
2599 ('', 'reapply', None, _('pop, then reapply patches'))],
2598 ('', 'reapply', None, _('pop, then reapply patches'))],
2600 _('hg qselect [OPTION]... [GUARD]...')),
2599 _('hg qselect [OPTION]... [GUARD]...')),
2601 "qseries":
2600 "qseries":
2602 (series,
2601 (series,
2603 [('m', 'missing', None, _('print patches not in series')),
2602 [('m', 'missing', None, _('print patches not in series')),
2604 ] + seriesopts,
2603 ] + seriesopts,
2605 _('hg qseries [-ms]')),
2604 _('hg qseries [-ms]')),
2606 "^strip":
2605 "^strip":
2607 (strip,
2606 (strip,
2608 [('f', 'force', None, _('force removal with local changes')),
2607 [('f', 'force', None, _('force removal with local changes')),
2609 ('b', 'backup', None, _('bundle unrelated changesets')),
2608 ('b', 'backup', None, _('bundle unrelated changesets')),
2610 ('n', 'nobackup', None, _('no backups'))],
2609 ('n', 'nobackup', None, _('no backups'))],
2611 _('hg strip [-f] [-b] [-n] REV')),
2610 _('hg strip [-f] [-b] [-n] REV')),
2612 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2611 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2613 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2612 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2614 "qfinish":
2613 "qfinish":
2615 (finish,
2614 (finish,
2616 [('a', 'applied', None, _('finish all applied changesets'))],
2615 [('a', 'applied', None, _('finish all applied changesets'))],
2617 _('hg qfinish [-a] [REV]...')),
2616 _('hg qfinish [-a] [REV]...')),
2618 }
2617 }
@@ -1,2180 +1,2194 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 supported = set('revlogv1 store fncache shared'.split())
22 supported = set('revlogv1 store fncache shared'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31 self.baseui = baseui
31 self.baseui = baseui
32 self.ui = baseui.copy()
32 self.ui = baseui.copy()
33
33
34 try:
34 try:
35 self.ui.readconfig(self.join("hgrc"), self.root)
35 self.ui.readconfig(self.join("hgrc"), self.root)
36 extensions.loadall(self.ui)
36 extensions.loadall(self.ui)
37 except IOError:
37 except IOError:
38 pass
38 pass
39
39
40 if not os.path.isdir(self.path):
40 if not os.path.isdir(self.path):
41 if create:
41 if create:
42 if not os.path.exists(path):
42 if not os.path.exists(path):
43 os.mkdir(path)
43 os.mkdir(path)
44 os.mkdir(self.path)
44 os.mkdir(self.path)
45 requirements = ["revlogv1"]
45 requirements = ["revlogv1"]
46 if self.ui.configbool('format', 'usestore', True):
46 if self.ui.configbool('format', 'usestore', True):
47 os.mkdir(os.path.join(self.path, "store"))
47 os.mkdir(os.path.join(self.path, "store"))
48 requirements.append("store")
48 requirements.append("store")
49 if self.ui.configbool('format', 'usefncache', True):
49 if self.ui.configbool('format', 'usefncache', True):
50 requirements.append("fncache")
50 requirements.append("fncache")
51 # create an invalid changelog
51 # create an invalid changelog
52 self.opener("00changelog.i", "a").write(
52 self.opener("00changelog.i", "a").write(
53 '\0\0\0\2' # represents revlogv2
53 '\0\0\0\2' # represents revlogv2
54 ' dummy changelog to prevent using the old repo layout'
54 ' dummy changelog to prevent using the old repo layout'
55 )
55 )
56 reqfile = self.opener("requires", "w")
56 reqfile = self.opener("requires", "w")
57 for r in requirements:
57 for r in requirements:
58 reqfile.write("%s\n" % r)
58 reqfile.write("%s\n" % r)
59 reqfile.close()
59 reqfile.close()
60 else:
60 else:
61 raise error.RepoError(_("repository %s not found") % path)
61 raise error.RepoError(_("repository %s not found") % path)
62 elif create:
62 elif create:
63 raise error.RepoError(_("repository %s already exists") % path)
63 raise error.RepoError(_("repository %s already exists") % path)
64 else:
64 else:
65 # find requirements
65 # find requirements
66 requirements = set()
66 requirements = set()
67 try:
67 try:
68 requirements = set(self.opener("requires").read().splitlines())
68 requirements = set(self.opener("requires").read().splitlines())
69 except IOError, inst:
69 except IOError, inst:
70 if inst.errno != errno.ENOENT:
70 if inst.errno != errno.ENOENT:
71 raise
71 raise
72 for r in requirements - self.supported:
72 for r in requirements - self.supported:
73 raise error.RepoError(_("requirement '%s' not supported") % r)
73 raise error.RepoError(_("requirement '%s' not supported") % r)
74
74
75 self.sharedpath = self.path
75 self.sharedpath = self.path
76 try:
76 try:
77 s = os.path.realpath(self.opener("sharedpath").read())
77 s = os.path.realpath(self.opener("sharedpath").read())
78 if not os.path.exists(s):
78 if not os.path.exists(s):
79 raise error.RepoError(
79 raise error.RepoError(
80 _('.hg/sharedpath points to nonexistent directory %s') % s)
80 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 self.sharedpath = s
81 self.sharedpath = s
82 except IOError, inst:
82 except IOError, inst:
83 if inst.errno != errno.ENOENT:
83 if inst.errno != errno.ENOENT:
84 raise
84 raise
85
85
86 self.store = store.store(requirements, self.sharedpath, util.opener)
86 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.spath = self.store.path
87 self.spath = self.store.path
88 self.sopener = self.store.opener
88 self.sopener = self.store.opener
89 self.sjoin = self.store.join
89 self.sjoin = self.store.join
90 self.opener.createmode = self.store.createmode
90 self.opener.createmode = self.store.createmode
91
91
92 self.tagscache = None
92 self.tagscache = None
93 self._tagstypecache = None
93 self._tagstypecache = None
94 self.branchcache = None
94 self.branchcache = None
95 self._ubranchcache = None # UTF-8 version of branchcache
95 self._ubranchcache = None # UTF-8 version of branchcache
96 self._branchcachetip = None
96 self._branchcachetip = None
97 self.nodetagscache = None
97 self.nodetagscache = None
98 self.filterpats = {}
98 self.filterpats = {}
99 self._datafilters = {}
99 self._datafilters = {}
100 self._transref = self._lockref = self._wlockref = None
100 self._transref = self._lockref = self._wlockref = None
101
101
102 @propertycache
102 @propertycache
103 def changelog(self):
103 def changelog(self):
104 c = changelog.changelog(self.sopener)
104 c = changelog.changelog(self.sopener)
105 if 'HG_PENDING' in os.environ:
105 if 'HG_PENDING' in os.environ:
106 p = os.environ['HG_PENDING']
106 p = os.environ['HG_PENDING']
107 if p.startswith(self.root):
107 if p.startswith(self.root):
108 c.readpending('00changelog.i.a')
108 c.readpending('00changelog.i.a')
109 self.sopener.defversion = c.version
109 self.sopener.defversion = c.version
110 return c
110 return c
111
111
112 @propertycache
112 @propertycache
113 def manifest(self):
113 def manifest(self):
114 return manifest.manifest(self.sopener)
114 return manifest.manifest(self.sopener)
115
115
116 @propertycache
116 @propertycache
117 def dirstate(self):
117 def dirstate(self):
118 return dirstate.dirstate(self.opener, self.ui, self.root)
118 return dirstate.dirstate(self.opener, self.ui, self.root)
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid is None:
121 if changeid is None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, extra={}):
143 def _tag(self, names, node, message, local, user, date, extra={}):
144 if isinstance(names, str):
144 if isinstance(names, str):
145 allchars = names
145 allchars = names
146 names = (names,)
146 names = (names,)
147 else:
147 else:
148 allchars = ''.join(names)
148 allchars = ''.join(names)
149 for c in self.tag_disallowed:
149 for c in self.tag_disallowed:
150 if c in allchars:
150 if c in allchars:
151 raise util.Abort(_('%r cannot be used in a tag name') % c)
151 raise util.Abort(_('%r cannot be used in a tag name') % c)
152
152
153 for name in names:
153 for name in names:
154 self.hook('pretag', throw=True, node=hex(node), tag=name,
154 self.hook('pretag', throw=True, node=hex(node), tag=name,
155 local=local)
155 local=local)
156
156
157 def writetags(fp, names, munge, prevtags):
157 def writetags(fp, names, munge, prevtags):
158 fp.seek(0, 2)
158 fp.seek(0, 2)
159 if prevtags and prevtags[-1] != '\n':
159 if prevtags and prevtags[-1] != '\n':
160 fp.write('\n')
160 fp.write('\n')
161 for name in names:
161 for name in names:
162 m = munge and munge(name) or name
162 m = munge and munge(name) or name
163 if self._tagstypecache and name in self._tagstypecache:
163 if self._tagstypecache and name in self._tagstypecache:
164 old = self.tagscache.get(name, nullid)
164 old = self.tagscache.get(name, nullid)
165 fp.write('%s %s\n' % (hex(old), m))
165 fp.write('%s %s\n' % (hex(old), m))
166 fp.write('%s %s\n' % (hex(node), m))
166 fp.write('%s %s\n' % (hex(node), m))
167 fp.close()
167 fp.close()
168
168
169 prevtags = ''
169 prevtags = ''
170 if local:
170 if local:
171 try:
171 try:
172 fp = self.opener('localtags', 'r+')
172 fp = self.opener('localtags', 'r+')
173 except IOError:
173 except IOError:
174 fp = self.opener('localtags', 'a')
174 fp = self.opener('localtags', 'a')
175 else:
175 else:
176 prevtags = fp.read()
176 prevtags = fp.read()
177
177
178 # local tags are stored in the current charset
178 # local tags are stored in the current charset
179 writetags(fp, names, None, prevtags)
179 writetags(fp, names, None, prevtags)
180 for name in names:
180 for name in names:
181 self.hook('tag', node=hex(node), tag=name, local=local)
181 self.hook('tag', node=hex(node), tag=name, local=local)
182 return
182 return
183
183
184 try:
184 try:
185 fp = self.wfile('.hgtags', 'rb+')
185 fp = self.wfile('.hgtags', 'rb+')
186 except IOError:
186 except IOError:
187 fp = self.wfile('.hgtags', 'ab')
187 fp = self.wfile('.hgtags', 'ab')
188 else:
188 else:
189 prevtags = fp.read()
189 prevtags = fp.read()
190
190
191 # committed tags are stored in UTF-8
191 # committed tags are stored in UTF-8
192 writetags(fp, names, encoding.fromlocal, prevtags)
192 writetags(fp, names, encoding.fromlocal, prevtags)
193
193
194 if '.hgtags' not in self.dirstate:
194 if '.hgtags' not in self.dirstate:
195 self.add(['.hgtags'])
195 self.add(['.hgtags'])
196
196
197 m = match_.exact(self.root, '', ['.hgtags'])
197 m = match_.exact(self.root, '', ['.hgtags'])
198 tagnode = self.commit(message, user, date, extra=extra, match=m)
198 tagnode = self.commit(message, user, date, extra=extra, match=m)
199
199
200 for name in names:
200 for name in names:
201 self.hook('tag', node=hex(node), tag=name, local=local)
201 self.hook('tag', node=hex(node), tag=name, local=local)
202
202
203 return tagnode
203 return tagnode
204
204
205 def tag(self, names, node, message, local, user, date):
205 def tag(self, names, node, message, local, user, date):
206 '''tag a revision with one or more symbolic names.
206 '''tag a revision with one or more symbolic names.
207
207
208 names is a list of strings or, when adding a single tag, names may be a
208 names is a list of strings or, when adding a single tag, names may be a
209 string.
209 string.
210
210
211 if local is True, the tags are stored in a per-repository file.
211 if local is True, the tags are stored in a per-repository file.
212 otherwise, they are stored in the .hgtags file, and a new
212 otherwise, they are stored in the .hgtags file, and a new
213 changeset is committed with the change.
213 changeset is committed with the change.
214
214
215 keyword arguments:
215 keyword arguments:
216
216
217 local: whether to store tags in non-version-controlled file
217 local: whether to store tags in non-version-controlled file
218 (default False)
218 (default False)
219
219
220 message: commit message to use if committing
220 message: commit message to use if committing
221
221
222 user: name of user to use if committing
222 user: name of user to use if committing
223
223
224 date: date tuple to use if committing'''
224 date: date tuple to use if committing'''
225
225
226 for x in self.status()[:5]:
226 for x in self.status()[:5]:
227 if '.hgtags' in x:
227 if '.hgtags' in x:
228 raise util.Abort(_('working copy of .hgtags is changed '
228 raise util.Abort(_('working copy of .hgtags is changed '
229 '(please commit .hgtags manually)'))
229 '(please commit .hgtags manually)'))
230
230
231 self.tags() # instantiate the cache
231 self.tags() # instantiate the cache
232 self._tag(names, node, message, local, user, date)
232 self._tag(names, node, message, local, user, date)
233
233
234 def tags(self):
234 def tags(self):
235 '''return a mapping of tag to node'''
235 '''return a mapping of tag to node'''
236 if self.tagscache:
236 if self.tagscache is None:
237 return self.tagscache
237 (self.tagscache, self._tagstypecache) = self._findtags()
238
239 return self.tagscache
240
241 def _findtags(self):
242 '''Do the hard work of finding tags. Return a pair of dicts
243 (tags, tagtypes) where tags maps tag name to node, and tagtypes
244 maps tag name to a string like \'global\' or \'local\'.
245 Subclasses or extensions are free to add their own tags, but
246 should be aware that the returned dicts will be retained for the
247 duration of the localrepo object.'''
248
249 # XXX what tagtype should subclasses/extensions use? Currently
250 # mq and bookmarks add tags, but do not set the tagtype at all.
251 # Should each extension invent its own tag type? Should there
252 # be one tagtype for all such "virtual" tags? Or is the status
253 # quo fine?
238
254
239 globaltags = {}
255 globaltags = {}
240 tagtypes = {}
256 tagtypes = {}
241
257
242 def readtags(lines, fn, tagtype):
258 def readtags(lines, fn, tagtype):
243 filetags = {}
259 filetags = {}
244 count = 0
260 count = 0
245
261
246 def warn(msg):
262 def warn(msg):
247 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
263 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
248
264
249 for l in lines:
265 for l in lines:
250 count += 1
266 count += 1
251 if not l:
267 if not l:
252 continue
268 continue
253 s = l.split(" ", 1)
269 s = l.split(" ", 1)
254 if len(s) != 2:
270 if len(s) != 2:
255 warn(_("cannot parse entry"))
271 warn(_("cannot parse entry"))
256 continue
272 continue
257 node, key = s
273 node, key = s
258 key = encoding.tolocal(key.strip()) # stored in UTF-8
274 key = encoding.tolocal(key.strip()) # stored in UTF-8
259 try:
275 try:
260 bin_n = bin(node)
276 bin_n = bin(node)
261 except TypeError:
277 except TypeError:
262 warn(_("node '%s' is not well formed") % node)
278 warn(_("node '%s' is not well formed") % node)
263 continue
279 continue
264 if bin_n not in self.changelog.nodemap:
280 if bin_n not in self.changelog.nodemap:
265 # silently ignore as pull -r might cause this
281 # silently ignore as pull -r might cause this
266 continue
282 continue
267
283
268 h = []
284 h = []
269 if key in filetags:
285 if key in filetags:
270 n, h = filetags[key]
286 n, h = filetags[key]
271 h.append(n)
287 h.append(n)
272 filetags[key] = (bin_n, h)
288 filetags[key] = (bin_n, h)
273
289
274 for k, nh in filetags.iteritems():
290 for k, nh in filetags.iteritems():
275 if k not in globaltags:
291 if k not in globaltags:
276 globaltags[k] = nh
292 globaltags[k] = nh
277 tagtypes[k] = tagtype
293 tagtypes[k] = tagtype
278 continue
294 continue
279
295
280 # we prefer the global tag if:
296 # we prefer the global tag if:
281 # it supercedes us OR
297 # it supercedes us OR
282 # mutual supercedes and it has a higher rank
298 # mutual supercedes and it has a higher rank
283 # otherwise we win because we're tip-most
299 # otherwise we win because we're tip-most
284 an, ah = nh
300 an, ah = nh
285 bn, bh = globaltags[k]
301 bn, bh = globaltags[k]
286 if (bn != an and an in bh and
302 if (bn != an and an in bh and
287 (bn not in ah or len(bh) > len(ah))):
303 (bn not in ah or len(bh) > len(ah))):
288 an = bn
304 an = bn
289 ah.extend([n for n in bh if n not in ah])
305 ah.extend([n for n in bh if n not in ah])
290 globaltags[k] = an, ah
306 globaltags[k] = an, ah
291 tagtypes[k] = tagtype
307 tagtypes[k] = tagtype
292
308
293 seen = set()
309 seen = set()
294 f = None
310 f = None
295 ctxs = []
311 ctxs = []
296 for node in self.heads():
312 for node in self.heads():
297 try:
313 try:
298 fnode = self[node].filenode('.hgtags')
314 fnode = self[node].filenode('.hgtags')
299 except error.LookupError:
315 except error.LookupError:
300 continue
316 continue
301 if fnode not in seen:
317 if fnode not in seen:
302 seen.add(fnode)
318 seen.add(fnode)
303 if not f:
319 if not f:
304 f = self.filectx('.hgtags', fileid=fnode)
320 f = self.filectx('.hgtags', fileid=fnode)
305 else:
321 else:
306 f = f.filectx(fnode)
322 f = f.filectx(fnode)
307 ctxs.append(f)
323 ctxs.append(f)
308
324
309 # read the tags file from each head, ending with the tip
325 # read the tags file from each head, ending with the tip
310 for f in reversed(ctxs):
326 for f in reversed(ctxs):
311 readtags(f.data().splitlines(), f, "global")
327 readtags(f.data().splitlines(), f, "global")
312
328
313 try:
329 try:
314 data = encoding.fromlocal(self.opener("localtags").read())
330 data = encoding.fromlocal(self.opener("localtags").read())
315 # localtags are stored in the local character set
331 # localtags are stored in the local character set
316 # while the internal tag table is stored in UTF-8
332 # while the internal tag table is stored in UTF-8
317 readtags(data.splitlines(), "localtags", "local")
333 readtags(data.splitlines(), "localtags", "local")
318 except IOError:
334 except IOError:
319 pass
335 pass
320
336
321 self.tagscache = {}
337 tags = {}
322 self._tagstypecache = {}
323 for k, nh in globaltags.iteritems():
338 for k, nh in globaltags.iteritems():
324 n = nh[0]
339 n = nh[0]
325 if n != nullid:
340 if n != nullid:
326 self.tagscache[k] = n
341 tags[k] = n
327 self._tagstypecache[k] = tagtypes[k]
342 tags['tip'] = self.changelog.tip()
328 self.tagscache['tip'] = self.changelog.tip()
343 return (tags, tagtypes)
329 return self.tagscache
330
344
331 def tagtype(self, tagname):
345 def tagtype(self, tagname):
332 '''
346 '''
333 return the type of the given tag. result can be:
347 return the type of the given tag. result can be:
334
348
335 'local' : a local tag
349 'local' : a local tag
336 'global' : a global tag
350 'global' : a global tag
337 None : tag does not exist
351 None : tag does not exist
338 '''
352 '''
339
353
340 self.tags()
354 self.tags()
341
355
342 return self._tagstypecache.get(tagname)
356 return self._tagstypecache.get(tagname)
343
357
344 def tagslist(self):
358 def tagslist(self):
345 '''return a list of tags ordered by revision'''
359 '''return a list of tags ordered by revision'''
346 l = []
360 l = []
347 for t, n in self.tags().iteritems():
361 for t, n in self.tags().iteritems():
348 try:
362 try:
349 r = self.changelog.rev(n)
363 r = self.changelog.rev(n)
350 except:
364 except:
351 r = -2 # sort to the beginning of the list if unknown
365 r = -2 # sort to the beginning of the list if unknown
352 l.append((r, t, n))
366 l.append((r, t, n))
353 return [(t, n) for r, t, n in sorted(l)]
367 return [(t, n) for r, t, n in sorted(l)]
354
368
355 def nodetags(self, node):
369 def nodetags(self, node):
356 '''return the tags associated with a node'''
370 '''return the tags associated with a node'''
357 if not self.nodetagscache:
371 if not self.nodetagscache:
358 self.nodetagscache = {}
372 self.nodetagscache = {}
359 for t, n in self.tags().iteritems():
373 for t, n in self.tags().iteritems():
360 self.nodetagscache.setdefault(n, []).append(t)
374 self.nodetagscache.setdefault(n, []).append(t)
361 return self.nodetagscache.get(node, [])
375 return self.nodetagscache.get(node, [])
362
376
363 def _branchtags(self, partial, lrev):
377 def _branchtags(self, partial, lrev):
364 # TODO: rename this function?
378 # TODO: rename this function?
365 tiprev = len(self) - 1
379 tiprev = len(self) - 1
366 if lrev != tiprev:
380 if lrev != tiprev:
367 self._updatebranchcache(partial, lrev+1, tiprev+1)
381 self._updatebranchcache(partial, lrev+1, tiprev+1)
368 self._writebranchcache(partial, self.changelog.tip(), tiprev)
382 self._writebranchcache(partial, self.changelog.tip(), tiprev)
369
383
370 return partial
384 return partial
371
385
372 def branchmap(self):
386 def branchmap(self):
373 tip = self.changelog.tip()
387 tip = self.changelog.tip()
374 if self.branchcache is not None and self._branchcachetip == tip:
388 if self.branchcache is not None and self._branchcachetip == tip:
375 return self.branchcache
389 return self.branchcache
376
390
377 oldtip = self._branchcachetip
391 oldtip = self._branchcachetip
378 self._branchcachetip = tip
392 self._branchcachetip = tip
379 if self.branchcache is None:
393 if self.branchcache is None:
380 self.branchcache = {} # avoid recursion in changectx
394 self.branchcache = {} # avoid recursion in changectx
381 else:
395 else:
382 self.branchcache.clear() # keep using the same dict
396 self.branchcache.clear() # keep using the same dict
383 if oldtip is None or oldtip not in self.changelog.nodemap:
397 if oldtip is None or oldtip not in self.changelog.nodemap:
384 partial, last, lrev = self._readbranchcache()
398 partial, last, lrev = self._readbranchcache()
385 else:
399 else:
386 lrev = self.changelog.rev(oldtip)
400 lrev = self.changelog.rev(oldtip)
387 partial = self._ubranchcache
401 partial = self._ubranchcache
388
402
389 self._branchtags(partial, lrev)
403 self._branchtags(partial, lrev)
390 # this private cache holds all heads (not just tips)
404 # this private cache holds all heads (not just tips)
391 self._ubranchcache = partial
405 self._ubranchcache = partial
392
406
393 # the branch cache is stored on disk as UTF-8, but in the local
407 # the branch cache is stored on disk as UTF-8, but in the local
394 # charset internally
408 # charset internally
395 for k, v in partial.iteritems():
409 for k, v in partial.iteritems():
396 self.branchcache[encoding.tolocal(k)] = v
410 self.branchcache[encoding.tolocal(k)] = v
397 return self.branchcache
411 return self.branchcache
398
412
399
413
400 def branchtags(self):
414 def branchtags(self):
401 '''return a dict where branch names map to the tipmost head of
415 '''return a dict where branch names map to the tipmost head of
402 the branch, open heads come before closed'''
416 the branch, open heads come before closed'''
403 bt = {}
417 bt = {}
404 for bn, heads in self.branchmap().iteritems():
418 for bn, heads in self.branchmap().iteritems():
405 head = None
419 head = None
406 for i in range(len(heads)-1, -1, -1):
420 for i in range(len(heads)-1, -1, -1):
407 h = heads[i]
421 h = heads[i]
408 if 'close' not in self.changelog.read(h)[5]:
422 if 'close' not in self.changelog.read(h)[5]:
409 head = h
423 head = h
410 break
424 break
411 # no open heads were found
425 # no open heads were found
412 if head is None:
426 if head is None:
413 head = heads[-1]
427 head = heads[-1]
414 bt[bn] = head
428 bt[bn] = head
415 return bt
429 return bt
416
430
417
431
418 def _readbranchcache(self):
432 def _readbranchcache(self):
419 partial = {}
433 partial = {}
420 try:
434 try:
421 f = self.opener("branchheads.cache")
435 f = self.opener("branchheads.cache")
422 lines = f.read().split('\n')
436 lines = f.read().split('\n')
423 f.close()
437 f.close()
424 except (IOError, OSError):
438 except (IOError, OSError):
425 return {}, nullid, nullrev
439 return {}, nullid, nullrev
426
440
427 try:
441 try:
428 last, lrev = lines.pop(0).split(" ", 1)
442 last, lrev = lines.pop(0).split(" ", 1)
429 last, lrev = bin(last), int(lrev)
443 last, lrev = bin(last), int(lrev)
430 if lrev >= len(self) or self[lrev].node() != last:
444 if lrev >= len(self) or self[lrev].node() != last:
431 # invalidate the cache
445 # invalidate the cache
432 raise ValueError('invalidating branch cache (tip differs)')
446 raise ValueError('invalidating branch cache (tip differs)')
433 for l in lines:
447 for l in lines:
434 if not l: continue
448 if not l: continue
435 node, label = l.split(" ", 1)
449 node, label = l.split(" ", 1)
436 partial.setdefault(label.strip(), []).append(bin(node))
450 partial.setdefault(label.strip(), []).append(bin(node))
437 except KeyboardInterrupt:
451 except KeyboardInterrupt:
438 raise
452 raise
439 except Exception, inst:
453 except Exception, inst:
440 if self.ui.debugflag:
454 if self.ui.debugflag:
441 self.ui.warn(str(inst), '\n')
455 self.ui.warn(str(inst), '\n')
442 partial, last, lrev = {}, nullid, nullrev
456 partial, last, lrev = {}, nullid, nullrev
443 return partial, last, lrev
457 return partial, last, lrev
444
458
445 def _writebranchcache(self, branches, tip, tiprev):
459 def _writebranchcache(self, branches, tip, tiprev):
446 try:
460 try:
447 f = self.opener("branchheads.cache", "w", atomictemp=True)
461 f = self.opener("branchheads.cache", "w", atomictemp=True)
448 f.write("%s %s\n" % (hex(tip), tiprev))
462 f.write("%s %s\n" % (hex(tip), tiprev))
449 for label, nodes in branches.iteritems():
463 for label, nodes in branches.iteritems():
450 for node in nodes:
464 for node in nodes:
451 f.write("%s %s\n" % (hex(node), label))
465 f.write("%s %s\n" % (hex(node), label))
452 f.rename()
466 f.rename()
453 except (IOError, OSError):
467 except (IOError, OSError):
454 pass
468 pass
455
469
456 def _updatebranchcache(self, partial, start, end):
470 def _updatebranchcache(self, partial, start, end):
457 # collect new branch entries
471 # collect new branch entries
458 newbranches = {}
472 newbranches = {}
459 for r in xrange(start, end):
473 for r in xrange(start, end):
460 c = self[r]
474 c = self[r]
461 newbranches.setdefault(c.branch(), []).append(c.node())
475 newbranches.setdefault(c.branch(), []).append(c.node())
462 # if older branchheads are reachable from new ones, they aren't
476 # if older branchheads are reachable from new ones, they aren't
463 # really branchheads. Note checking parents is insufficient:
477 # really branchheads. Note checking parents is insufficient:
464 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
478 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
465 for branch, newnodes in newbranches.iteritems():
479 for branch, newnodes in newbranches.iteritems():
466 bheads = partial.setdefault(branch, [])
480 bheads = partial.setdefault(branch, [])
467 bheads.extend(newnodes)
481 bheads.extend(newnodes)
468 if len(bheads) < 2:
482 if len(bheads) < 2:
469 continue
483 continue
470 newbheads = []
484 newbheads = []
471 # starting from tip means fewer passes over reachable
485 # starting from tip means fewer passes over reachable
472 while newnodes:
486 while newnodes:
473 latest = newnodes.pop()
487 latest = newnodes.pop()
474 if latest not in bheads:
488 if latest not in bheads:
475 continue
489 continue
476 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
490 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
477 reachable = self.changelog.reachable(latest, minbhrev)
491 reachable = self.changelog.reachable(latest, minbhrev)
478 bheads = [b for b in bheads if b not in reachable]
492 bheads = [b for b in bheads if b not in reachable]
479 newbheads.insert(0, latest)
493 newbheads.insert(0, latest)
480 bheads.extend(newbheads)
494 bheads.extend(newbheads)
481 partial[branch] = bheads
495 partial[branch] = bheads
482
496
483 def lookup(self, key):
497 def lookup(self, key):
484 if isinstance(key, int):
498 if isinstance(key, int):
485 return self.changelog.node(key)
499 return self.changelog.node(key)
486 elif key == '.':
500 elif key == '.':
487 return self.dirstate.parents()[0]
501 return self.dirstate.parents()[0]
488 elif key == 'null':
502 elif key == 'null':
489 return nullid
503 return nullid
490 elif key == 'tip':
504 elif key == 'tip':
491 return self.changelog.tip()
505 return self.changelog.tip()
492 n = self.changelog._match(key)
506 n = self.changelog._match(key)
493 if n:
507 if n:
494 return n
508 return n
495 if key in self.tags():
509 if key in self.tags():
496 return self.tags()[key]
510 return self.tags()[key]
497 if key in self.branchtags():
511 if key in self.branchtags():
498 return self.branchtags()[key]
512 return self.branchtags()[key]
499 n = self.changelog._partialmatch(key)
513 n = self.changelog._partialmatch(key)
500 if n:
514 if n:
501 return n
515 return n
502
516
503 # can't find key, check if it might have come from damaged dirstate
517 # can't find key, check if it might have come from damaged dirstate
504 if key in self.dirstate.parents():
518 if key in self.dirstate.parents():
505 raise error.Abort(_("working directory has unknown parent '%s'!")
519 raise error.Abort(_("working directory has unknown parent '%s'!")
506 % short(key))
520 % short(key))
507 try:
521 try:
508 if len(key) == 20:
522 if len(key) == 20:
509 key = hex(key)
523 key = hex(key)
510 except:
524 except:
511 pass
525 pass
512 raise error.RepoError(_("unknown revision '%s'") % key)
526 raise error.RepoError(_("unknown revision '%s'") % key)
513
527
514 def local(self):
528 def local(self):
515 return True
529 return True
516
530
517 def join(self, f):
531 def join(self, f):
518 return os.path.join(self.path, f)
532 return os.path.join(self.path, f)
519
533
520 def wjoin(self, f):
534 def wjoin(self, f):
521 return os.path.join(self.root, f)
535 return os.path.join(self.root, f)
522
536
523 def rjoin(self, f):
537 def rjoin(self, f):
524 return os.path.join(self.root, util.pconvert(f))
538 return os.path.join(self.root, util.pconvert(f))
525
539
526 def file(self, f):
540 def file(self, f):
527 if f[0] == '/':
541 if f[0] == '/':
528 f = f[1:]
542 f = f[1:]
529 return filelog.filelog(self.sopener, f)
543 return filelog.filelog(self.sopener, f)
530
544
531 def changectx(self, changeid):
545 def changectx(self, changeid):
532 return self[changeid]
546 return self[changeid]
533
547
534 def parents(self, changeid=None):
548 def parents(self, changeid=None):
535 '''get list of changectxs for parents of changeid'''
549 '''get list of changectxs for parents of changeid'''
536 return self[changeid].parents()
550 return self[changeid].parents()
537
551
538 def filectx(self, path, changeid=None, fileid=None):
552 def filectx(self, path, changeid=None, fileid=None):
539 """changeid can be a changeset revision, node, or tag.
553 """changeid can be a changeset revision, node, or tag.
540 fileid can be a file revision or node."""
554 fileid can be a file revision or node."""
541 return context.filectx(self, path, changeid, fileid)
555 return context.filectx(self, path, changeid, fileid)
542
556
543 def getcwd(self):
557 def getcwd(self):
544 return self.dirstate.getcwd()
558 return self.dirstate.getcwd()
545
559
546 def pathto(self, f, cwd=None):
560 def pathto(self, f, cwd=None):
547 return self.dirstate.pathto(f, cwd)
561 return self.dirstate.pathto(f, cwd)
548
562
549 def wfile(self, f, mode='r'):
563 def wfile(self, f, mode='r'):
550 return self.wopener(f, mode)
564 return self.wopener(f, mode)
551
565
552 def _link(self, f):
566 def _link(self, f):
553 return os.path.islink(self.wjoin(f))
567 return os.path.islink(self.wjoin(f))
554
568
555 def _filter(self, filter, filename, data):
569 def _filter(self, filter, filename, data):
556 if filter not in self.filterpats:
570 if filter not in self.filterpats:
557 l = []
571 l = []
558 for pat, cmd in self.ui.configitems(filter):
572 for pat, cmd in self.ui.configitems(filter):
559 if cmd == '!':
573 if cmd == '!':
560 continue
574 continue
561 mf = match_.match(self.root, '', [pat])
575 mf = match_.match(self.root, '', [pat])
562 fn = None
576 fn = None
563 params = cmd
577 params = cmd
564 for name, filterfn in self._datafilters.iteritems():
578 for name, filterfn in self._datafilters.iteritems():
565 if cmd.startswith(name):
579 if cmd.startswith(name):
566 fn = filterfn
580 fn = filterfn
567 params = cmd[len(name):].lstrip()
581 params = cmd[len(name):].lstrip()
568 break
582 break
569 if not fn:
583 if not fn:
570 fn = lambda s, c, **kwargs: util.filter(s, c)
584 fn = lambda s, c, **kwargs: util.filter(s, c)
571 # Wrap old filters not supporting keyword arguments
585 # Wrap old filters not supporting keyword arguments
572 if not inspect.getargspec(fn)[2]:
586 if not inspect.getargspec(fn)[2]:
573 oldfn = fn
587 oldfn = fn
574 fn = lambda s, c, **kwargs: oldfn(s, c)
588 fn = lambda s, c, **kwargs: oldfn(s, c)
575 l.append((mf, fn, params))
589 l.append((mf, fn, params))
576 self.filterpats[filter] = l
590 self.filterpats[filter] = l
577
591
578 for mf, fn, cmd in self.filterpats[filter]:
592 for mf, fn, cmd in self.filterpats[filter]:
579 if mf(filename):
593 if mf(filename):
580 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
594 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
581 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
595 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
582 break
596 break
583
597
584 return data
598 return data
585
599
586 def adddatafilter(self, name, filter):
600 def adddatafilter(self, name, filter):
587 self._datafilters[name] = filter
601 self._datafilters[name] = filter
588
602
589 def wread(self, filename):
603 def wread(self, filename):
590 if self._link(filename):
604 if self._link(filename):
591 data = os.readlink(self.wjoin(filename))
605 data = os.readlink(self.wjoin(filename))
592 else:
606 else:
593 data = self.wopener(filename, 'r').read()
607 data = self.wopener(filename, 'r').read()
594 return self._filter("encode", filename, data)
608 return self._filter("encode", filename, data)
595
609
596 def wwrite(self, filename, data, flags):
610 def wwrite(self, filename, data, flags):
597 data = self._filter("decode", filename, data)
611 data = self._filter("decode", filename, data)
598 try:
612 try:
599 os.unlink(self.wjoin(filename))
613 os.unlink(self.wjoin(filename))
600 except OSError:
614 except OSError:
601 pass
615 pass
602 if 'l' in flags:
616 if 'l' in flags:
603 self.wopener.symlink(data, filename)
617 self.wopener.symlink(data, filename)
604 else:
618 else:
605 self.wopener(filename, 'w').write(data)
619 self.wopener(filename, 'w').write(data)
606 if 'x' in flags:
620 if 'x' in flags:
607 util.set_flags(self.wjoin(filename), False, True)
621 util.set_flags(self.wjoin(filename), False, True)
608
622
609 def wwritedata(self, filename, data):
623 def wwritedata(self, filename, data):
610 return self._filter("decode", filename, data)
624 return self._filter("decode", filename, data)
611
625
612 def transaction(self):
626 def transaction(self):
613 tr = self._transref and self._transref() or None
627 tr = self._transref and self._transref() or None
614 if tr and tr.running():
628 if tr and tr.running():
615 return tr.nest()
629 return tr.nest()
616
630
617 # abort here if the journal already exists
631 # abort here if the journal already exists
618 if os.path.exists(self.sjoin("journal")):
632 if os.path.exists(self.sjoin("journal")):
619 raise error.RepoError(_("journal already exists - run hg recover"))
633 raise error.RepoError(_("journal already exists - run hg recover"))
620
634
621 # save dirstate for rollback
635 # save dirstate for rollback
622 try:
636 try:
623 ds = self.opener("dirstate").read()
637 ds = self.opener("dirstate").read()
624 except IOError:
638 except IOError:
625 ds = ""
639 ds = ""
626 self.opener("journal.dirstate", "w").write(ds)
640 self.opener("journal.dirstate", "w").write(ds)
627 self.opener("journal.branch", "w").write(self.dirstate.branch())
641 self.opener("journal.branch", "w").write(self.dirstate.branch())
628
642
629 renames = [(self.sjoin("journal"), self.sjoin("undo")),
643 renames = [(self.sjoin("journal"), self.sjoin("undo")),
630 (self.join("journal.dirstate"), self.join("undo.dirstate")),
644 (self.join("journal.dirstate"), self.join("undo.dirstate")),
631 (self.join("journal.branch"), self.join("undo.branch"))]
645 (self.join("journal.branch"), self.join("undo.branch"))]
632 tr = transaction.transaction(self.ui.warn, self.sopener,
646 tr = transaction.transaction(self.ui.warn, self.sopener,
633 self.sjoin("journal"),
647 self.sjoin("journal"),
634 aftertrans(renames),
648 aftertrans(renames),
635 self.store.createmode)
649 self.store.createmode)
636 self._transref = weakref.ref(tr)
650 self._transref = weakref.ref(tr)
637 return tr
651 return tr
638
652
639 def recover(self):
653 def recover(self):
640 lock = self.lock()
654 lock = self.lock()
641 try:
655 try:
642 if os.path.exists(self.sjoin("journal")):
656 if os.path.exists(self.sjoin("journal")):
643 self.ui.status(_("rolling back interrupted transaction\n"))
657 self.ui.status(_("rolling back interrupted transaction\n"))
644 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
658 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
645 self.invalidate()
659 self.invalidate()
646 return True
660 return True
647 else:
661 else:
648 self.ui.warn(_("no interrupted transaction available\n"))
662 self.ui.warn(_("no interrupted transaction available\n"))
649 return False
663 return False
650 finally:
664 finally:
651 lock.release()
665 lock.release()
652
666
653 def rollback(self):
667 def rollback(self):
654 wlock = lock = None
668 wlock = lock = None
655 try:
669 try:
656 wlock = self.wlock()
670 wlock = self.wlock()
657 lock = self.lock()
671 lock = self.lock()
658 if os.path.exists(self.sjoin("undo")):
672 if os.path.exists(self.sjoin("undo")):
659 self.ui.status(_("rolling back last transaction\n"))
673 self.ui.status(_("rolling back last transaction\n"))
660 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
674 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
661 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
675 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
662 try:
676 try:
663 branch = self.opener("undo.branch").read()
677 branch = self.opener("undo.branch").read()
664 self.dirstate.setbranch(branch)
678 self.dirstate.setbranch(branch)
665 except IOError:
679 except IOError:
666 self.ui.warn(_("Named branch could not be reset, "
680 self.ui.warn(_("Named branch could not be reset, "
667 "current branch still is: %s\n")
681 "current branch still is: %s\n")
668 % encoding.tolocal(self.dirstate.branch()))
682 % encoding.tolocal(self.dirstate.branch()))
669 self.invalidate()
683 self.invalidate()
670 self.dirstate.invalidate()
684 self.dirstate.invalidate()
671 else:
685 else:
672 self.ui.warn(_("no rollback information available\n"))
686 self.ui.warn(_("no rollback information available\n"))
673 finally:
687 finally:
674 release(lock, wlock)
688 release(lock, wlock)
675
689
676 def invalidate(self):
690 def invalidate(self):
677 for a in "changelog manifest".split():
691 for a in "changelog manifest".split():
678 if a in self.__dict__:
692 if a in self.__dict__:
679 delattr(self, a)
693 delattr(self, a)
680 self.tagscache = None
694 self.tagscache = None
681 self._tagstypecache = None
695 self._tagstypecache = None
682 self.nodetagscache = None
696 self.nodetagscache = None
683 self.branchcache = None
697 self.branchcache = None
684 self._ubranchcache = None
698 self._ubranchcache = None
685 self._branchcachetip = None
699 self._branchcachetip = None
686
700
687 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
701 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
688 try:
702 try:
689 l = lock.lock(lockname, 0, releasefn, desc=desc)
703 l = lock.lock(lockname, 0, releasefn, desc=desc)
690 except error.LockHeld, inst:
704 except error.LockHeld, inst:
691 if not wait:
705 if not wait:
692 raise
706 raise
693 self.ui.warn(_("waiting for lock on %s held by %r\n") %
707 self.ui.warn(_("waiting for lock on %s held by %r\n") %
694 (desc, inst.locker))
708 (desc, inst.locker))
695 # default to 600 seconds timeout
709 # default to 600 seconds timeout
696 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
710 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
697 releasefn, desc=desc)
711 releasefn, desc=desc)
698 if acquirefn:
712 if acquirefn:
699 acquirefn()
713 acquirefn()
700 return l
714 return l
701
715
702 def lock(self, wait=True):
716 def lock(self, wait=True):
703 l = self._lockref and self._lockref()
717 l = self._lockref and self._lockref()
704 if l is not None and l.held:
718 if l is not None and l.held:
705 l.lock()
719 l.lock()
706 return l
720 return l
707
721
708 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
722 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
709 _('repository %s') % self.origroot)
723 _('repository %s') % self.origroot)
710 self._lockref = weakref.ref(l)
724 self._lockref = weakref.ref(l)
711 return l
725 return l
712
726
713 def wlock(self, wait=True):
727 def wlock(self, wait=True):
714 l = self._wlockref and self._wlockref()
728 l = self._wlockref and self._wlockref()
715 if l is not None and l.held:
729 if l is not None and l.held:
716 l.lock()
730 l.lock()
717 return l
731 return l
718
732
719 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
733 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
720 self.dirstate.invalidate, _('working directory of %s') %
734 self.dirstate.invalidate, _('working directory of %s') %
721 self.origroot)
735 self.origroot)
722 self._wlockref = weakref.ref(l)
736 self._wlockref = weakref.ref(l)
723 return l
737 return l
724
738
725 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
739 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
726 """
740 """
727 commit an individual file as part of a larger transaction
741 commit an individual file as part of a larger transaction
728 """
742 """
729
743
730 fname = fctx.path()
744 fname = fctx.path()
731 text = fctx.data()
745 text = fctx.data()
732 flog = self.file(fname)
746 flog = self.file(fname)
733 fparent1 = manifest1.get(fname, nullid)
747 fparent1 = manifest1.get(fname, nullid)
734 fparent2 = fparent2o = manifest2.get(fname, nullid)
748 fparent2 = fparent2o = manifest2.get(fname, nullid)
735
749
736 meta = {}
750 meta = {}
737 copy = fctx.renamed()
751 copy = fctx.renamed()
738 if copy and copy[0] != fname:
752 if copy and copy[0] != fname:
739 # Mark the new revision of this file as a copy of another
753 # Mark the new revision of this file as a copy of another
740 # file. This copy data will effectively act as a parent
754 # file. This copy data will effectively act as a parent
741 # of this new revision. If this is a merge, the first
755 # of this new revision. If this is a merge, the first
742 # parent will be the nullid (meaning "look up the copy data")
756 # parent will be the nullid (meaning "look up the copy data")
743 # and the second one will be the other parent. For example:
757 # and the second one will be the other parent. For example:
744 #
758 #
745 # 0 --- 1 --- 3 rev1 changes file foo
759 # 0 --- 1 --- 3 rev1 changes file foo
746 # \ / rev2 renames foo to bar and changes it
760 # \ / rev2 renames foo to bar and changes it
747 # \- 2 -/ rev3 should have bar with all changes and
761 # \- 2 -/ rev3 should have bar with all changes and
748 # should record that bar descends from
762 # should record that bar descends from
749 # bar in rev2 and foo in rev1
763 # bar in rev2 and foo in rev1
750 #
764 #
751 # this allows this merge to succeed:
765 # this allows this merge to succeed:
752 #
766 #
753 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
767 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
754 # \ / merging rev3 and rev4 should use bar@rev2
768 # \ / merging rev3 and rev4 should use bar@rev2
755 # \- 2 --- 4 as the merge base
769 # \- 2 --- 4 as the merge base
756 #
770 #
757
771
758 cfname = copy[0]
772 cfname = copy[0]
759 crev = manifest1.get(cfname)
773 crev = manifest1.get(cfname)
760 newfparent = fparent2
774 newfparent = fparent2
761
775
762 if manifest2: # branch merge
776 if manifest2: # branch merge
763 if fparent2 == nullid or crev is None: # copied on remote side
777 if fparent2 == nullid or crev is None: # copied on remote side
764 if cfname in manifest2:
778 if cfname in manifest2:
765 crev = manifest2[cfname]
779 crev = manifest2[cfname]
766 newfparent = fparent1
780 newfparent = fparent1
767
781
768 # find source in nearest ancestor if we've lost track
782 # find source in nearest ancestor if we've lost track
769 if not crev:
783 if not crev:
770 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
784 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
771 (fname, cfname))
785 (fname, cfname))
772 for ancestor in self['.'].ancestors():
786 for ancestor in self['.'].ancestors():
773 if cfname in ancestor:
787 if cfname in ancestor:
774 crev = ancestor[cfname].filenode()
788 crev = ancestor[cfname].filenode()
775 break
789 break
776
790
777 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
791 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
778 meta["copy"] = cfname
792 meta["copy"] = cfname
779 meta["copyrev"] = hex(crev)
793 meta["copyrev"] = hex(crev)
780 fparent1, fparent2 = nullid, newfparent
794 fparent1, fparent2 = nullid, newfparent
781 elif fparent2 != nullid:
795 elif fparent2 != nullid:
782 # is one parent an ancestor of the other?
796 # is one parent an ancestor of the other?
783 fparentancestor = flog.ancestor(fparent1, fparent2)
797 fparentancestor = flog.ancestor(fparent1, fparent2)
784 if fparentancestor == fparent1:
798 if fparentancestor == fparent1:
785 fparent1, fparent2 = fparent2, nullid
799 fparent1, fparent2 = fparent2, nullid
786 elif fparentancestor == fparent2:
800 elif fparentancestor == fparent2:
787 fparent2 = nullid
801 fparent2 = nullid
788
802
789 # is the file changed?
803 # is the file changed?
790 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
804 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
791 changelist.append(fname)
805 changelist.append(fname)
792 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
806 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
793
807
794 # are just the flags changed during merge?
808 # are just the flags changed during merge?
795 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
809 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
796 changelist.append(fname)
810 changelist.append(fname)
797
811
798 return fparent1
812 return fparent1
799
813
800 def commit(self, text="", user=None, date=None, match=None, force=False,
814 def commit(self, text="", user=None, date=None, match=None, force=False,
801 editor=False, extra={}):
815 editor=False, extra={}):
802 """Add a new revision to current repository.
816 """Add a new revision to current repository.
803
817
804 Revision information is gathered from the working directory,
818 Revision information is gathered from the working directory,
805 match can be used to filter the committed files. If editor is
819 match can be used to filter the committed files. If editor is
806 supplied, it is called to get a commit message.
820 supplied, it is called to get a commit message.
807 """
821 """
808
822
809 def fail(f, msg):
823 def fail(f, msg):
810 raise util.Abort('%s: %s' % (f, msg))
824 raise util.Abort('%s: %s' % (f, msg))
811
825
812 if not match:
826 if not match:
813 match = match_.always(self.root, '')
827 match = match_.always(self.root, '')
814
828
815 if not force:
829 if not force:
816 vdirs = []
830 vdirs = []
817 match.dir = vdirs.append
831 match.dir = vdirs.append
818 match.bad = fail
832 match.bad = fail
819
833
820 wlock = self.wlock()
834 wlock = self.wlock()
821 try:
835 try:
822 p1, p2 = self.dirstate.parents()
836 p1, p2 = self.dirstate.parents()
823 wctx = self[None]
837 wctx = self[None]
824
838
825 if (not force and p2 != nullid and match and
839 if (not force and p2 != nullid and match and
826 (match.files() or match.anypats())):
840 (match.files() or match.anypats())):
827 raise util.Abort(_('cannot partially commit a merge '
841 raise util.Abort(_('cannot partially commit a merge '
828 '(do not specify files or patterns)'))
842 '(do not specify files or patterns)'))
829
843
830 changes = self.status(match=match, clean=force)
844 changes = self.status(match=match, clean=force)
831 if force:
845 if force:
832 changes[0].extend(changes[6]) # mq may commit unchanged files
846 changes[0].extend(changes[6]) # mq may commit unchanged files
833
847
834 # check subrepos
848 # check subrepos
835 subs = []
849 subs = []
836 for s in wctx.substate:
850 for s in wctx.substate:
837 if match(s) and wctx.sub(s).dirty():
851 if match(s) and wctx.sub(s).dirty():
838 subs.append(s)
852 subs.append(s)
839 if subs and '.hgsubstate' not in changes[0]:
853 if subs and '.hgsubstate' not in changes[0]:
840 changes[0].insert(0, '.hgsubstate')
854 changes[0].insert(0, '.hgsubstate')
841
855
842 # make sure all explicit patterns are matched
856 # make sure all explicit patterns are matched
843 if not force and match.files():
857 if not force and match.files():
844 matched = set(changes[0] + changes[1] + changes[2])
858 matched = set(changes[0] + changes[1] + changes[2])
845
859
846 for f in match.files():
860 for f in match.files():
847 if f == '.' or f in matched or f in wctx.substate:
861 if f == '.' or f in matched or f in wctx.substate:
848 continue
862 continue
849 if f in changes[3]: # missing
863 if f in changes[3]: # missing
850 fail(f, _('file not found!'))
864 fail(f, _('file not found!'))
851 if f in vdirs: # visited directory
865 if f in vdirs: # visited directory
852 d = f + '/'
866 d = f + '/'
853 for mf in matched:
867 for mf in matched:
854 if mf.startswith(d):
868 if mf.startswith(d):
855 break
869 break
856 else:
870 else:
857 fail(f, _("no match under directory!"))
871 fail(f, _("no match under directory!"))
858 elif f not in self.dirstate:
872 elif f not in self.dirstate:
859 fail(f, _("file not tracked!"))
873 fail(f, _("file not tracked!"))
860
874
861 if (not force and not extra.get("close") and p2 == nullid
875 if (not force and not extra.get("close") and p2 == nullid
862 and not (changes[0] or changes[1] or changes[2])
876 and not (changes[0] or changes[1] or changes[2])
863 and self[None].branch() == self['.'].branch()):
877 and self[None].branch() == self['.'].branch()):
864 return None
878 return None
865
879
866 ms = merge_.mergestate(self)
880 ms = merge_.mergestate(self)
867 for f in changes[0]:
881 for f in changes[0]:
868 if f in ms and ms[f] == 'u':
882 if f in ms and ms[f] == 'u':
869 raise util.Abort(_("unresolved merge conflicts "
883 raise util.Abort(_("unresolved merge conflicts "
870 "(see hg resolve)"))
884 "(see hg resolve)"))
871
885
872 cctx = context.workingctx(self, (p1, p2), text, user, date,
886 cctx = context.workingctx(self, (p1, p2), text, user, date,
873 extra, changes)
887 extra, changes)
874 if editor:
888 if editor:
875 cctx._text = editor(self, cctx, subs)
889 cctx._text = editor(self, cctx, subs)
876
890
877 # commit subs
891 # commit subs
878 if subs:
892 if subs:
879 state = wctx.substate.copy()
893 state = wctx.substate.copy()
880 for s in subs:
894 for s in subs:
881 self.ui.status(_('committing subrepository %s\n') % s)
895 self.ui.status(_('committing subrepository %s\n') % s)
882 sr = wctx.sub(s).commit(cctx._text, user, date)
896 sr = wctx.sub(s).commit(cctx._text, user, date)
883 state[s] = (state[s][0], sr)
897 state[s] = (state[s][0], sr)
884 subrepo.writestate(self, state)
898 subrepo.writestate(self, state)
885
899
886 ret = self.commitctx(cctx, True)
900 ret = self.commitctx(cctx, True)
887
901
888 # update dirstate and mergestate
902 # update dirstate and mergestate
889 for f in changes[0] + changes[1]:
903 for f in changes[0] + changes[1]:
890 self.dirstate.normal(f)
904 self.dirstate.normal(f)
891 for f in changes[2]:
905 for f in changes[2]:
892 self.dirstate.forget(f)
906 self.dirstate.forget(f)
893 self.dirstate.setparents(ret)
907 self.dirstate.setparents(ret)
894 ms.reset()
908 ms.reset()
895
909
896 return ret
910 return ret
897
911
898 finally:
912 finally:
899 wlock.release()
913 wlock.release()
900
914
901 def commitctx(self, ctx, error=False):
915 def commitctx(self, ctx, error=False):
902 """Add a new revision to current repository.
916 """Add a new revision to current repository.
903
917
904 Revision information is passed via the context argument.
918 Revision information is passed via the context argument.
905 """
919 """
906
920
907 tr = lock = None
921 tr = lock = None
908 removed = ctx.removed()
922 removed = ctx.removed()
909 p1, p2 = ctx.p1(), ctx.p2()
923 p1, p2 = ctx.p1(), ctx.p2()
910 m1 = p1.manifest().copy()
924 m1 = p1.manifest().copy()
911 m2 = p2.manifest()
925 m2 = p2.manifest()
912 user = ctx.user()
926 user = ctx.user()
913
927
914 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
928 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
915 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
929 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
916
930
917 lock = self.lock()
931 lock = self.lock()
918 try:
932 try:
919 tr = self.transaction()
933 tr = self.transaction()
920 trp = weakref.proxy(tr)
934 trp = weakref.proxy(tr)
921
935
922 # check in files
936 # check in files
923 new = {}
937 new = {}
924 changed = []
938 changed = []
925 linkrev = len(self)
939 linkrev = len(self)
926 for f in sorted(ctx.modified() + ctx.added()):
940 for f in sorted(ctx.modified() + ctx.added()):
927 self.ui.note(f + "\n")
941 self.ui.note(f + "\n")
928 try:
942 try:
929 fctx = ctx[f]
943 fctx = ctx[f]
930 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
944 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
931 changed)
945 changed)
932 m1.set(f, fctx.flags())
946 m1.set(f, fctx.flags())
933 except (OSError, IOError):
947 except (OSError, IOError):
934 if error:
948 if error:
935 self.ui.warn(_("trouble committing %s!\n") % f)
949 self.ui.warn(_("trouble committing %s!\n") % f)
936 raise
950 raise
937 else:
951 else:
938 removed.append(f)
952 removed.append(f)
939
953
940 # update manifest
954 # update manifest
941 m1.update(new)
955 m1.update(new)
942 removed = [f for f in sorted(removed) if f in m1 or f in m2]
956 removed = [f for f in sorted(removed) if f in m1 or f in m2]
943 drop = [f for f in removed if f in m1]
957 drop = [f for f in removed if f in m1]
944 for f in drop:
958 for f in drop:
945 del m1[f]
959 del m1[f]
946 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
960 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
947 p2.manifestnode(), (new, drop))
961 p2.manifestnode(), (new, drop))
948
962
949 # update changelog
963 # update changelog
950 self.changelog.delayupdate()
964 self.changelog.delayupdate()
951 n = self.changelog.add(mn, changed + removed, ctx.description(),
965 n = self.changelog.add(mn, changed + removed, ctx.description(),
952 trp, p1.node(), p2.node(),
966 trp, p1.node(), p2.node(),
953 user, ctx.date(), ctx.extra().copy())
967 user, ctx.date(), ctx.extra().copy())
954 p = lambda: self.changelog.writepending() and self.root or ""
968 p = lambda: self.changelog.writepending() and self.root or ""
955 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
969 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
956 parent2=xp2, pending=p)
970 parent2=xp2, pending=p)
957 self.changelog.finalize(trp)
971 self.changelog.finalize(trp)
958 tr.close()
972 tr.close()
959
973
960 if self.branchcache:
974 if self.branchcache:
961 self.branchtags()
975 self.branchtags()
962
976
963 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
977 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
964 return n
978 return n
965 finally:
979 finally:
966 del tr
980 del tr
967 lock.release()
981 lock.release()
968
982
969 def walk(self, match, node=None):
983 def walk(self, match, node=None):
970 '''
984 '''
971 walk recursively through the directory tree or a given
985 walk recursively through the directory tree or a given
972 changeset, finding all files matched by the match
986 changeset, finding all files matched by the match
973 function
987 function
974 '''
988 '''
975 return self[node].walk(match)
989 return self[node].walk(match)
976
990
977 def status(self, node1='.', node2=None, match=None,
991 def status(self, node1='.', node2=None, match=None,
978 ignored=False, clean=False, unknown=False):
992 ignored=False, clean=False, unknown=False):
979 """return status of files between two nodes or node and working directory
993 """return status of files between two nodes or node and working directory
980
994
981 If node1 is None, use the first dirstate parent instead.
995 If node1 is None, use the first dirstate parent instead.
982 If node2 is None, compare node1 with working directory.
996 If node2 is None, compare node1 with working directory.
983 """
997 """
984
998
985 def mfmatches(ctx):
999 def mfmatches(ctx):
986 mf = ctx.manifest().copy()
1000 mf = ctx.manifest().copy()
987 for fn in mf.keys():
1001 for fn in mf.keys():
988 if not match(fn):
1002 if not match(fn):
989 del mf[fn]
1003 del mf[fn]
990 return mf
1004 return mf
991
1005
992 if isinstance(node1, context.changectx):
1006 if isinstance(node1, context.changectx):
993 ctx1 = node1
1007 ctx1 = node1
994 else:
1008 else:
995 ctx1 = self[node1]
1009 ctx1 = self[node1]
996 if isinstance(node2, context.changectx):
1010 if isinstance(node2, context.changectx):
997 ctx2 = node2
1011 ctx2 = node2
998 else:
1012 else:
999 ctx2 = self[node2]
1013 ctx2 = self[node2]
1000
1014
1001 working = ctx2.rev() is None
1015 working = ctx2.rev() is None
1002 parentworking = working and ctx1 == self['.']
1016 parentworking = working and ctx1 == self['.']
1003 match = match or match_.always(self.root, self.getcwd())
1017 match = match or match_.always(self.root, self.getcwd())
1004 listignored, listclean, listunknown = ignored, clean, unknown
1018 listignored, listclean, listunknown = ignored, clean, unknown
1005
1019
1006 # load earliest manifest first for caching reasons
1020 # load earliest manifest first for caching reasons
1007 if not working and ctx2.rev() < ctx1.rev():
1021 if not working and ctx2.rev() < ctx1.rev():
1008 ctx2.manifest()
1022 ctx2.manifest()
1009
1023
1010 if not parentworking:
1024 if not parentworking:
1011 def bad(f, msg):
1025 def bad(f, msg):
1012 if f not in ctx1:
1026 if f not in ctx1:
1013 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1027 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1014 match.bad = bad
1028 match.bad = bad
1015
1029
1016 if working: # we need to scan the working dir
1030 if working: # we need to scan the working dir
1017 s = self.dirstate.status(match, listignored, listclean, listunknown)
1031 s = self.dirstate.status(match, listignored, listclean, listunknown)
1018 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1032 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1019
1033
1020 # check for any possibly clean files
1034 # check for any possibly clean files
1021 if parentworking and cmp:
1035 if parentworking and cmp:
1022 fixup = []
1036 fixup = []
1023 # do a full compare of any files that might have changed
1037 # do a full compare of any files that might have changed
1024 for f in sorted(cmp):
1038 for f in sorted(cmp):
1025 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1039 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1026 or ctx1[f].cmp(ctx2[f].data())):
1040 or ctx1[f].cmp(ctx2[f].data())):
1027 modified.append(f)
1041 modified.append(f)
1028 else:
1042 else:
1029 fixup.append(f)
1043 fixup.append(f)
1030
1044
1031 if listclean:
1045 if listclean:
1032 clean += fixup
1046 clean += fixup
1033
1047
1034 # update dirstate for files that are actually clean
1048 # update dirstate for files that are actually clean
1035 if fixup:
1049 if fixup:
1036 try:
1050 try:
1037 # updating the dirstate is optional
1051 # updating the dirstate is optional
1038 # so we don't wait on the lock
1052 # so we don't wait on the lock
1039 wlock = self.wlock(False)
1053 wlock = self.wlock(False)
1040 try:
1054 try:
1041 for f in fixup:
1055 for f in fixup:
1042 self.dirstate.normal(f)
1056 self.dirstate.normal(f)
1043 finally:
1057 finally:
1044 wlock.release()
1058 wlock.release()
1045 except error.LockError:
1059 except error.LockError:
1046 pass
1060 pass
1047
1061
1048 if not parentworking:
1062 if not parentworking:
1049 mf1 = mfmatches(ctx1)
1063 mf1 = mfmatches(ctx1)
1050 if working:
1064 if working:
1051 # we are comparing working dir against non-parent
1065 # we are comparing working dir against non-parent
1052 # generate a pseudo-manifest for the working dir
1066 # generate a pseudo-manifest for the working dir
1053 mf2 = mfmatches(self['.'])
1067 mf2 = mfmatches(self['.'])
1054 for f in cmp + modified + added:
1068 for f in cmp + modified + added:
1055 mf2[f] = None
1069 mf2[f] = None
1056 mf2.set(f, ctx2.flags(f))
1070 mf2.set(f, ctx2.flags(f))
1057 for f in removed:
1071 for f in removed:
1058 if f in mf2:
1072 if f in mf2:
1059 del mf2[f]
1073 del mf2[f]
1060 else:
1074 else:
1061 # we are comparing two revisions
1075 # we are comparing two revisions
1062 deleted, unknown, ignored = [], [], []
1076 deleted, unknown, ignored = [], [], []
1063 mf2 = mfmatches(ctx2)
1077 mf2 = mfmatches(ctx2)
1064
1078
1065 modified, added, clean = [], [], []
1079 modified, added, clean = [], [], []
1066 for fn in mf2:
1080 for fn in mf2:
1067 if fn in mf1:
1081 if fn in mf1:
1068 if (mf1.flags(fn) != mf2.flags(fn) or
1082 if (mf1.flags(fn) != mf2.flags(fn) or
1069 (mf1[fn] != mf2[fn] and
1083 (mf1[fn] != mf2[fn] and
1070 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1084 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1071 modified.append(fn)
1085 modified.append(fn)
1072 elif listclean:
1086 elif listclean:
1073 clean.append(fn)
1087 clean.append(fn)
1074 del mf1[fn]
1088 del mf1[fn]
1075 else:
1089 else:
1076 added.append(fn)
1090 added.append(fn)
1077 removed = mf1.keys()
1091 removed = mf1.keys()
1078
1092
1079 r = modified, added, removed, deleted, unknown, ignored, clean
1093 r = modified, added, removed, deleted, unknown, ignored, clean
1080 [l.sort() for l in r]
1094 [l.sort() for l in r]
1081 return r
1095 return r
1082
1096
1083 def add(self, list):
1097 def add(self, list):
1084 wlock = self.wlock()
1098 wlock = self.wlock()
1085 try:
1099 try:
1086 rejected = []
1100 rejected = []
1087 for f in list:
1101 for f in list:
1088 p = self.wjoin(f)
1102 p = self.wjoin(f)
1089 try:
1103 try:
1090 st = os.lstat(p)
1104 st = os.lstat(p)
1091 except:
1105 except:
1092 self.ui.warn(_("%s does not exist!\n") % f)
1106 self.ui.warn(_("%s does not exist!\n") % f)
1093 rejected.append(f)
1107 rejected.append(f)
1094 continue
1108 continue
1095 if st.st_size > 10000000:
1109 if st.st_size > 10000000:
1096 self.ui.warn(_("%s: files over 10MB may cause memory and"
1110 self.ui.warn(_("%s: files over 10MB may cause memory and"
1097 " performance problems\n"
1111 " performance problems\n"
1098 "(use 'hg revert %s' to unadd the file)\n")
1112 "(use 'hg revert %s' to unadd the file)\n")
1099 % (f, f))
1113 % (f, f))
1100 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1114 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1101 self.ui.warn(_("%s not added: only files and symlinks "
1115 self.ui.warn(_("%s not added: only files and symlinks "
1102 "supported currently\n") % f)
1116 "supported currently\n") % f)
1103 rejected.append(p)
1117 rejected.append(p)
1104 elif self.dirstate[f] in 'amn':
1118 elif self.dirstate[f] in 'amn':
1105 self.ui.warn(_("%s already tracked!\n") % f)
1119 self.ui.warn(_("%s already tracked!\n") % f)
1106 elif self.dirstate[f] == 'r':
1120 elif self.dirstate[f] == 'r':
1107 self.dirstate.normallookup(f)
1121 self.dirstate.normallookup(f)
1108 else:
1122 else:
1109 self.dirstate.add(f)
1123 self.dirstate.add(f)
1110 return rejected
1124 return rejected
1111 finally:
1125 finally:
1112 wlock.release()
1126 wlock.release()
1113
1127
1114 def forget(self, list):
1128 def forget(self, list):
1115 wlock = self.wlock()
1129 wlock = self.wlock()
1116 try:
1130 try:
1117 for f in list:
1131 for f in list:
1118 if self.dirstate[f] != 'a':
1132 if self.dirstate[f] != 'a':
1119 self.ui.warn(_("%s not added!\n") % f)
1133 self.ui.warn(_("%s not added!\n") % f)
1120 else:
1134 else:
1121 self.dirstate.forget(f)
1135 self.dirstate.forget(f)
1122 finally:
1136 finally:
1123 wlock.release()
1137 wlock.release()
1124
1138
1125 def remove(self, list, unlink=False):
1139 def remove(self, list, unlink=False):
1126 if unlink:
1140 if unlink:
1127 for f in list:
1141 for f in list:
1128 try:
1142 try:
1129 util.unlink(self.wjoin(f))
1143 util.unlink(self.wjoin(f))
1130 except OSError, inst:
1144 except OSError, inst:
1131 if inst.errno != errno.ENOENT:
1145 if inst.errno != errno.ENOENT:
1132 raise
1146 raise
1133 wlock = self.wlock()
1147 wlock = self.wlock()
1134 try:
1148 try:
1135 for f in list:
1149 for f in list:
1136 if unlink and os.path.exists(self.wjoin(f)):
1150 if unlink and os.path.exists(self.wjoin(f)):
1137 self.ui.warn(_("%s still exists!\n") % f)
1151 self.ui.warn(_("%s still exists!\n") % f)
1138 elif self.dirstate[f] == 'a':
1152 elif self.dirstate[f] == 'a':
1139 self.dirstate.forget(f)
1153 self.dirstate.forget(f)
1140 elif f not in self.dirstate:
1154 elif f not in self.dirstate:
1141 self.ui.warn(_("%s not tracked!\n") % f)
1155 self.ui.warn(_("%s not tracked!\n") % f)
1142 else:
1156 else:
1143 self.dirstate.remove(f)
1157 self.dirstate.remove(f)
1144 finally:
1158 finally:
1145 wlock.release()
1159 wlock.release()
1146
1160
1147 def undelete(self, list):
1161 def undelete(self, list):
1148 manifests = [self.manifest.read(self.changelog.read(p)[0])
1162 manifests = [self.manifest.read(self.changelog.read(p)[0])
1149 for p in self.dirstate.parents() if p != nullid]
1163 for p in self.dirstate.parents() if p != nullid]
1150 wlock = self.wlock()
1164 wlock = self.wlock()
1151 try:
1165 try:
1152 for f in list:
1166 for f in list:
1153 if self.dirstate[f] != 'r':
1167 if self.dirstate[f] != 'r':
1154 self.ui.warn(_("%s not removed!\n") % f)
1168 self.ui.warn(_("%s not removed!\n") % f)
1155 else:
1169 else:
1156 m = f in manifests[0] and manifests[0] or manifests[1]
1170 m = f in manifests[0] and manifests[0] or manifests[1]
1157 t = self.file(f).read(m[f])
1171 t = self.file(f).read(m[f])
1158 self.wwrite(f, t, m.flags(f))
1172 self.wwrite(f, t, m.flags(f))
1159 self.dirstate.normal(f)
1173 self.dirstate.normal(f)
1160 finally:
1174 finally:
1161 wlock.release()
1175 wlock.release()
1162
1176
1163 def copy(self, source, dest):
1177 def copy(self, source, dest):
1164 p = self.wjoin(dest)
1178 p = self.wjoin(dest)
1165 if not (os.path.exists(p) or os.path.islink(p)):
1179 if not (os.path.exists(p) or os.path.islink(p)):
1166 self.ui.warn(_("%s does not exist!\n") % dest)
1180 self.ui.warn(_("%s does not exist!\n") % dest)
1167 elif not (os.path.isfile(p) or os.path.islink(p)):
1181 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 self.ui.warn(_("copy failed: %s is not a file or a "
1182 self.ui.warn(_("copy failed: %s is not a file or a "
1169 "symbolic link\n") % dest)
1183 "symbolic link\n") % dest)
1170 else:
1184 else:
1171 wlock = self.wlock()
1185 wlock = self.wlock()
1172 try:
1186 try:
1173 if self.dirstate[dest] in '?r':
1187 if self.dirstate[dest] in '?r':
1174 self.dirstate.add(dest)
1188 self.dirstate.add(dest)
1175 self.dirstate.copy(source, dest)
1189 self.dirstate.copy(source, dest)
1176 finally:
1190 finally:
1177 wlock.release()
1191 wlock.release()
1178
1192
1179 def heads(self, start=None):
1193 def heads(self, start=None):
1180 heads = self.changelog.heads(start)
1194 heads = self.changelog.heads(start)
1181 # sort the output in rev descending order
1195 # sort the output in rev descending order
1182 heads = [(-self.changelog.rev(h), h) for h in heads]
1196 heads = [(-self.changelog.rev(h), h) for h in heads]
1183 return [n for (r, n) in sorted(heads)]
1197 return [n for (r, n) in sorted(heads)]
1184
1198
1185 def branchheads(self, branch=None, start=None, closed=False):
1199 def branchheads(self, branch=None, start=None, closed=False):
1186 if branch is None:
1200 if branch is None:
1187 branch = self[None].branch()
1201 branch = self[None].branch()
1188 branches = self.branchmap()
1202 branches = self.branchmap()
1189 if branch not in branches:
1203 if branch not in branches:
1190 return []
1204 return []
1191 bheads = branches[branch]
1205 bheads = branches[branch]
1192 # the cache returns heads ordered lowest to highest
1206 # the cache returns heads ordered lowest to highest
1193 bheads.reverse()
1207 bheads.reverse()
1194 if start is not None:
1208 if start is not None:
1195 # filter out the heads that cannot be reached from startrev
1209 # filter out the heads that cannot be reached from startrev
1196 bheads = self.changelog.nodesbetween([start], bheads)[2]
1210 bheads = self.changelog.nodesbetween([start], bheads)[2]
1197 if not closed:
1211 if not closed:
1198 bheads = [h for h in bheads if
1212 bheads = [h for h in bheads if
1199 ('close' not in self.changelog.read(h)[5])]
1213 ('close' not in self.changelog.read(h)[5])]
1200 return bheads
1214 return bheads
1201
1215
1202 def branches(self, nodes):
1216 def branches(self, nodes):
1203 if not nodes:
1217 if not nodes:
1204 nodes = [self.changelog.tip()]
1218 nodes = [self.changelog.tip()]
1205 b = []
1219 b = []
1206 for n in nodes:
1220 for n in nodes:
1207 t = n
1221 t = n
1208 while 1:
1222 while 1:
1209 p = self.changelog.parents(n)
1223 p = self.changelog.parents(n)
1210 if p[1] != nullid or p[0] == nullid:
1224 if p[1] != nullid or p[0] == nullid:
1211 b.append((t, n, p[0], p[1]))
1225 b.append((t, n, p[0], p[1]))
1212 break
1226 break
1213 n = p[0]
1227 n = p[0]
1214 return b
1228 return b
1215
1229
1216 def between(self, pairs):
1230 def between(self, pairs):
1217 r = []
1231 r = []
1218
1232
1219 for top, bottom in pairs:
1233 for top, bottom in pairs:
1220 n, l, i = top, [], 0
1234 n, l, i = top, [], 0
1221 f = 1
1235 f = 1
1222
1236
1223 while n != bottom and n != nullid:
1237 while n != bottom and n != nullid:
1224 p = self.changelog.parents(n)[0]
1238 p = self.changelog.parents(n)[0]
1225 if i == f:
1239 if i == f:
1226 l.append(n)
1240 l.append(n)
1227 f = f * 2
1241 f = f * 2
1228 n = p
1242 n = p
1229 i += 1
1243 i += 1
1230
1244
1231 r.append(l)
1245 r.append(l)
1232
1246
1233 return r
1247 return r
1234
1248
1235 def findincoming(self, remote, base=None, heads=None, force=False):
1249 def findincoming(self, remote, base=None, heads=None, force=False):
1236 """Return list of roots of the subsets of missing nodes from remote
1250 """Return list of roots of the subsets of missing nodes from remote
1237
1251
1238 If base dict is specified, assume that these nodes and their parents
1252 If base dict is specified, assume that these nodes and their parents
1239 exist on the remote side and that no child of a node of base exists
1253 exist on the remote side and that no child of a node of base exists
1240 in both remote and self.
1254 in both remote and self.
1241 Furthermore base will be updated to include the nodes that exists
1255 Furthermore base will be updated to include the nodes that exists
1242 in self and remote but no children exists in self and remote.
1256 in self and remote but no children exists in self and remote.
1243 If a list of heads is specified, return only nodes which are heads
1257 If a list of heads is specified, return only nodes which are heads
1244 or ancestors of these heads.
1258 or ancestors of these heads.
1245
1259
1246 All the ancestors of base are in self and in remote.
1260 All the ancestors of base are in self and in remote.
1247 All the descendants of the list returned are missing in self.
1261 All the descendants of the list returned are missing in self.
1248 (and so we know that the rest of the nodes are missing in remote, see
1262 (and so we know that the rest of the nodes are missing in remote, see
1249 outgoing)
1263 outgoing)
1250 """
1264 """
1251 return self.findcommonincoming(remote, base, heads, force)[1]
1265 return self.findcommonincoming(remote, base, heads, force)[1]
1252
1266
1253 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1267 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1254 """Return a tuple (common, missing roots, heads) used to identify
1268 """Return a tuple (common, missing roots, heads) used to identify
1255 missing nodes from remote.
1269 missing nodes from remote.
1256
1270
1257 If base dict is specified, assume that these nodes and their parents
1271 If base dict is specified, assume that these nodes and their parents
1258 exist on the remote side and that no child of a node of base exists
1272 exist on the remote side and that no child of a node of base exists
1259 in both remote and self.
1273 in both remote and self.
1260 Furthermore base will be updated to include the nodes that exists
1274 Furthermore base will be updated to include the nodes that exists
1261 in self and remote but no children exists in self and remote.
1275 in self and remote but no children exists in self and remote.
1262 If a list of heads is specified, return only nodes which are heads
1276 If a list of heads is specified, return only nodes which are heads
1263 or ancestors of these heads.
1277 or ancestors of these heads.
1264
1278
1265 All the ancestors of base are in self and in remote.
1279 All the ancestors of base are in self and in remote.
1266 """
1280 """
1267 m = self.changelog.nodemap
1281 m = self.changelog.nodemap
1268 search = []
1282 search = []
1269 fetch = set()
1283 fetch = set()
1270 seen = set()
1284 seen = set()
1271 seenbranch = set()
1285 seenbranch = set()
1272 if base is None:
1286 if base is None:
1273 base = {}
1287 base = {}
1274
1288
1275 if not heads:
1289 if not heads:
1276 heads = remote.heads()
1290 heads = remote.heads()
1277
1291
1278 if self.changelog.tip() == nullid:
1292 if self.changelog.tip() == nullid:
1279 base[nullid] = 1
1293 base[nullid] = 1
1280 if heads != [nullid]:
1294 if heads != [nullid]:
1281 return [nullid], [nullid], list(heads)
1295 return [nullid], [nullid], list(heads)
1282 return [nullid], [], []
1296 return [nullid], [], []
1283
1297
1284 # assume we're closer to the tip than the root
1298 # assume we're closer to the tip than the root
1285 # and start by examining the heads
1299 # and start by examining the heads
1286 self.ui.status(_("searching for changes\n"))
1300 self.ui.status(_("searching for changes\n"))
1287
1301
1288 unknown = []
1302 unknown = []
1289 for h in heads:
1303 for h in heads:
1290 if h not in m:
1304 if h not in m:
1291 unknown.append(h)
1305 unknown.append(h)
1292 else:
1306 else:
1293 base[h] = 1
1307 base[h] = 1
1294
1308
1295 heads = unknown
1309 heads = unknown
1296 if not unknown:
1310 if not unknown:
1297 return base.keys(), [], []
1311 return base.keys(), [], []
1298
1312
1299 req = set(unknown)
1313 req = set(unknown)
1300 reqcnt = 0
1314 reqcnt = 0
1301
1315
1302 # search through remote branches
1316 # search through remote branches
1303 # a 'branch' here is a linear segment of history, with four parts:
1317 # a 'branch' here is a linear segment of history, with four parts:
1304 # head, root, first parent, second parent
1318 # head, root, first parent, second parent
1305 # (a branch always has two parents (or none) by definition)
1319 # (a branch always has two parents (or none) by definition)
1306 unknown = remote.branches(unknown)
1320 unknown = remote.branches(unknown)
1307 while unknown:
1321 while unknown:
1308 r = []
1322 r = []
1309 while unknown:
1323 while unknown:
1310 n = unknown.pop(0)
1324 n = unknown.pop(0)
1311 if n[0] in seen:
1325 if n[0] in seen:
1312 continue
1326 continue
1313
1327
1314 self.ui.debug(_("examining %s:%s\n")
1328 self.ui.debug(_("examining %s:%s\n")
1315 % (short(n[0]), short(n[1])))
1329 % (short(n[0]), short(n[1])))
1316 if n[0] == nullid: # found the end of the branch
1330 if n[0] == nullid: # found the end of the branch
1317 pass
1331 pass
1318 elif n in seenbranch:
1332 elif n in seenbranch:
1319 self.ui.debug(_("branch already found\n"))
1333 self.ui.debug(_("branch already found\n"))
1320 continue
1334 continue
1321 elif n[1] and n[1] in m: # do we know the base?
1335 elif n[1] and n[1] in m: # do we know the base?
1322 self.ui.debug(_("found incomplete branch %s:%s\n")
1336 self.ui.debug(_("found incomplete branch %s:%s\n")
1323 % (short(n[0]), short(n[1])))
1337 % (short(n[0]), short(n[1])))
1324 search.append(n[0:2]) # schedule branch range for scanning
1338 search.append(n[0:2]) # schedule branch range for scanning
1325 seenbranch.add(n)
1339 seenbranch.add(n)
1326 else:
1340 else:
1327 if n[1] not in seen and n[1] not in fetch:
1341 if n[1] not in seen and n[1] not in fetch:
1328 if n[2] in m and n[3] in m:
1342 if n[2] in m and n[3] in m:
1329 self.ui.debug(_("found new changeset %s\n") %
1343 self.ui.debug(_("found new changeset %s\n") %
1330 short(n[1]))
1344 short(n[1]))
1331 fetch.add(n[1]) # earliest unknown
1345 fetch.add(n[1]) # earliest unknown
1332 for p in n[2:4]:
1346 for p in n[2:4]:
1333 if p in m:
1347 if p in m:
1334 base[p] = 1 # latest known
1348 base[p] = 1 # latest known
1335
1349
1336 for p in n[2:4]:
1350 for p in n[2:4]:
1337 if p not in req and p not in m:
1351 if p not in req and p not in m:
1338 r.append(p)
1352 r.append(p)
1339 req.add(p)
1353 req.add(p)
1340 seen.add(n[0])
1354 seen.add(n[0])
1341
1355
1342 if r:
1356 if r:
1343 reqcnt += 1
1357 reqcnt += 1
1344 self.ui.debug(_("request %d: %s\n") %
1358 self.ui.debug(_("request %d: %s\n") %
1345 (reqcnt, " ".join(map(short, r))))
1359 (reqcnt, " ".join(map(short, r))))
1346 for p in xrange(0, len(r), 10):
1360 for p in xrange(0, len(r), 10):
1347 for b in remote.branches(r[p:p+10]):
1361 for b in remote.branches(r[p:p+10]):
1348 self.ui.debug(_("received %s:%s\n") %
1362 self.ui.debug(_("received %s:%s\n") %
1349 (short(b[0]), short(b[1])))
1363 (short(b[0]), short(b[1])))
1350 unknown.append(b)
1364 unknown.append(b)
1351
1365
1352 # do binary search on the branches we found
1366 # do binary search on the branches we found
1353 while search:
1367 while search:
1354 newsearch = []
1368 newsearch = []
1355 reqcnt += 1
1369 reqcnt += 1
1356 for n, l in zip(search, remote.between(search)):
1370 for n, l in zip(search, remote.between(search)):
1357 l.append(n[1])
1371 l.append(n[1])
1358 p = n[0]
1372 p = n[0]
1359 f = 1
1373 f = 1
1360 for i in l:
1374 for i in l:
1361 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1375 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1362 if i in m:
1376 if i in m:
1363 if f <= 2:
1377 if f <= 2:
1364 self.ui.debug(_("found new branch changeset %s\n") %
1378 self.ui.debug(_("found new branch changeset %s\n") %
1365 short(p))
1379 short(p))
1366 fetch.add(p)
1380 fetch.add(p)
1367 base[i] = 1
1381 base[i] = 1
1368 else:
1382 else:
1369 self.ui.debug(_("narrowed branch search to %s:%s\n")
1383 self.ui.debug(_("narrowed branch search to %s:%s\n")
1370 % (short(p), short(i)))
1384 % (short(p), short(i)))
1371 newsearch.append((p, i))
1385 newsearch.append((p, i))
1372 break
1386 break
1373 p, f = i, f * 2
1387 p, f = i, f * 2
1374 search = newsearch
1388 search = newsearch
1375
1389
1376 # sanity check our fetch list
1390 # sanity check our fetch list
1377 for f in fetch:
1391 for f in fetch:
1378 if f in m:
1392 if f in m:
1379 raise error.RepoError(_("already have changeset ")
1393 raise error.RepoError(_("already have changeset ")
1380 + short(f[:4]))
1394 + short(f[:4]))
1381
1395
1382 if base.keys() == [nullid]:
1396 if base.keys() == [nullid]:
1383 if force:
1397 if force:
1384 self.ui.warn(_("warning: repository is unrelated\n"))
1398 self.ui.warn(_("warning: repository is unrelated\n"))
1385 else:
1399 else:
1386 raise util.Abort(_("repository is unrelated"))
1400 raise util.Abort(_("repository is unrelated"))
1387
1401
1388 self.ui.debug(_("found new changesets starting at ") +
1402 self.ui.debug(_("found new changesets starting at ") +
1389 " ".join([short(f) for f in fetch]) + "\n")
1403 " ".join([short(f) for f in fetch]) + "\n")
1390
1404
1391 self.ui.debug(_("%d total queries\n") % reqcnt)
1405 self.ui.debug(_("%d total queries\n") % reqcnt)
1392
1406
1393 return base.keys(), list(fetch), heads
1407 return base.keys(), list(fetch), heads
1394
1408
1395 def findoutgoing(self, remote, base=None, heads=None, force=False):
1409 def findoutgoing(self, remote, base=None, heads=None, force=False):
1396 """Return list of nodes that are roots of subsets not in remote
1410 """Return list of nodes that are roots of subsets not in remote
1397
1411
1398 If base dict is specified, assume that these nodes and their parents
1412 If base dict is specified, assume that these nodes and their parents
1399 exist on the remote side.
1413 exist on the remote side.
1400 If a list of heads is specified, return only nodes which are heads
1414 If a list of heads is specified, return only nodes which are heads
1401 or ancestors of these heads, and return a second element which
1415 or ancestors of these heads, and return a second element which
1402 contains all remote heads which get new children.
1416 contains all remote heads which get new children.
1403 """
1417 """
1404 if base is None:
1418 if base is None:
1405 base = {}
1419 base = {}
1406 self.findincoming(remote, base, heads, force=force)
1420 self.findincoming(remote, base, heads, force=force)
1407
1421
1408 self.ui.debug(_("common changesets up to ")
1422 self.ui.debug(_("common changesets up to ")
1409 + " ".join(map(short, base.keys())) + "\n")
1423 + " ".join(map(short, base.keys())) + "\n")
1410
1424
1411 remain = set(self.changelog.nodemap)
1425 remain = set(self.changelog.nodemap)
1412
1426
1413 # prune everything remote has from the tree
1427 # prune everything remote has from the tree
1414 remain.remove(nullid)
1428 remain.remove(nullid)
1415 remove = base.keys()
1429 remove = base.keys()
1416 while remove:
1430 while remove:
1417 n = remove.pop(0)
1431 n = remove.pop(0)
1418 if n in remain:
1432 if n in remain:
1419 remain.remove(n)
1433 remain.remove(n)
1420 for p in self.changelog.parents(n):
1434 for p in self.changelog.parents(n):
1421 remove.append(p)
1435 remove.append(p)
1422
1436
1423 # find every node whose parents have been pruned
1437 # find every node whose parents have been pruned
1424 subset = []
1438 subset = []
1425 # find every remote head that will get new children
1439 # find every remote head that will get new children
1426 updated_heads = set()
1440 updated_heads = set()
1427 for n in remain:
1441 for n in remain:
1428 p1, p2 = self.changelog.parents(n)
1442 p1, p2 = self.changelog.parents(n)
1429 if p1 not in remain and p2 not in remain:
1443 if p1 not in remain and p2 not in remain:
1430 subset.append(n)
1444 subset.append(n)
1431 if heads:
1445 if heads:
1432 if p1 in heads:
1446 if p1 in heads:
1433 updated_heads.add(p1)
1447 updated_heads.add(p1)
1434 if p2 in heads:
1448 if p2 in heads:
1435 updated_heads.add(p2)
1449 updated_heads.add(p2)
1436
1450
1437 # this is the set of all roots we have to push
1451 # this is the set of all roots we have to push
1438 if heads:
1452 if heads:
1439 return subset, list(updated_heads)
1453 return subset, list(updated_heads)
1440 else:
1454 else:
1441 return subset
1455 return subset
1442
1456
1443 def pull(self, remote, heads=None, force=False):
1457 def pull(self, remote, heads=None, force=False):
1444 lock = self.lock()
1458 lock = self.lock()
1445 try:
1459 try:
1446 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1460 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1447 force=force)
1461 force=force)
1448 if fetch == [nullid]:
1462 if fetch == [nullid]:
1449 self.ui.status(_("requesting all changes\n"))
1463 self.ui.status(_("requesting all changes\n"))
1450
1464
1451 if not fetch:
1465 if not fetch:
1452 self.ui.status(_("no changes found\n"))
1466 self.ui.status(_("no changes found\n"))
1453 return 0
1467 return 0
1454
1468
1455 if heads is None and remote.capable('changegroupsubset'):
1469 if heads is None and remote.capable('changegroupsubset'):
1456 heads = rheads
1470 heads = rheads
1457
1471
1458 if heads is None:
1472 if heads is None:
1459 cg = remote.changegroup(fetch, 'pull')
1473 cg = remote.changegroup(fetch, 'pull')
1460 else:
1474 else:
1461 if not remote.capable('changegroupsubset'):
1475 if not remote.capable('changegroupsubset'):
1462 raise util.Abort(_("Partial pull cannot be done because "
1476 raise util.Abort(_("Partial pull cannot be done because "
1463 "other repository doesn't support "
1477 "other repository doesn't support "
1464 "changegroupsubset."))
1478 "changegroupsubset."))
1465 cg = remote.changegroupsubset(fetch, heads, 'pull')
1479 cg = remote.changegroupsubset(fetch, heads, 'pull')
1466 return self.addchangegroup(cg, 'pull', remote.url())
1480 return self.addchangegroup(cg, 'pull', remote.url())
1467 finally:
1481 finally:
1468 lock.release()
1482 lock.release()
1469
1483
1470 def push(self, remote, force=False, revs=None):
1484 def push(self, remote, force=False, revs=None):
1471 # there are two ways to push to remote repo:
1485 # there are two ways to push to remote repo:
1472 #
1486 #
1473 # addchangegroup assumes local user can lock remote
1487 # addchangegroup assumes local user can lock remote
1474 # repo (local filesystem, old ssh servers).
1488 # repo (local filesystem, old ssh servers).
1475 #
1489 #
1476 # unbundle assumes local user cannot lock remote repo (new ssh
1490 # unbundle assumes local user cannot lock remote repo (new ssh
1477 # servers, http servers).
1491 # servers, http servers).
1478
1492
1479 if remote.capable('unbundle'):
1493 if remote.capable('unbundle'):
1480 return self.push_unbundle(remote, force, revs)
1494 return self.push_unbundle(remote, force, revs)
1481 return self.push_addchangegroup(remote, force, revs)
1495 return self.push_addchangegroup(remote, force, revs)
1482
1496
1483 def prepush(self, remote, force, revs):
1497 def prepush(self, remote, force, revs):
1484 common = {}
1498 common = {}
1485 remote_heads = remote.heads()
1499 remote_heads = remote.heads()
1486 inc = self.findincoming(remote, common, remote_heads, force=force)
1500 inc = self.findincoming(remote, common, remote_heads, force=force)
1487
1501
1488 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1502 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1489 if revs is not None:
1503 if revs is not None:
1490 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1504 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1491 else:
1505 else:
1492 bases, heads = update, self.changelog.heads()
1506 bases, heads = update, self.changelog.heads()
1493
1507
1494 def checkbranch(lheads, rheads, updatelh):
1508 def checkbranch(lheads, rheads, updatelh):
1495 '''
1509 '''
1496 check whether there are more local heads than remote heads on
1510 check whether there are more local heads than remote heads on
1497 a specific branch.
1511 a specific branch.
1498
1512
1499 lheads: local branch heads
1513 lheads: local branch heads
1500 rheads: remote branch heads
1514 rheads: remote branch heads
1501 updatelh: outgoing local branch heads
1515 updatelh: outgoing local branch heads
1502 '''
1516 '''
1503
1517
1504 warn = 0
1518 warn = 0
1505
1519
1506 if not revs and len(lheads) > len(rheads):
1520 if not revs and len(lheads) > len(rheads):
1507 warn = 1
1521 warn = 1
1508 else:
1522 else:
1509 updatelheads = [self.changelog.heads(x, lheads)
1523 updatelheads = [self.changelog.heads(x, lheads)
1510 for x in updatelh]
1524 for x in updatelh]
1511 newheads = set(sum(updatelheads, [])) & set(lheads)
1525 newheads = set(sum(updatelheads, [])) & set(lheads)
1512
1526
1513 if not newheads:
1527 if not newheads:
1514 return True
1528 return True
1515
1529
1516 for r in rheads:
1530 for r in rheads:
1517 if r in self.changelog.nodemap:
1531 if r in self.changelog.nodemap:
1518 desc = self.changelog.heads(r, heads)
1532 desc = self.changelog.heads(r, heads)
1519 l = [h for h in heads if h in desc]
1533 l = [h for h in heads if h in desc]
1520 if not l:
1534 if not l:
1521 newheads.add(r)
1535 newheads.add(r)
1522 else:
1536 else:
1523 newheads.add(r)
1537 newheads.add(r)
1524 if len(newheads) > len(rheads):
1538 if len(newheads) > len(rheads):
1525 warn = 1
1539 warn = 1
1526
1540
1527 if warn:
1541 if warn:
1528 if not rheads: # new branch requires --force
1542 if not rheads: # new branch requires --force
1529 self.ui.warn(_("abort: push creates new"
1543 self.ui.warn(_("abort: push creates new"
1530 " remote branch '%s'!\n") %
1544 " remote branch '%s'!\n") %
1531 self[updatelh[0]].branch())
1545 self[updatelh[0]].branch())
1532 else:
1546 else:
1533 self.ui.warn(_("abort: push creates new remote heads!\n"))
1547 self.ui.warn(_("abort: push creates new remote heads!\n"))
1534
1548
1535 self.ui.status(_("(did you forget to merge?"
1549 self.ui.status(_("(did you forget to merge?"
1536 " use push -f to force)\n"))
1550 " use push -f to force)\n"))
1537 return False
1551 return False
1538 return True
1552 return True
1539
1553
1540 if not bases:
1554 if not bases:
1541 self.ui.status(_("no changes found\n"))
1555 self.ui.status(_("no changes found\n"))
1542 return None, 1
1556 return None, 1
1543 elif not force:
1557 elif not force:
1544 # Check for each named branch if we're creating new remote heads.
1558 # Check for each named branch if we're creating new remote heads.
1545 # To be a remote head after push, node must be either:
1559 # To be a remote head after push, node must be either:
1546 # - unknown locally
1560 # - unknown locally
1547 # - a local outgoing head descended from update
1561 # - a local outgoing head descended from update
1548 # - a remote head that's known locally and not
1562 # - a remote head that's known locally and not
1549 # ancestral to an outgoing head
1563 # ancestral to an outgoing head
1550 #
1564 #
1551 # New named branches cannot be created without --force.
1565 # New named branches cannot be created without --force.
1552
1566
1553 if remote_heads != [nullid]:
1567 if remote_heads != [nullid]:
1554 if remote.capable('branchmap'):
1568 if remote.capable('branchmap'):
1555 localhds = {}
1569 localhds = {}
1556 if not revs:
1570 if not revs:
1557 localhds = self.branchmap()
1571 localhds = self.branchmap()
1558 else:
1572 else:
1559 for n in heads:
1573 for n in heads:
1560 branch = self[n].branch()
1574 branch = self[n].branch()
1561 if branch in localhds:
1575 if branch in localhds:
1562 localhds[branch].append(n)
1576 localhds[branch].append(n)
1563 else:
1577 else:
1564 localhds[branch] = [n]
1578 localhds[branch] = [n]
1565
1579
1566 remotehds = remote.branchmap()
1580 remotehds = remote.branchmap()
1567
1581
1568 for lh in localhds:
1582 for lh in localhds:
1569 if lh in remotehds:
1583 if lh in remotehds:
1570 rheads = remotehds[lh]
1584 rheads = remotehds[lh]
1571 else:
1585 else:
1572 rheads = []
1586 rheads = []
1573 lheads = localhds[lh]
1587 lheads = localhds[lh]
1574 updatelh = [upd for upd in update
1588 updatelh = [upd for upd in update
1575 if self[upd].branch() == lh]
1589 if self[upd].branch() == lh]
1576 if not updatelh:
1590 if not updatelh:
1577 continue
1591 continue
1578 if not checkbranch(lheads, rheads, updatelh):
1592 if not checkbranch(lheads, rheads, updatelh):
1579 return None, 0
1593 return None, 0
1580 else:
1594 else:
1581 if not checkbranch(heads, remote_heads, update):
1595 if not checkbranch(heads, remote_heads, update):
1582 return None, 0
1596 return None, 0
1583
1597
1584 if inc:
1598 if inc:
1585 self.ui.warn(_("note: unsynced remote changes!\n"))
1599 self.ui.warn(_("note: unsynced remote changes!\n"))
1586
1600
1587
1601
1588 if revs is None:
1602 if revs is None:
1589 # use the fast path, no race possible on push
1603 # use the fast path, no race possible on push
1590 cg = self._changegroup(common.keys(), 'push')
1604 cg = self._changegroup(common.keys(), 'push')
1591 else:
1605 else:
1592 cg = self.changegroupsubset(update, revs, 'push')
1606 cg = self.changegroupsubset(update, revs, 'push')
1593 return cg, remote_heads
1607 return cg, remote_heads
1594
1608
1595 def push_addchangegroup(self, remote, force, revs):
1609 def push_addchangegroup(self, remote, force, revs):
1596 lock = remote.lock()
1610 lock = remote.lock()
1597 try:
1611 try:
1598 ret = self.prepush(remote, force, revs)
1612 ret = self.prepush(remote, force, revs)
1599 if ret[0] is not None:
1613 if ret[0] is not None:
1600 cg, remote_heads = ret
1614 cg, remote_heads = ret
1601 return remote.addchangegroup(cg, 'push', self.url())
1615 return remote.addchangegroup(cg, 'push', self.url())
1602 return ret[1]
1616 return ret[1]
1603 finally:
1617 finally:
1604 lock.release()
1618 lock.release()
1605
1619
1606 def push_unbundle(self, remote, force, revs):
1620 def push_unbundle(self, remote, force, revs):
1607 # local repo finds heads on server, finds out what revs it
1621 # local repo finds heads on server, finds out what revs it
1608 # must push. once revs transferred, if server finds it has
1622 # must push. once revs transferred, if server finds it has
1609 # different heads (someone else won commit/push race), server
1623 # different heads (someone else won commit/push race), server
1610 # aborts.
1624 # aborts.
1611
1625
1612 ret = self.prepush(remote, force, revs)
1626 ret = self.prepush(remote, force, revs)
1613 if ret[0] is not None:
1627 if ret[0] is not None:
1614 cg, remote_heads = ret
1628 cg, remote_heads = ret
1615 if force: remote_heads = ['force']
1629 if force: remote_heads = ['force']
1616 return remote.unbundle(cg, remote_heads, 'push')
1630 return remote.unbundle(cg, remote_heads, 'push')
1617 return ret[1]
1631 return ret[1]
1618
1632
def changegroupinfo(self, nodes, source):
    """Report how many changesets are in a changegroup being built.

    Prints the count when bundling or in verbose mode, and the full
    node list when debugging.  Purely informational; no return value.
    """
    chatty = source == 'bundle' or self.ui.verbose
    if chatty:
        self.ui.status(_("%d changesets found\n") % len(nodes))
    if self.ui.debugflag:
        self.ui.debug(_("list of changesets:\n"))
        for node in nodes:
            self.ui.debug("%s\n" % hex(node))
1626
1640
def changegroupsubset(self, bases, heads, source, extranodes=None):
    """This function generates a changegroup consisting of all the nodes
    that are descendents of any of the bases, and ancestors of any of
    the heads.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    The caller can specify some nodes that must be included in the
    changegroup using the extranodes argument.  It should be a dict
    where the keys are the filenames (or 1 for the manifest), and the
    values are lists of (node, linknode) tuples, where node is a wanted
    node and linknode is the changelog node that should be transmitted as
    the linkrev.

    Returns a util.chunkbuffer wrapping the generated chunk stream.
    """

    if extranodes is None:
        # can we go through the fast path ?
        heads.sort()
        allheads = self.heads()
        allheads.sort()
        if heads == allheads:
            # The request covers every head, so the much simpler
            # common-nodes generator can be used instead.
            common = []
            # parents of bases are known from both sides
            for n in bases:
                for p in self.changelog.parents(n):
                    if p != nullid:
                        common.append(p)
            return self._changegroup(common, source)

    self.hook('preoutgoing', throw=True, source=source)

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # msng is short for missing - compute the list of changesets in this
    # changegroup.
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
    self.changegroupinfo(msng_cl_lst, source)
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.

    # Known heads are the list of heads that it is assumed the recipient
    # of this changegroup will know about.
    knownheads = set()
    # We assume that all parents of bases are known heads.
    for n in bases:
        knownheads.update(cl.parents(n))
    knownheads.discard(nullid)
    knownheads = list(knownheads)
    if knownheads:
        # Now that we know what heads are known, we can compute which
        # changesets are known.  The recipient must know about all
        # changesets required to reach the known heads from the null
        # changeset.
        has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
        junk = None
        # Transform the list into a set.
        has_cl_set = set(has_cl_set)
    else:
        # If there were no known heads, the recipient cannot be assumed to
        # know about any changesets.
        has_cl_set = set()

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
    junk = None

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune_parents(revlog, hasset, msngset):
        haslst = list(hasset)
        haslst.sort(key=revlog.rev)
        for node in haslst:
            parentlst = [p for p in revlog.parents(node) if p != nullid]
            while parentlst:
                n = parentlst.pop()
                if n not in hasset:
                    hasset.add(n)
                    p = [p for p in revlog.parents(n) if p != nullid]
                    parentlst.extend(p)
        for n in hasset:
            msngset.pop(n, None)

    # This is a function generating function used to set up an environment
    # for the inner function to execute in.
    def manifest_and_file_collector(changedfileset):
        # This is an information gathering function that gathers
        # information from each changeset node that goes out as part of
        # the changegroup.  The information gathered is a list of which
        # manifest nodes are potentially required (the recipient may
        # already have them) and total list of all files which were
        # changed in any changeset in the changegroup.
        #
        # We also remember the first changenode we saw any manifest
        # referenced by so we can later determine which changenode 'owns'
        # the manifest.
        def collect_manifests_and_files(clnode):
            c = cl.read(clnode)
            for f in c[3]:
                # This is to make sure we only have one instance of each
                # filename string for each filename.
                changedfileset.setdefault(f, f)
            msng_mnfst_set.setdefault(c[0], clnode)
        return collect_manifests_and_files

    # Figure out which manifest nodes (of the ones we think might be part
    # of the changegroup) the recipient must know about and remove them
    # from the changegroup.
    def prune_manifests():
        has_mnfst_set = set()
        for n in msng_mnfst_set:
            # If a 'missing' manifest thinks it belongs to a changenode
            # the recipient is assumed to have, obviously the recipient
            # must have that manifest.
            linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
            if linknode in has_cl_set:
                has_mnfst_set.add(n)
        prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

    # Use the information collected in collect_manifests_and_files to say
    # which changenode any manifestnode belongs to.
    def lookup_manifest_link(mnfstnode):
        return msng_mnfst_set[mnfstnode]

    # A function generating function that sets up the initial environment
    # for the inner function.
    def filenode_collector(changedfiles):
        # Mutable cell so the closure can track the next expected rev.
        next_rev = [0]
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if r == next_rev[0]:
                # If the last rev we looked at was the one just previous,
                # we only need to see a diff.
                deltamf = mnfst.readdelta(mnfstnode)
                # For each line in the delta
                for f, fnode in deltamf.iteritems():
                    f = changedfiles.get(f, None)
                    # And if the file is in the list of files we care
                    # about.
                    if f is not None:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
            # Remember the revision we hope to see next.
            next_rev[0] = r + 1
        return collect_msng_filenodes

    # We have a list of filenodes we think we need for a file, lets remove
    # all those we know the recipient must have.
    def prune_filenodes(f, filerevlog):
        msngset = msng_filenode_set[f]
        hasset = set()
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in msngset:
            clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
            if clnode in has_cl_set:
                hasset.add(n)
        prune_parents(filerevlog, hasset, msngset)

    # A function generator function that sets up the a context for the
    # inner function.
    def lookup_filenode_link_func(fname):
        msngset = msng_filenode_set[fname]
        # Lookup the changenode the filenode belongs to.
        def lookup_filenode_link(fnode):
            return msngset[fnode]
        return lookup_filenode_link

    # Add the nodes that were explicitly requested.
    def add_extra_nodes(name, nodes):
        if not extranodes or name not in extranodes:
            return

        for node, linknode in extranodes[name]:
            if node not in nodes:
                nodes[node] = linknode

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = {}
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity,
                         manifest_and_file_collector(changedfiles))
        for chnk in group:
            yield chnk

        # The list of manifests has been collected by the generator
        # calling our functions back.
        prune_manifests()
        add_extra_nodes(1, msng_mnfst_set)
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(key=mnfst.rev)
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                            filenode_collector(changedfiles))
        for chnk in group:
            yield chnk

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        if extranodes:
            for fname in extranodes:
                if isinstance(fname, int):
                    continue
                msng_filenode_set.setdefault(fname, {})
                changedfiles[fname] = 1
        # Go through all our files in order sorted by name.
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            if fname in msng_filenode_set:
                prune_filenodes(fname, filerevlog)
                add_extra_nodes(fname, msng_filenode_set[fname])
                msng_filenode_lst = msng_filenode_set[fname].keys()
            else:
                msng_filenode_lst = []
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if len(msng_filenode_lst) > 0:
                yield changegroup.chunkheader(len(fname))
                yield fname
                # Sort the filenodes by their revision #
                msng_filenode_lst.sort(key=filerevlog.rev)
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(msng_filenode_lst,
                                         lookup_filenode_link_func(fname))
                for chnk in group:
                    yield chnk
            if fname in msng_filenode_set:
                # Don't need this anymore, toss it to free memory.
                del msng_filenode_set[fname]
        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return util.chunkbuffer(gengroup())
1923
1937
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes descending from basenodes.

    Delegates to changegroupsubset() with the repository's current
    heads, which avoids a race (issue1320).
    """
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
1927
1941
def _changegroup(self, common, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    common is the set of common nodes between remote and self"""

    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # Everything not reachable from a common node is outgoing.
    nodes = cl.findmissing(common)
    revset = set([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    # A changeset's group entry is looked up by the changeset itself.
    def identity(x):
        return x

    # Yield the nodes of `log` whose linked changeset is outgoing.
    def gennodelst(log):
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    # Build a callback that records, for each outgoing changeset, the
    # files it touched (field 3 of the changelog entry).
    def changed_file_collector(changedfileset):
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            changedfileset.update(c[3])
        return collect_changed_files

    # Build a lookup mapping a node of `revlog` to its owning changelog
    # node via the linkrev.
    def lookuprevlink_func(revlog):
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = set()

        # Changelog chunks first; changed files are collected as a side
        # effect of the group callback.
        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk

        # Then the manifest chunks.
        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        # Finally one group per changed file, sorted by name; files with
        # no outgoing filenodes are skipped entirely.
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        # Signal that no more groups are left.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1995
2009
1996 def addchangegroup(self, source, srctype, url, emptyok=False):
2010 def addchangegroup(self, source, srctype, url, emptyok=False):
1997 """add changegroup to repo.
2011 """add changegroup to repo.
1998
2012
1999 return values:
2013 return values:
2000 - nothing changed or no source: 0
2014 - nothing changed or no source: 0
2001 - more heads than before: 1+added heads (2..n)
2015 - more heads than before: 1+added heads (2..n)
2002 - less heads than before: -1-removed heads (-2..-n)
2016 - less heads than before: -1-removed heads (-2..-n)
2003 - number of heads stays the same: 1
2017 - number of heads stays the same: 1
2004 """
2018 """
2005 def csmap(x):
2019 def csmap(x):
2006 self.ui.debug(_("add changeset %s\n") % short(x))
2020 self.ui.debug(_("add changeset %s\n") % short(x))
2007 return len(cl)
2021 return len(cl)
2008
2022
2009 def revmap(x):
2023 def revmap(x):
2010 return cl.rev(x)
2024 return cl.rev(x)
2011
2025
2012 if not source:
2026 if not source:
2013 return 0
2027 return 0
2014
2028
2015 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2029 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2016
2030
2017 changesets = files = revisions = 0
2031 changesets = files = revisions = 0
2018
2032
2019 # write changelog data to temp files so concurrent readers will not see
2033 # write changelog data to temp files so concurrent readers will not see
2020 # inconsistent view
2034 # inconsistent view
2021 cl = self.changelog
2035 cl = self.changelog
2022 cl.delayupdate()
2036 cl.delayupdate()
2023 oldheads = len(cl.heads())
2037 oldheads = len(cl.heads())
2024
2038
2025 tr = self.transaction()
2039 tr = self.transaction()
2026 try:
2040 try:
2027 trp = weakref.proxy(tr)
2041 trp = weakref.proxy(tr)
2028 # pull off the changeset group
2042 # pull off the changeset group
2029 self.ui.status(_("adding changesets\n"))
2043 self.ui.status(_("adding changesets\n"))
2030 clstart = len(cl)
2044 clstart = len(cl)
2031 chunkiter = changegroup.chunkiter(source)
2045 chunkiter = changegroup.chunkiter(source)
2032 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2046 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2033 raise util.Abort(_("received changelog group is empty"))
2047 raise util.Abort(_("received changelog group is empty"))
2034 clend = len(cl)
2048 clend = len(cl)
2035 changesets = clend - clstart
2049 changesets = clend - clstart
2036
2050
2037 # pull off the manifest group
2051 # pull off the manifest group
2038 self.ui.status(_("adding manifests\n"))
2052 self.ui.status(_("adding manifests\n"))
2039 chunkiter = changegroup.chunkiter(source)
2053 chunkiter = changegroup.chunkiter(source)
2040 # no need to check for empty manifest group here:
2054 # no need to check for empty manifest group here:
2041 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2055 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2042 # no new manifest will be created and the manifest group will
2056 # no new manifest will be created and the manifest group will
2043 # be empty during the pull
2057 # be empty during the pull
2044 self.manifest.addgroup(chunkiter, revmap, trp)
2058 self.manifest.addgroup(chunkiter, revmap, trp)
2045
2059
2046 # process the files
2060 # process the files
2047 self.ui.status(_("adding file changes\n"))
2061 self.ui.status(_("adding file changes\n"))
2048 while 1:
2062 while 1:
2049 f = changegroup.getchunk(source)
2063 f = changegroup.getchunk(source)
2050 if not f:
2064 if not f:
2051 break
2065 break
2052 self.ui.debug(_("adding %s revisions\n") % f)
2066 self.ui.debug(_("adding %s revisions\n") % f)
2053 fl = self.file(f)
2067 fl = self.file(f)
2054 o = len(fl)
2068 o = len(fl)
2055 chunkiter = changegroup.chunkiter(source)
2069 chunkiter = changegroup.chunkiter(source)
2056 if fl.addgroup(chunkiter, revmap, trp) is None:
2070 if fl.addgroup(chunkiter, revmap, trp) is None:
2057 raise util.Abort(_("received file revlog group is empty"))
2071 raise util.Abort(_("received file revlog group is empty"))
2058 revisions += len(fl) - o
2072 revisions += len(fl) - o
2059 files += 1
2073 files += 1
2060
2074
2061 newheads = len(cl.heads())
2075 newheads = len(cl.heads())
2062 heads = ""
2076 heads = ""
2063 if oldheads and newheads != oldheads:
2077 if oldheads and newheads != oldheads:
2064 heads = _(" (%+d heads)") % (newheads - oldheads)
2078 heads = _(" (%+d heads)") % (newheads - oldheads)
2065
2079
2066 self.ui.status(_("added %d changesets"
2080 self.ui.status(_("added %d changesets"
2067 " with %d changes to %d files%s\n")
2081 " with %d changes to %d files%s\n")
2068 % (changesets, revisions, files, heads))
2082 % (changesets, revisions, files, heads))
2069
2083
2070 if changesets > 0:
2084 if changesets > 0:
2071 p = lambda: cl.writepending() and self.root or ""
2085 p = lambda: cl.writepending() and self.root or ""
2072 self.hook('pretxnchangegroup', throw=True,
2086 self.hook('pretxnchangegroup', throw=True,
2073 node=hex(cl.node(clstart)), source=srctype,
2087 node=hex(cl.node(clstart)), source=srctype,
2074 url=url, pending=p)
2088 url=url, pending=p)
2075
2089
2076 # make changelog see real files again
2090 # make changelog see real files again
2077 cl.finalize(trp)
2091 cl.finalize(trp)
2078
2092
2079 tr.close()
2093 tr.close()
2080 finally:
2094 finally:
2081 del tr
2095 del tr
2082
2096
2083 if changesets > 0:
2097 if changesets > 0:
2084 # forcefully update the on-disk branch cache
2098 # forcefully update the on-disk branch cache
2085 self.ui.debug(_("updating the branch cache\n"))
2099 self.ui.debug(_("updating the branch cache\n"))
2086 self.branchtags()
2100 self.branchtags()
2087 self.hook("changegroup", node=hex(cl.node(clstart)),
2101 self.hook("changegroup", node=hex(cl.node(clstart)),
2088 source=srctype, url=url)
2102 source=srctype, url=url)
2089
2103
2090 for i in xrange(clstart, clend):
2104 for i in xrange(clstart, clend):
2091 self.hook("incoming", node=hex(cl.node(i)),
2105 self.hook("incoming", node=hex(cl.node(i)),
2092 source=srctype, url=url)
2106 source=srctype, url=url)
2093
2107
2094 # never return 0 here:
2108 # never return 0 here:
2095 if newheads < oldheads:
2109 if newheads < oldheads:
2096 return newheads - oldheads - 1
2110 return newheads - oldheads - 1
2097 else:
2111 else:
2098 return newheads - oldheads + 1
2112 return newheads - oldheads + 1
2099
2113
2100
2114
2101 def stream_in(self, remote):
2115 def stream_in(self, remote):
2102 fp = remote.stream_out()
2116 fp = remote.stream_out()
2103 l = fp.readline()
2117 l = fp.readline()
2104 try:
2118 try:
2105 resp = int(l)
2119 resp = int(l)
2106 except ValueError:
2120 except ValueError:
2107 raise error.ResponseError(
2121 raise error.ResponseError(
2108 _('Unexpected response from remote server:'), l)
2122 _('Unexpected response from remote server:'), l)
2109 if resp == 1:
2123 if resp == 1:
2110 raise util.Abort(_('operation forbidden by server'))
2124 raise util.Abort(_('operation forbidden by server'))
2111 elif resp == 2:
2125 elif resp == 2:
2112 raise util.Abort(_('locking the remote repository failed'))
2126 raise util.Abort(_('locking the remote repository failed'))
2113 elif resp != 0:
2127 elif resp != 0:
2114 raise util.Abort(_('the server sent an unknown error code'))
2128 raise util.Abort(_('the server sent an unknown error code'))
2115 self.ui.status(_('streaming all changes\n'))
2129 self.ui.status(_('streaming all changes\n'))
2116 l = fp.readline()
2130 l = fp.readline()
2117 try:
2131 try:
2118 total_files, total_bytes = map(int, l.split(' ', 1))
2132 total_files, total_bytes = map(int, l.split(' ', 1))
2119 except (ValueError, TypeError):
2133 except (ValueError, TypeError):
2120 raise error.ResponseError(
2134 raise error.ResponseError(
2121 _('Unexpected response from remote server:'), l)
2135 _('Unexpected response from remote server:'), l)
2122 self.ui.status(_('%d files to transfer, %s of data\n') %
2136 self.ui.status(_('%d files to transfer, %s of data\n') %
2123 (total_files, util.bytecount(total_bytes)))
2137 (total_files, util.bytecount(total_bytes)))
2124 start = time.time()
2138 start = time.time()
2125 for i in xrange(total_files):
2139 for i in xrange(total_files):
2126 # XXX doesn't support '\n' or '\r' in filenames
2140 # XXX doesn't support '\n' or '\r' in filenames
2127 l = fp.readline()
2141 l = fp.readline()
2128 try:
2142 try:
2129 name, size = l.split('\0', 1)
2143 name, size = l.split('\0', 1)
2130 size = int(size)
2144 size = int(size)
2131 except (ValueError, TypeError):
2145 except (ValueError, TypeError):
2132 raise error.ResponseError(
2146 raise error.ResponseError(
2133 _('Unexpected response from remote server:'), l)
2147 _('Unexpected response from remote server:'), l)
2134 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2148 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2135 # for backwards compat, name was partially encoded
2149 # for backwards compat, name was partially encoded
2136 ofp = self.sopener(store.decodedir(name), 'w')
2150 ofp = self.sopener(store.decodedir(name), 'w')
2137 for chunk in util.filechunkiter(fp, limit=size):
2151 for chunk in util.filechunkiter(fp, limit=size):
2138 ofp.write(chunk)
2152 ofp.write(chunk)
2139 ofp.close()
2153 ofp.close()
2140 elapsed = time.time() - start
2154 elapsed = time.time() - start
2141 if elapsed <= 0:
2155 if elapsed <= 0:
2142 elapsed = 0.001
2156 elapsed = 0.001
2143 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2157 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2144 (util.bytecount(total_bytes), elapsed,
2158 (util.bytecount(total_bytes), elapsed,
2145 util.bytecount(total_bytes / elapsed)))
2159 util.bytecount(total_bytes / elapsed)))
2146 self.invalidate()
2160 self.invalidate()
2147 return len(self.heads()) + 1
2161 return len(self.heads()) + 1
2148
2162
2149 def clone(self, remote, heads=[], stream=False):
2163 def clone(self, remote, heads=[], stream=False):
2150 '''clone remote repository.
2164 '''clone remote repository.
2151
2165
2152 keyword arguments:
2166 keyword arguments:
2153 heads: list of revs to clone (forces use of pull)
2167 heads: list of revs to clone (forces use of pull)
2154 stream: use streaming clone if possible'''
2168 stream: use streaming clone if possible'''
2155
2169
2156 # now, all clients that can request uncompressed clones can
2170 # now, all clients that can request uncompressed clones can
2157 # read repo formats supported by all servers that can serve
2171 # read repo formats supported by all servers that can serve
2158 # them.
2172 # them.
2159
2173
2160 # if revlog format changes, client will have to check version
2174 # if revlog format changes, client will have to check version
2161 # and format flags on "stream" capability, and use
2175 # and format flags on "stream" capability, and use
2162 # uncompressed only if compatible.
2176 # uncompressed only if compatible.
2163
2177
2164 if stream and not heads and remote.capable('stream'):
2178 if stream and not heads and remote.capable('stream'):
2165 return self.stream_in(remote)
2179 return self.stream_in(remote)
2166 return self.pull(remote, heads)
2180 return self.pull(remote, heads)
2167
2181
2168 # used to avoid circular references so destructors work
2182 # used to avoid circular references so destructors work
2169 def aftertrans(files):
2183 def aftertrans(files):
2170 renamefiles = [tuple(t) for t in files]
2184 renamefiles = [tuple(t) for t in files]
2171 def a():
2185 def a():
2172 for src, dest in renamefiles:
2186 for src, dest in renamefiles:
2173 util.rename(src, dest)
2187 util.rename(src, dest)
2174 return a
2188 return a
2175
2189
2176 def instance(ui, path, create):
2190 def instance(ui, path, create):
2177 return localrepository(ui, util.drop_scheme('file', path), create)
2191 return localrepository(ui, util.drop_scheme('file', path), create)
2178
2192
2179 def islocal(path):
2193 def islocal(path):
2180 return True
2194 return True
General Comments 0
You need to be logged in to leave comments. Login now