##// END OF EJS Templates
bookmarks: merge _findtags method into core
Matt Mackall -
r13360:bab5490e default
parent child Browse files
Show More
@@ -1,366 +1,360 b''
1 # Mercurial extension to provide the 'hg bookmark' command
1 # Mercurial extension to provide the 'hg bookmark' command
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''track a line of development with movable markers
8 '''track a line of development with movable markers
9
9
10 Bookmarks are local movable markers to changesets. Every bookmark
10 Bookmarks are local movable markers to changesets. Every bookmark
11 points to a changeset identified by its hash. If you commit a
11 points to a changeset identified by its hash. If you commit a
12 changeset that is based on a changeset that has a bookmark on it, the
12 changeset that is based on a changeset that has a bookmark on it, the
13 bookmark shifts to the new changeset.
13 bookmark shifts to the new changeset.
14
14
15 It is possible to use bookmark names in every revision lookup (e.g.
15 It is possible to use bookmark names in every revision lookup (e.g.
16 :hg:`merge`, :hg:`update`).
16 :hg:`merge`, :hg:`update`).
17
17
18 By default, when several bookmarks point to the same changeset, they
18 By default, when several bookmarks point to the same changeset, they
19 will all move forward together. It is possible to obtain a more
19 will all move forward together. It is possible to obtain a more
20 git-like experience by adding the following configuration option to
20 git-like experience by adding the following configuration option to
21 your configuration file::
21 your configuration file::
22
22
23 [bookmarks]
23 [bookmarks]
24 track.current = True
24 track.current = True
25
25
26 This will cause Mercurial to track the bookmark that you are currently
26 This will cause Mercurial to track the bookmark that you are currently
27 using, and only update it. This is similar to git's approach to
27 using, and only update it. This is similar to git's approach to
28 branching.
28 branching.
29 '''
29 '''
30
30
31 from mercurial.i18n import _
31 from mercurial.i18n import _
32 from mercurial.node import nullid, nullrev, bin, hex, short
32 from mercurial.node import nullid, nullrev, bin, hex, short
33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
34 from mercurial import encoding
34 from mercurial import encoding
35 from mercurial import bookmarks
35 from mercurial import bookmarks
36 import os
36 import os
37
37
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires the bookmark extension to be
    enabled for both the local and remote repositories.
    '''
    # Short hashes normally; full hashes under --debug.
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()

    # --rename NAME: move an existing bookmark to a new name.
    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        # Keep the "current" pointer tracking the renamed bookmark.
        if repo._bookmarkcurrent == rename:
            bookmarks.setcurrent(repo, mark)
        bookmarks.write(repo)
        return

    # --delete NAME: remove a bookmark.
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        # Deleting the active bookmark clears the "current" pointer.
        if mark == repo._bookmarkcurrent:
            bookmarks.setcurrent(repo, None)
        del marks[mark]
        bookmarks.write(repo)
        return

    # NAME given: create or move a bookmark.
    if mark is not None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        # Bookmarks may not shadow branch names (would make lookup ambiguous).
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        bookmarks.setcurrent(repo, mark)
        bookmarks.write(repo)
        return

    # No NAME: list all bookmarks, flagging the current one with '*'.
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                if ui.configbool('bookmarks', 'track.current'):
                    # In track.current mode only the tracked bookmark is
                    # marked active, and only while it points at '.'.
                    current = repo._bookmarkcurrent
                    if bmark == current and n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''
                else:
                    if n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
        return
131
131
def _revstostrip(changelog, node):
    """Return the revisions that would be removed by stripping ``node``.

    Walks forward from ``node``'s revision collecting all descendants,
    but excludes merge parents that lie outside the stripped set
    (``saveheads``), since those revisions survive the strip.
    """
    srev = changelog.rev(node)
    tostrip = [srev]
    saveheads = []
    for r in xrange(srev, len(changelog)):
        parents = changelog.parentrevs(r)
        if parents[0] in tostrip or parents[1] in tostrip:
            tostrip.append(r)
            # A merge pulls in a second parent; if that parent is after
            # srev but not itself stripped, it remains a head to keep.
            if parents[1] != nullrev:
                for p in parents:
                    if p not in tostrip and p > srev:
                        saveheads.append(p)
    return [r for r in tostrip if r not in saveheads]
145
145
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop.

    Wraps ``repair.strip``: any bookmark pointing into the stripped
    revision range is moved to the new working-directory parent
    afterwards, so no bookmark is left dangling.
    """
    revisions = _revstostrip(repo.changelog, node)
    marks = repo._bookmarks
    update = []
    # Remember which bookmarks point at revisions about to disappear.
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    if len(update) > 0:
        # Re-point the orphaned bookmarks at '.' and persist.
        for m in update:
            marks[m] = repo.changectx('.').node()
        bookmarks.write(repo)
161
161
def reposetup(ui, repo):
    """Wrap the local repository class with bookmark-aware behavior.

    Adds bookmark name resolution to ``lookup`` and synchronizes
    bookmarks over the pushkey protocol on pull/push, plus moves
    bookmarks forward on ``addchangegroup``.
    """
    if not repo.local():
        return

    class bookmark_repo(repo.__class__):
        def lookup(self, key):
            # Resolve a bookmark name to its node before normal lookup.
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def pull(self, remote, heads=None, force=False):
            result = super(bookmark_repo, self).pull(remote, heads, force)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            changed = False
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], self._bookmarks[k]
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        if cl.rev() >= cr.rev():
                            # Local bookmark is already at or past the
                            # remote position; nothing to do.
                            continue
                        if cr in cl.descendants():
                            # Fast-forward the local bookmark.
                            self._bookmarks[k] = cr.node()
                            changed = True
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_("not updating divergent"
                                           " bookmark %s\n") % k)
            if changed:
                # NOTE(review): writes via the closed-over ``repo``, not
                # ``self`` — same object here, but confirm if refactoring.
                bookmarks.write(repo)

            return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            result = super(bookmark_repo, self).push(remote, force, revs,
                                                     newbranch)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], hex(self._bookmarks[k])
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        # Only advance the remote bookmark when the local
                        # position is a descendant (fast-forward).
                        if cl in cr.descendants():
                            r = remote.pushkey('bookmarks', k, nr, nl)
                            if r:
                                self.ui.status(_("updating bookmark %s\n") % k)
                            else:
                                self.ui.warn(_('updating bookmark %s'
                                               ' failed!\n') % k)

            return result

        def addchangegroup(self, *args, **kwargs):
            result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
            parents = self.dirstate.parents()
            bookmarks.update(self, parents, node)
            return result

    repo.__class__ = bookmark_repo
237
231
def pull(oldpull, ui, repo, source="default", **opts):
    """Wrap :hg:`pull` to honor -B/--bookmark.

    Requested bookmarks are translated into --rev arguments for the
    underlying pull, then imported locally afterwards (an explicit pull
    overrides any existing local bookmark of the same name).
    """
    # translate bookmark args to rev args for actual pull
    if opts.get('bookmark'):
        # this is an unpleasant hack as pull will do this internally
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        rb = other.listkeys('bookmarks')

        for b in opts['bookmark']:
            if b not in rb:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            opts.setdefault('rev', []).append(b)

    result = oldpull(ui, repo, source, **opts)

    # update specified bookmarks
    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            repo._bookmarks[b] = repo[rb[b]].node()
        bookmarks.write(repo)

    return result
263
257
def push(oldpush, ui, repo, dest=None, **opts):
    """Wrap :hg:`push` to honor -B/--bookmark.

    Bookmarks known locally are pushed as revisions and then exported
    over pushkey; a bookmark that exists only remotely is deleted there.
    Returns 2 on bookmark failure (matching push's error convention).
    """
    dopush = True
    if opts.get('bookmark'):
        # Only run the actual changeset push if at least one requested
        # bookmark exists locally (those become --rev arguments).
        dopush = False
        for b in opts['bookmark']:
            if b in repo._bookmarks:
                dopush = True
                opts.setdefault('rev', []).append(b)

    result = 0
    if dopush:
        result = oldpush(ui, repo, dest, **opts)

    if opts.get('bookmark'):
        # this is an unpleasant hack as push will do this internally
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            old = rb.get(b, '')
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result
303
297
def incoming(oldincoming, ui, repo, source="default", **opts):
    """Wrap :hg:`incoming`: with -B/--bookmarks, compare bookmarks
    against the remote instead of listing incoming changesets."""
    if opts.get('bookmarks'):
        source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        ui.status(_('comparing with %s\n') % url.hidepassword(source))
        return bookmarks.diff(ui, repo, other)
    else:
        return oldincoming(ui, repo, source, **opts)
312
306
def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
    """Wrap :hg:`outgoing`: with -B/--bookmarks, compare bookmarks
    against the remote instead of listing outgoing changesets.

    Note the argument order to ``bookmarks.diff`` is reversed relative
    to ``incoming`` — here we diff the remote against the local repo.
    """
    if opts.get('bookmarks'):
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        ui.status(_('comparing with %s\n') % url.hidepassword(dest))
        return bookmarks.diff(ui, other, repo)
    else:
        return oldoutgoing(ui, repo, dest, **opts)
322
316
def uisetup(ui):
    """Install the extension's command wrappers and extra options.

    Wraps repair.strip (bookmark-safe stripping), optionally wraps
    'update' for track.current mode, and adds -B/--bookmark(s) options
    to pull, push, incoming and outgoing.
    """
    extensions.wrapfunction(repair, "strip", strip)
    if ui.configbool('bookmarks', 'track.current'):
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)

    entry = extensions.wrapcommand(commands.table, 'pull', pull)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to import"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'push', push)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to export"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))
    entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))
342
336
def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark

    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    res = orig(ui, repo, *args, **opts)
    # The update target is either --rev or the first positional argument.
    rev = opts['rev']
    if not rev and len(args) > 0:
        rev = args[0]
    bookmarks.setcurrent(repo, rev)
    return res
355
349
# Command table registered by the extension loader.
cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision'), _('REV')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
    }

# Default color for the active bookmark in listings.
colortable = {'bookmarks.current': 'green'}
@@ -1,1959 +1,1960 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RepoError(_("requirement '%s' not supported") % r)
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
82 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100 # These two define the set of tags for this repository. _tags
100 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
102 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
103 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
104 # constitute the in-memory cache of tags.
105 self._tags = None
105 self._tags = None
106 self._tagtypes = None
106 self._tagtypes = None
107
107
108 self._branchcache = None
108 self._branchcache = None
109 self._branchcachetip = None
109 self._branchcachetip = None
110 self.nodetagscache = None
110 self.nodetagscache = None
111 self.filterpats = {}
111 self.filterpats = {}
112 self._datafilters = {}
112 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
113 self._transref = self._lockref = self._wlockref = None
114
114
115 def _applyrequirements(self, requirements):
115 def _applyrequirements(self, requirements):
116 self.requirements = requirements
116 self.requirements = requirements
117 self.sopener.options = {}
117 self.sopener.options = {}
118 if 'parentdelta' in requirements:
118 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
119 self.sopener.options['parentdelta'] = 1
120
120
121 def _writerequirements(self):
121 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
122 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
123 for r in self.requirements:
124 reqfile.write("%s\n" % r)
124 reqfile.write("%s\n" % r)
125 reqfile.close()
125 reqfile.close()
126
126
127 def _checknested(self, path):
127 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
128 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
129 if not path.startswith(self.root):
130 return False
130 return False
131 subpath = path[len(self.root) + 1:]
131 subpath = path[len(self.root) + 1:]
132
132
133 # XXX: Checking against the current working copy is wrong in
133 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
134 # the sense that it can reject things like
135 #
135 #
136 # $ hg cat -r 10 sub/x.txt
136 # $ hg cat -r 10 sub/x.txt
137 #
137 #
138 # if sub/ is no longer a subrepository in the working copy
138 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
139 # parent revision.
140 #
140 #
141 # However, it can of course also allow things that would have
141 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
142 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
143 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
144 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
145 # panics when it sees sub/.hg/.
146 #
146 #
147 # All in all, checking against the working copy seems sensible
147 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
148 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
149 # the filesystem *now*.
150 ctx = self[None]
150 ctx = self[None]
151 parts = util.splitpath(subpath)
151 parts = util.splitpath(subpath)
152 while parts:
152 while parts:
153 prefix = os.sep.join(parts)
153 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
154 if prefix in ctx.substate:
155 if prefix == subpath:
155 if prefix == subpath:
156 return True
156 return True
157 else:
157 else:
158 sub = ctx.sub(prefix)
158 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
159 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
160 else:
161 parts.pop()
161 parts.pop()
162 return False
162 return False
163
163
164 @util.propertycache
164 @util.propertycache
165 def _bookmarks(self):
165 def _bookmarks(self):
166 return bookmarks.read(self)
166 return bookmarks.read(self)
167
167
168 @util.propertycache
168 @util.propertycache
169 def _bookmarkcurrent(self):
169 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
170 return bookmarks.readcurrent(self)
171
171
172 @propertycache
172 @propertycache
173 def changelog(self):
173 def changelog(self):
174 c = changelog.changelog(self.sopener)
174 c = changelog.changelog(self.sopener)
175 if 'HG_PENDING' in os.environ:
175 if 'HG_PENDING' in os.environ:
176 p = os.environ['HG_PENDING']
176 p = os.environ['HG_PENDING']
177 if p.startswith(self.root):
177 if p.startswith(self.root):
178 c.readpending('00changelog.i.a')
178 c.readpending('00changelog.i.a')
179 self.sopener.options['defversion'] = c.version
179 self.sopener.options['defversion'] = c.version
180 return c
180 return c
181
181
182 @propertycache
182 @propertycache
183 def manifest(self):
183 def manifest(self):
184 return manifest.manifest(self.sopener)
184 return manifest.manifest(self.sopener)
185
185
186 @propertycache
186 @propertycache
187 def dirstate(self):
187 def dirstate(self):
188 warned = [0]
188 warned = [0]
189 def validate(node):
189 def validate(node):
190 try:
190 try:
191 r = self.changelog.rev(node)
191 r = self.changelog.rev(node)
192 return node
192 return node
193 except error.LookupError:
193 except error.LookupError:
194 if not warned[0]:
194 if not warned[0]:
195 warned[0] = True
195 warned[0] = True
196 self.ui.warn(_("warning: ignoring unknown"
196 self.ui.warn(_("warning: ignoring unknown"
197 " working parent %s!\n") % short(node))
197 " working parent %s!\n") % short(node))
198 return nullid
198 return nullid
199
199
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201
201
202 def __getitem__(self, changeid):
202 def __getitem__(self, changeid):
203 if changeid is None:
203 if changeid is None:
204 return context.workingctx(self)
204 return context.workingctx(self)
205 return context.changectx(self, changeid)
205 return context.changectx(self, changeid)
206
206
207 def __contains__(self, changeid):
207 def __contains__(self, changeid):
208 try:
208 try:
209 return bool(self.lookup(changeid))
209 return bool(self.lookup(changeid))
210 except error.RepoLookupError:
210 except error.RepoLookupError:
211 return False
211 return False
212
212
213 def __nonzero__(self):
213 def __nonzero__(self):
214 return True
214 return True
215
215
216 def __len__(self):
216 def __len__(self):
217 return len(self.changelog)
217 return len(self.changelog)
218
218
219 def __iter__(self):
219 def __iter__(self):
220 for i in xrange(len(self)):
220 for i in xrange(len(self)):
221 yield i
221 yield i
222
222
223 def url(self):
223 def url(self):
224 return 'file:' + self.root
224 return 'file:' + self.root
225
225
226 def hook(self, name, throw=False, **args):
226 def hook(self, name, throw=False, **args):
227 return hook.hook(self.ui, self, name, throw, **args)
227 return hook.hook(self.ui, self, name, throw, **args)
228
228
229 tag_disallowed = ':\r\n'
229 tag_disallowed = ':\r\n'
230
230
231 def _tag(self, names, node, message, local, user, date, extra={}):
231 def _tag(self, names, node, message, local, user, date, extra={}):
232 if isinstance(names, str):
232 if isinstance(names, str):
233 allchars = names
233 allchars = names
234 names = (names,)
234 names = (names,)
235 else:
235 else:
236 allchars = ''.join(names)
236 allchars = ''.join(names)
237 for c in self.tag_disallowed:
237 for c in self.tag_disallowed:
238 if c in allchars:
238 if c in allchars:
239 raise util.Abort(_('%r cannot be used in a tag name') % c)
239 raise util.Abort(_('%r cannot be used in a tag name') % c)
240
240
241 branches = self.branchmap()
241 branches = self.branchmap()
242 for name in names:
242 for name in names:
243 self.hook('pretag', throw=True, node=hex(node), tag=name,
243 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 local=local)
244 local=local)
245 if name in branches:
245 if name in branches:
246 self.ui.warn(_("warning: tag %s conflicts with existing"
246 self.ui.warn(_("warning: tag %s conflicts with existing"
247 " branch name\n") % name)
247 " branch name\n") % name)
248
248
249 def writetags(fp, names, munge, prevtags):
249 def writetags(fp, names, munge, prevtags):
250 fp.seek(0, 2)
250 fp.seek(0, 2)
251 if prevtags and prevtags[-1] != '\n':
251 if prevtags and prevtags[-1] != '\n':
252 fp.write('\n')
252 fp.write('\n')
253 for name in names:
253 for name in names:
254 m = munge and munge(name) or name
254 m = munge and munge(name) or name
255 if self._tagtypes and name in self._tagtypes:
255 if self._tagtypes and name in self._tagtypes:
256 old = self._tags.get(name, nullid)
256 old = self._tags.get(name, nullid)
257 fp.write('%s %s\n' % (hex(old), m))
257 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(node), m))
258 fp.write('%s %s\n' % (hex(node), m))
259 fp.close()
259 fp.close()
260
260
261 prevtags = ''
261 prevtags = ''
262 if local:
262 if local:
263 try:
263 try:
264 fp = self.opener('localtags', 'r+')
264 fp = self.opener('localtags', 'r+')
265 except IOError:
265 except IOError:
266 fp = self.opener('localtags', 'a')
266 fp = self.opener('localtags', 'a')
267 else:
267 else:
268 prevtags = fp.read()
268 prevtags = fp.read()
269
269
270 # local tags are stored in the current charset
270 # local tags are stored in the current charset
271 writetags(fp, names, None, prevtags)
271 writetags(fp, names, None, prevtags)
272 for name in names:
272 for name in names:
273 self.hook('tag', node=hex(node), tag=name, local=local)
273 self.hook('tag', node=hex(node), tag=name, local=local)
274 return
274 return
275
275
276 try:
276 try:
277 fp = self.wfile('.hgtags', 'rb+')
277 fp = self.wfile('.hgtags', 'rb+')
278 except IOError:
278 except IOError:
279 fp = self.wfile('.hgtags', 'ab')
279 fp = self.wfile('.hgtags', 'ab')
280 else:
280 else:
281 prevtags = fp.read()
281 prevtags = fp.read()
282
282
283 # committed tags are stored in UTF-8
283 # committed tags are stored in UTF-8
284 writetags(fp, names, encoding.fromlocal, prevtags)
284 writetags(fp, names, encoding.fromlocal, prevtags)
285
285
286 if '.hgtags' not in self.dirstate:
286 if '.hgtags' not in self.dirstate:
287 self[None].add(['.hgtags'])
287 self[None].add(['.hgtags'])
288
288
289 m = matchmod.exact(self.root, '', ['.hgtags'])
289 m = matchmod.exact(self.root, '', ['.hgtags'])
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
290 tagnode = self.commit(message, user, date, extra=extra, match=m)
291
291
292 for name in names:
292 for name in names:
293 self.hook('tag', node=hex(node), tag=name, local=local)
293 self.hook('tag', node=hex(node), tag=name, local=local)
294
294
295 return tagnode
295 return tagnode
296
296
297 def tag(self, names, node, message, local, user, date):
297 def tag(self, names, node, message, local, user, date):
298 '''tag a revision with one or more symbolic names.
298 '''tag a revision with one or more symbolic names.
299
299
300 names is a list of strings or, when adding a single tag, names may be a
300 names is a list of strings or, when adding a single tag, names may be a
301 string.
301 string.
302
302
303 if local is True, the tags are stored in a per-repository file.
303 if local is True, the tags are stored in a per-repository file.
304 otherwise, they are stored in the .hgtags file, and a new
304 otherwise, they are stored in the .hgtags file, and a new
305 changeset is committed with the change.
305 changeset is committed with the change.
306
306
307 keyword arguments:
307 keyword arguments:
308
308
309 local: whether to store tags in non-version-controlled file
309 local: whether to store tags in non-version-controlled file
310 (default False)
310 (default False)
311
311
312 message: commit message to use if committing
312 message: commit message to use if committing
313
313
314 user: name of user to use if committing
314 user: name of user to use if committing
315
315
316 date: date tuple to use if committing'''
316 date: date tuple to use if committing'''
317
317
318 if not local:
318 if not local:
319 for x in self.status()[:5]:
319 for x in self.status()[:5]:
320 if '.hgtags' in x:
320 if '.hgtags' in x:
321 raise util.Abort(_('working copy of .hgtags is changed '
321 raise util.Abort(_('working copy of .hgtags is changed '
322 '(please commit .hgtags manually)'))
322 '(please commit .hgtags manually)'))
323
323
324 self.tags() # instantiate the cache
324 self.tags() # instantiate the cache
325 self._tag(names, node, message, local, user, date)
325 self._tag(names, node, message, local, user, date)
326
326
327 def tags(self):
327 def tags(self):
328 '''return a mapping of tag to node'''
328 '''return a mapping of tag to node'''
329 if self._tags is None:
329 if self._tags is None:
330 (self._tags, self._tagtypes) = self._findtags()
330 (self._tags, self._tagtypes) = self._findtags()
331
331
332 return self._tags
332 return self._tags
333
333
334 def _findtags(self):
334 def _findtags(self):
335 '''Do the hard work of finding tags. Return a pair of dicts
335 '''Do the hard work of finding tags. Return a pair of dicts
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
336 (tags, tagtypes) where tags maps tag name to node, and tagtypes
337 maps tag name to a string like \'global\' or \'local\'.
337 maps tag name to a string like \'global\' or \'local\'.
338 Subclasses or extensions are free to add their own tags, but
338 Subclasses or extensions are free to add their own tags, but
339 should be aware that the returned dicts will be retained for the
339 should be aware that the returned dicts will be retained for the
340 duration of the localrepo object.'''
340 duration of the localrepo object.'''
341
341
342 # XXX what tagtype should subclasses/extensions use? Currently
342 # XXX what tagtype should subclasses/extensions use? Currently
343 # mq and bookmarks add tags, but do not set the tagtype at all.
343 # mq and bookmarks add tags, but do not set the tagtype at all.
344 # Should each extension invent its own tag type? Should there
344 # Should each extension invent its own tag type? Should there
345 # be one tagtype for all such "virtual" tags? Or is the status
345 # be one tagtype for all such "virtual" tags? Or is the status
346 # quo fine?
346 # quo fine?
347
347
348 alltags = {} # map tag name to (node, hist)
348 alltags = {} # map tag name to (node, hist)
349 tagtypes = {}
349 tagtypes = {}
350
350
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
351 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
352 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
353
353
354 # Build the return dicts. Have to re-encode tag names because
354 # Build the return dicts. Have to re-encode tag names because
355 # the tags module always uses UTF-8 (in order not to lose info
355 # the tags module always uses UTF-8 (in order not to lose info
356 # writing to the cache), but the rest of Mercurial wants them in
356 # writing to the cache), but the rest of Mercurial wants them in
357 # local encoding.
357 # local encoding.
358 tags = {}
358 tags = {}
359 for (name, (node, hist)) in alltags.iteritems():
359 for (name, (node, hist)) in alltags.iteritems():
360 if node != nullid:
360 if node != nullid:
361 tags[encoding.tolocal(name)] = node
361 tags[encoding.tolocal(name)] = node
362 tags['tip'] = self.changelog.tip()
362 tags['tip'] = self.changelog.tip()
363 tags.update(self._bookmarks)
363 tagtypes = dict([(encoding.tolocal(name), value)
364 tagtypes = dict([(encoding.tolocal(name), value)
364 for (name, value) in tagtypes.iteritems()])
365 for (name, value) in tagtypes.iteritems()])
365 return (tags, tagtypes)
366 return (tags, tagtypes)
366
367
367 def tagtype(self, tagname):
368 def tagtype(self, tagname):
368 '''
369 '''
369 return the type of the given tag. result can be:
370 return the type of the given tag. result can be:
370
371
371 'local' : a local tag
372 'local' : a local tag
372 'global' : a global tag
373 'global' : a global tag
373 None : tag does not exist
374 None : tag does not exist
374 '''
375 '''
375
376
376 self.tags()
377 self.tags()
377
378
378 return self._tagtypes.get(tagname)
379 return self._tagtypes.get(tagname)
379
380
380 def tagslist(self):
381 def tagslist(self):
381 '''return a list of tags ordered by revision'''
382 '''return a list of tags ordered by revision'''
382 l = []
383 l = []
383 for t, n in self.tags().iteritems():
384 for t, n in self.tags().iteritems():
384 try:
385 try:
385 r = self.changelog.rev(n)
386 r = self.changelog.rev(n)
386 except:
387 except:
387 r = -2 # sort to the beginning of the list if unknown
388 r = -2 # sort to the beginning of the list if unknown
388 l.append((r, t, n))
389 l.append((r, t, n))
389 return [(t, n) for r, t, n in sorted(l)]
390 return [(t, n) for r, t, n in sorted(l)]
390
391
391 def nodetags(self, node):
392 def nodetags(self, node):
392 '''return the tags associated with a node'''
393 '''return the tags associated with a node'''
393 if not self.nodetagscache:
394 if not self.nodetagscache:
394 self.nodetagscache = {}
395 self.nodetagscache = {}
395 for t, n in self.tags().iteritems():
396 for t, n in self.tags().iteritems():
396 self.nodetagscache.setdefault(n, []).append(t)
397 self.nodetagscache.setdefault(n, []).append(t)
397 for tags in self.nodetagscache.itervalues():
398 for tags in self.nodetagscache.itervalues():
398 tags.sort()
399 tags.sort()
399 return self.nodetagscache.get(node, [])
400 return self.nodetagscache.get(node, [])
400
401
401 def _branchtags(self, partial, lrev):
402 def _branchtags(self, partial, lrev):
402 # TODO: rename this function?
403 # TODO: rename this function?
403 tiprev = len(self) - 1
404 tiprev = len(self) - 1
404 if lrev != tiprev:
405 if lrev != tiprev:
405 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
406 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
406 self._updatebranchcache(partial, ctxgen)
407 self._updatebranchcache(partial, ctxgen)
407 self._writebranchcache(partial, self.changelog.tip(), tiprev)
408 self._writebranchcache(partial, self.changelog.tip(), tiprev)
408
409
409 return partial
410 return partial
410
411
411 def updatebranchcache(self):
412 def updatebranchcache(self):
412 tip = self.changelog.tip()
413 tip = self.changelog.tip()
413 if self._branchcache is not None and self._branchcachetip == tip:
414 if self._branchcache is not None and self._branchcachetip == tip:
414 return self._branchcache
415 return self._branchcache
415
416
416 oldtip = self._branchcachetip
417 oldtip = self._branchcachetip
417 self._branchcachetip = tip
418 self._branchcachetip = tip
418 if oldtip is None or oldtip not in self.changelog.nodemap:
419 if oldtip is None or oldtip not in self.changelog.nodemap:
419 partial, last, lrev = self._readbranchcache()
420 partial, last, lrev = self._readbranchcache()
420 else:
421 else:
421 lrev = self.changelog.rev(oldtip)
422 lrev = self.changelog.rev(oldtip)
422 partial = self._branchcache
423 partial = self._branchcache
423
424
424 self._branchtags(partial, lrev)
425 self._branchtags(partial, lrev)
425 # this private cache holds all heads (not just tips)
426 # this private cache holds all heads (not just tips)
426 self._branchcache = partial
427 self._branchcache = partial
427
428
428 def branchmap(self):
429 def branchmap(self):
429 '''returns a dictionary {branch: [branchheads]}'''
430 '''returns a dictionary {branch: [branchheads]}'''
430 self.updatebranchcache()
431 self.updatebranchcache()
431 return self._branchcache
432 return self._branchcache
432
433
433 def branchtags(self):
434 def branchtags(self):
434 '''return a dict where branch names map to the tipmost head of
435 '''return a dict where branch names map to the tipmost head of
435 the branch, open heads come before closed'''
436 the branch, open heads come before closed'''
436 bt = {}
437 bt = {}
437 for bn, heads in self.branchmap().iteritems():
438 for bn, heads in self.branchmap().iteritems():
438 tip = heads[-1]
439 tip = heads[-1]
439 for h in reversed(heads):
440 for h in reversed(heads):
440 if 'close' not in self.changelog.read(h)[5]:
441 if 'close' not in self.changelog.read(h)[5]:
441 tip = h
442 tip = h
442 break
443 break
443 bt[bn] = tip
444 bt[bn] = tip
444 return bt
445 return bt
445
446
446 def _readbranchcache(self):
447 def _readbranchcache(self):
447 partial = {}
448 partial = {}
448 try:
449 try:
449 f = self.opener("cache/branchheads")
450 f = self.opener("cache/branchheads")
450 lines = f.read().split('\n')
451 lines = f.read().split('\n')
451 f.close()
452 f.close()
452 except (IOError, OSError):
453 except (IOError, OSError):
453 return {}, nullid, nullrev
454 return {}, nullid, nullrev
454
455
455 try:
456 try:
456 last, lrev = lines.pop(0).split(" ", 1)
457 last, lrev = lines.pop(0).split(" ", 1)
457 last, lrev = bin(last), int(lrev)
458 last, lrev = bin(last), int(lrev)
458 if lrev >= len(self) or self[lrev].node() != last:
459 if lrev >= len(self) or self[lrev].node() != last:
459 # invalidate the cache
460 # invalidate the cache
460 raise ValueError('invalidating branch cache (tip differs)')
461 raise ValueError('invalidating branch cache (tip differs)')
461 for l in lines:
462 for l in lines:
462 if not l:
463 if not l:
463 continue
464 continue
464 node, label = l.split(" ", 1)
465 node, label = l.split(" ", 1)
465 label = encoding.tolocal(label.strip())
466 label = encoding.tolocal(label.strip())
466 partial.setdefault(label, []).append(bin(node))
467 partial.setdefault(label, []).append(bin(node))
467 except KeyboardInterrupt:
468 except KeyboardInterrupt:
468 raise
469 raise
469 except Exception, inst:
470 except Exception, inst:
470 if self.ui.debugflag:
471 if self.ui.debugflag:
471 self.ui.warn(str(inst), '\n')
472 self.ui.warn(str(inst), '\n')
472 partial, last, lrev = {}, nullid, nullrev
473 partial, last, lrev = {}, nullid, nullrev
473 return partial, last, lrev
474 return partial, last, lrev
474
475
475 def _writebranchcache(self, branches, tip, tiprev):
476 def _writebranchcache(self, branches, tip, tiprev):
476 try:
477 try:
477 f = self.opener("cache/branchheads", "w", atomictemp=True)
478 f = self.opener("cache/branchheads", "w", atomictemp=True)
478 f.write("%s %s\n" % (hex(tip), tiprev))
479 f.write("%s %s\n" % (hex(tip), tiprev))
479 for label, nodes in branches.iteritems():
480 for label, nodes in branches.iteritems():
480 for node in nodes:
481 for node in nodes:
481 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
482 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
482 f.rename()
483 f.rename()
483 except (IOError, OSError):
484 except (IOError, OSError):
484 pass
485 pass
485
486
486 def _updatebranchcache(self, partial, ctxgen):
487 def _updatebranchcache(self, partial, ctxgen):
487 # collect new branch entries
488 # collect new branch entries
488 newbranches = {}
489 newbranches = {}
489 for c in ctxgen:
490 for c in ctxgen:
490 newbranches.setdefault(c.branch(), []).append(c.node())
491 newbranches.setdefault(c.branch(), []).append(c.node())
491 # if older branchheads are reachable from new ones, they aren't
492 # if older branchheads are reachable from new ones, they aren't
492 # really branchheads. Note checking parents is insufficient:
493 # really branchheads. Note checking parents is insufficient:
493 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
494 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
494 for branch, newnodes in newbranches.iteritems():
495 for branch, newnodes in newbranches.iteritems():
495 bheads = partial.setdefault(branch, [])
496 bheads = partial.setdefault(branch, [])
496 bheads.extend(newnodes)
497 bheads.extend(newnodes)
497 if len(bheads) <= 1:
498 if len(bheads) <= 1:
498 continue
499 continue
499 # starting from tip means fewer passes over reachable
500 # starting from tip means fewer passes over reachable
500 while newnodes:
501 while newnodes:
501 latest = newnodes.pop()
502 latest = newnodes.pop()
502 if latest not in bheads:
503 if latest not in bheads:
503 continue
504 continue
504 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
505 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
505 reachable = self.changelog.reachable(latest, minbhrev)
506 reachable = self.changelog.reachable(latest, minbhrev)
506 reachable.remove(latest)
507 reachable.remove(latest)
507 bheads = [b for b in bheads if b not in reachable]
508 bheads = [b for b in bheads if b not in reachable]
508 partial[branch] = bheads
509 partial[branch] = bheads
509
510
510 def lookup(self, key):
511 def lookup(self, key):
511 if isinstance(key, int):
512 if isinstance(key, int):
512 return self.changelog.node(key)
513 return self.changelog.node(key)
513 elif key == '.':
514 elif key == '.':
514 return self.dirstate.parents()[0]
515 return self.dirstate.parents()[0]
515 elif key == 'null':
516 elif key == 'null':
516 return nullid
517 return nullid
517 elif key == 'tip':
518 elif key == 'tip':
518 return self.changelog.tip()
519 return self.changelog.tip()
519 n = self.changelog._match(key)
520 n = self.changelog._match(key)
520 if n:
521 if n:
521 return n
522 return n
522 if key in self.tags():
523 if key in self.tags():
523 return self.tags()[key]
524 return self.tags()[key]
524 if key in self.branchtags():
525 if key in self.branchtags():
525 return self.branchtags()[key]
526 return self.branchtags()[key]
526 n = self.changelog._partialmatch(key)
527 n = self.changelog._partialmatch(key)
527 if n:
528 if n:
528 return n
529 return n
529
530
530 # can't find key, check if it might have come from damaged dirstate
531 # can't find key, check if it might have come from damaged dirstate
531 if key in self.dirstate.parents():
532 if key in self.dirstate.parents():
532 raise error.Abort(_("working directory has unknown parent '%s'!")
533 raise error.Abort(_("working directory has unknown parent '%s'!")
533 % short(key))
534 % short(key))
534 try:
535 try:
535 if len(key) == 20:
536 if len(key) == 20:
536 key = hex(key)
537 key = hex(key)
537 except:
538 except:
538 pass
539 pass
539 raise error.RepoLookupError(_("unknown revision '%s'") % key)
540 raise error.RepoLookupError(_("unknown revision '%s'") % key)
540
541
541 def lookupbranch(self, key, remote=None):
542 def lookupbranch(self, key, remote=None):
542 repo = remote or self
543 repo = remote or self
543 if key in repo.branchmap():
544 if key in repo.branchmap():
544 return key
545 return key
545
546
546 repo = (remote and remote.local()) and remote or self
547 repo = (remote and remote.local()) and remote or self
547 return repo[key].branch()
548 return repo[key].branch()
548
549
549 def local(self):
550 def local(self):
550 return True
551 return True
551
552
552 def join(self, f):
553 def join(self, f):
553 return os.path.join(self.path, f)
554 return os.path.join(self.path, f)
554
555
555 def wjoin(self, f):
556 def wjoin(self, f):
556 return os.path.join(self.root, f)
557 return os.path.join(self.root, f)
557
558
558 def file(self, f):
559 def file(self, f):
559 if f[0] == '/':
560 if f[0] == '/':
560 f = f[1:]
561 f = f[1:]
561 return filelog.filelog(self.sopener, f)
562 return filelog.filelog(self.sopener, f)
562
563
563 def changectx(self, changeid):
564 def changectx(self, changeid):
564 return self[changeid]
565 return self[changeid]
565
566
566 def parents(self, changeid=None):
567 def parents(self, changeid=None):
567 '''get list of changectxs for parents of changeid'''
568 '''get list of changectxs for parents of changeid'''
568 return self[changeid].parents()
569 return self[changeid].parents()
569
570
570 def filectx(self, path, changeid=None, fileid=None):
571 def filectx(self, path, changeid=None, fileid=None):
571 """changeid can be a changeset revision, node, or tag.
572 """changeid can be a changeset revision, node, or tag.
572 fileid can be a file revision or node."""
573 fileid can be a file revision or node."""
573 return context.filectx(self, path, changeid, fileid)
574 return context.filectx(self, path, changeid, fileid)
574
575
575 def getcwd(self):
576 def getcwd(self):
576 return self.dirstate.getcwd()
577 return self.dirstate.getcwd()
577
578
578 def pathto(self, f, cwd=None):
579 def pathto(self, f, cwd=None):
579 return self.dirstate.pathto(f, cwd)
580 return self.dirstate.pathto(f, cwd)
580
581
581 def wfile(self, f, mode='r'):
582 def wfile(self, f, mode='r'):
582 return self.wopener(f, mode)
583 return self.wopener(f, mode)
583
584
584 def _link(self, f):
585 def _link(self, f):
585 return os.path.islink(self.wjoin(f))
586 return os.path.islink(self.wjoin(f))
586
587
587 def _loadfilter(self, filter):
588 def _loadfilter(self, filter):
588 if filter not in self.filterpats:
589 if filter not in self.filterpats:
589 l = []
590 l = []
590 for pat, cmd in self.ui.configitems(filter):
591 for pat, cmd in self.ui.configitems(filter):
591 if cmd == '!':
592 if cmd == '!':
592 continue
593 continue
593 mf = matchmod.match(self.root, '', [pat])
594 mf = matchmod.match(self.root, '', [pat])
594 fn = None
595 fn = None
595 params = cmd
596 params = cmd
596 for name, filterfn in self._datafilters.iteritems():
597 for name, filterfn in self._datafilters.iteritems():
597 if cmd.startswith(name):
598 if cmd.startswith(name):
598 fn = filterfn
599 fn = filterfn
599 params = cmd[len(name):].lstrip()
600 params = cmd[len(name):].lstrip()
600 break
601 break
601 if not fn:
602 if not fn:
602 fn = lambda s, c, **kwargs: util.filter(s, c)
603 fn = lambda s, c, **kwargs: util.filter(s, c)
603 # Wrap old filters not supporting keyword arguments
604 # Wrap old filters not supporting keyword arguments
604 if not inspect.getargspec(fn)[2]:
605 if not inspect.getargspec(fn)[2]:
605 oldfn = fn
606 oldfn = fn
606 fn = lambda s, c, **kwargs: oldfn(s, c)
607 fn = lambda s, c, **kwargs: oldfn(s, c)
607 l.append((mf, fn, params))
608 l.append((mf, fn, params))
608 self.filterpats[filter] = l
609 self.filterpats[filter] = l
609 return self.filterpats[filter]
610 return self.filterpats[filter]
610
611
611 def _filter(self, filterpats, filename, data):
612 def _filter(self, filterpats, filename, data):
612 for mf, fn, cmd in filterpats:
613 for mf, fn, cmd in filterpats:
613 if mf(filename):
614 if mf(filename):
614 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
615 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
615 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
616 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
616 break
617 break
617
618
618 return data
619 return data
619
620
620 @propertycache
621 @propertycache
621 def _encodefilterpats(self):
622 def _encodefilterpats(self):
622 return self._loadfilter('encode')
623 return self._loadfilter('encode')
623
624
624 @propertycache
625 @propertycache
625 def _decodefilterpats(self):
626 def _decodefilterpats(self):
626 return self._loadfilter('decode')
627 return self._loadfilter('decode')
627
628
628 def adddatafilter(self, name, filter):
629 def adddatafilter(self, name, filter):
629 self._datafilters[name] = filter
630 self._datafilters[name] = filter
630
631
631 def wread(self, filename):
632 def wread(self, filename):
632 if self._link(filename):
633 if self._link(filename):
633 data = os.readlink(self.wjoin(filename))
634 data = os.readlink(self.wjoin(filename))
634 else:
635 else:
635 data = self.wopener(filename, 'r').read()
636 data = self.wopener(filename, 'r').read()
636 return self._filter(self._encodefilterpats, filename, data)
637 return self._filter(self._encodefilterpats, filename, data)
637
638
638 def wwrite(self, filename, data, flags):
639 def wwrite(self, filename, data, flags):
639 data = self._filter(self._decodefilterpats, filename, data)
640 data = self._filter(self._decodefilterpats, filename, data)
640 if 'l' in flags:
641 if 'l' in flags:
641 self.wopener.symlink(data, filename)
642 self.wopener.symlink(data, filename)
642 else:
643 else:
643 self.wopener(filename, 'w').write(data)
644 self.wopener(filename, 'w').write(data)
644 if 'x' in flags:
645 if 'x' in flags:
645 util.set_flags(self.wjoin(filename), False, True)
646 util.set_flags(self.wjoin(filename), False, True)
646
647
647 def wwritedata(self, filename, data):
648 def wwritedata(self, filename, data):
648 return self._filter(self._decodefilterpats, filename, data)
649 return self._filter(self._decodefilterpats, filename, data)
649
650
    def transaction(self, desc):
        """Open a new store transaction, or nest into a running one.

        *desc* is a human-readable description recorded in journal.desc
        and shown by later rollback messages. Returns a transaction
        object; a weak reference to it is kept so concurrent callers in
        the same process can nest.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is already active: nest inside it
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet (e.g. brand-new repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # on successful close, the journal.* files are renamed to undo.*
        # so that a later rollback() can restore this pre-commit state
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
680
681
681 def recover(self):
682 def recover(self):
682 lock = self.lock()
683 lock = self.lock()
683 try:
684 try:
684 if os.path.exists(self.sjoin("journal")):
685 if os.path.exists(self.sjoin("journal")):
685 self.ui.status(_("rolling back interrupted transaction\n"))
686 self.ui.status(_("rolling back interrupted transaction\n"))
686 transaction.rollback(self.sopener, self.sjoin("journal"),
687 transaction.rollback(self.sopener, self.sjoin("journal"),
687 self.ui.warn)
688 self.ui.warn)
688 self.invalidate()
689 self.invalidate()
689 return True
690 return True
690 else:
691 else:
691 self.ui.warn(_("no interrupted transaction available\n"))
692 self.ui.warn(_("no interrupted transaction available\n"))
692 return False
693 return False
693 finally:
694 finally:
694 lock.release()
695 lock.release()
695
696
    def rollback(self, dryrun=False):
        """Undo the most recent transaction, restoring the store plus the
        saved dirstate, branch and bookmarks.

        When *dryrun* is true, only report what would be rolled back.
        Returns 1 when no rollback information is available, None
        otherwise.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    # undo.desc holds "<len(repo)>\n<desc>..." as written
                    # by transaction(); use it for a precise message
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                # bookmarks backup may not exist for old undo data
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                # history was destroyed: run the common post-strip hook
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
737
738
738 def invalidatecaches(self):
739 def invalidatecaches(self):
739 self._tags = None
740 self._tags = None
740 self._tagtypes = None
741 self._tagtypes = None
741 self.nodetagscache = None
742 self.nodetagscache = None
742 self._branchcache = None # in UTF-8
743 self._branchcache = None # in UTF-8
743 self._branchcachetip = None
744 self._branchcachetip = None
744
745
745 def invalidate(self):
746 def invalidate(self):
746 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
747 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
747 if a in self.__dict__:
748 if a in self.__dict__:
748 delattr(self, a)
749 delattr(self, a)
749 self.invalidatecaches()
750 self.invalidatecaches()
750
751
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        *releasefn*/*acquirefn* are callbacks invoked on release and on
        successful acquisition; *desc* is used in user-facing messages.
        Raises error.LockHeld when the lock is busy and *wait* is false.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
765
766
766 def lock(self, wait=True):
767 def lock(self, wait=True):
767 '''Lock the repository store (.hg/store) and return a weak reference
768 '''Lock the repository store (.hg/store) and return a weak reference
768 to the lock. Use this before modifying the store (e.g. committing or
769 to the lock. Use this before modifying the store (e.g. committing or
769 stripping). If you are opening a transaction, get a lock as well.)'''
770 stripping). If you are opening a transaction, get a lock as well.)'''
770 l = self._lockref and self._lockref()
771 l = self._lockref and self._lockref()
771 if l is not None and l.held:
772 if l is not None and l.held:
772 l.lock()
773 l.lock()
773 return l
774 return l
774
775
775 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
776 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
776 _('repository %s') % self.origroot)
777 _('repository %s') % self.origroot)
777 self._lockref = weakref.ref(l)
778 self._lockref = weakref.ref(l)
778 return l
779 return l
779
780
780 def wlock(self, wait=True):
781 def wlock(self, wait=True):
781 '''Lock the non-store parts of the repository (everything under
782 '''Lock the non-store parts of the repository (everything under
782 .hg except .hg/store) and return a weak reference to the lock.
783 .hg except .hg/store) and return a weak reference to the lock.
783 Use this before modifying files in .hg.'''
784 Use this before modifying files in .hg.'''
784 l = self._wlockref and self._wlockref()
785 l = self._wlockref and self._wlockref()
785 if l is not None and l.held:
786 if l is not None and l.held:
786 l.lock()
787 l.lock()
787 return l
788 return l
788
789
789 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
790 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
790 self.dirstate.invalidate, _('working directory of %s') %
791 self.dirstate.invalidate, _('working directory of %s') %
791 self.origroot)
792 self.origroot)
792 self._wlockref = weakref.ref(l)
793 self._wlockref = weakref.ref(l)
793 return l
794 return l
794
795
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: the file context to commit; manifest1/manifest2: the parent
        manifests; linkrev: revision number the filelog entry will link
        to; tr: the active transaction (proxy); changelist: list of
        changed file names, appended to as a side effect.

        Returns the new filelog node, or the reused parent node when
        only flags changed (or nothing changed).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                # copy source vanished entirely: record a plain change
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
874
875
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories and abort on bad patterns so we
            # can verify explicit file arguments below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            # changes is a tuple of file-name lists from status();
            # indices used below: 0 and 1 are committed as-is, 2 is
            # forgotten, 3 is treated as missing, 6 as clean — TODO
            # confirm exact ordering against status()
            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit: bail out early (unless forced, closing a
            # branch, merging, or switching named branch)
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                      "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # bare except is deliberate: on any failure (including
                # KeyboardInterrupt) tell the user where the edited
                # message was saved, then re-raise
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            parents = (p1, p2)
            if p2 == nullid:
                parents = (p1,)
            bookmarks.update(self, parents, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # run the post-commit hook outside the wlock
        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1006
1007
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors while reading files are fatal
        instead of marking the file removed. Returns the new changelog
        node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a weak proxy keeps the filelogs from extending the
            # transaction's lifetime
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        # vanished file: treat it as removed
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog; delayupdate/finalize let the pretxncommit
            # hook see the pending revision before it is written out
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1074
1075
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()
1093
1094
1094 def walk(self, match, node=None):
1095 def walk(self, match, node=None):
1095 '''
1096 '''
1096 walk recursively through the directory tree or a given
1097 walk recursively through the directory tree or a given
1097 changeset, finding all files matched by the match
1098 changeset, finding all files matched by the match
1098 function
1099 function
1099 '''
1100 '''
1100 return self[node].walk(match)
1101 return self[node].walk(match)
1101
1102
1102 def status(self, node1='.', node2=None, match=None,
1103 def status(self, node1='.', node2=None, match=None,
1103 ignored=False, clean=False, unknown=False,
1104 ignored=False, clean=False, unknown=False,
1104 listsubrepos=False):
1105 listsubrepos=False):
1105 """return status of files between two nodes or node and working directory
1106 """return status of files between two nodes or node and working directory
1106
1107
1107 If node1 is None, use the first dirstate parent instead.
1108 If node1 is None, use the first dirstate parent instead.
1108 If node2 is None, compare node1 with working directory.
1109 If node2 is None, compare node1 with working directory.
1109 """
1110 """
1110
1111
1111 def mfmatches(ctx):
1112 def mfmatches(ctx):
1112 mf = ctx.manifest().copy()
1113 mf = ctx.manifest().copy()
1113 for fn in mf.keys():
1114 for fn in mf.keys():
1114 if not match(fn):
1115 if not match(fn):
1115 del mf[fn]
1116 del mf[fn]
1116 return mf
1117 return mf
1117
1118
1118 if isinstance(node1, context.changectx):
1119 if isinstance(node1, context.changectx):
1119 ctx1 = node1
1120 ctx1 = node1
1120 else:
1121 else:
1121 ctx1 = self[node1]
1122 ctx1 = self[node1]
1122 if isinstance(node2, context.changectx):
1123 if isinstance(node2, context.changectx):
1123 ctx2 = node2
1124 ctx2 = node2
1124 else:
1125 else:
1125 ctx2 = self[node2]
1126 ctx2 = self[node2]
1126
1127
1127 working = ctx2.rev() is None
1128 working = ctx2.rev() is None
1128 parentworking = working and ctx1 == self['.']
1129 parentworking = working and ctx1 == self['.']
1129 match = match or matchmod.always(self.root, self.getcwd())
1130 match = match or matchmod.always(self.root, self.getcwd())
1130 listignored, listclean, listunknown = ignored, clean, unknown
1131 listignored, listclean, listunknown = ignored, clean, unknown
1131
1132
1132 # load earliest manifest first for caching reasons
1133 # load earliest manifest first for caching reasons
1133 if not working and ctx2.rev() < ctx1.rev():
1134 if not working and ctx2.rev() < ctx1.rev():
1134 ctx2.manifest()
1135 ctx2.manifest()
1135
1136
1136 if not parentworking:
1137 if not parentworking:
1137 def bad(f, msg):
1138 def bad(f, msg):
1138 if f not in ctx1:
1139 if f not in ctx1:
1139 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1140 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1140 match.bad = bad
1141 match.bad = bad
1141
1142
1142 if working: # we need to scan the working dir
1143 if working: # we need to scan the working dir
1143 subrepos = []
1144 subrepos = []
1144 if '.hgsub' in self.dirstate:
1145 if '.hgsub' in self.dirstate:
1145 subrepos = ctx1.substate.keys()
1146 subrepos = ctx1.substate.keys()
1146 s = self.dirstate.status(match, subrepos, listignored,
1147 s = self.dirstate.status(match, subrepos, listignored,
1147 listclean, listunknown)
1148 listclean, listunknown)
1148 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1149 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1149
1150
1150 # check for any possibly clean files
1151 # check for any possibly clean files
1151 if parentworking and cmp:
1152 if parentworking and cmp:
1152 fixup = []
1153 fixup = []
1153 # do a full compare of any files that might have changed
1154 # do a full compare of any files that might have changed
1154 for f in sorted(cmp):
1155 for f in sorted(cmp):
1155 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1156 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1156 or ctx1[f].cmp(ctx2[f])):
1157 or ctx1[f].cmp(ctx2[f])):
1157 modified.append(f)
1158 modified.append(f)
1158 else:
1159 else:
1159 fixup.append(f)
1160 fixup.append(f)
1160
1161
1161 # update dirstate for files that are actually clean
1162 # update dirstate for files that are actually clean
1162 if fixup:
1163 if fixup:
1163 if listclean:
1164 if listclean:
1164 clean += fixup
1165 clean += fixup
1165
1166
1166 try:
1167 try:
1167 # updating the dirstate is optional
1168 # updating the dirstate is optional
1168 # so we don't wait on the lock
1169 # so we don't wait on the lock
1169 wlock = self.wlock(False)
1170 wlock = self.wlock(False)
1170 try:
1171 try:
1171 for f in fixup:
1172 for f in fixup:
1172 self.dirstate.normal(f)
1173 self.dirstate.normal(f)
1173 finally:
1174 finally:
1174 wlock.release()
1175 wlock.release()
1175 except error.LockError:
1176 except error.LockError:
1176 pass
1177 pass
1177
1178
1178 if not parentworking:
1179 if not parentworking:
1179 mf1 = mfmatches(ctx1)
1180 mf1 = mfmatches(ctx1)
1180 if working:
1181 if working:
1181 # we are comparing working dir against non-parent
1182 # we are comparing working dir against non-parent
1182 # generate a pseudo-manifest for the working dir
1183 # generate a pseudo-manifest for the working dir
1183 mf2 = mfmatches(self['.'])
1184 mf2 = mfmatches(self['.'])
1184 for f in cmp + modified + added:
1185 for f in cmp + modified + added:
1185 mf2[f] = None
1186 mf2[f] = None
1186 mf2.set(f, ctx2.flags(f))
1187 mf2.set(f, ctx2.flags(f))
1187 for f in removed:
1188 for f in removed:
1188 if f in mf2:
1189 if f in mf2:
1189 del mf2[f]
1190 del mf2[f]
1190 else:
1191 else:
1191 # we are comparing two revisions
1192 # we are comparing two revisions
1192 deleted, unknown, ignored = [], [], []
1193 deleted, unknown, ignored = [], [], []
1193 mf2 = mfmatches(ctx2)
1194 mf2 = mfmatches(ctx2)
1194
1195
1195 modified, added, clean = [], [], []
1196 modified, added, clean = [], [], []
1196 for fn in mf2:
1197 for fn in mf2:
1197 if fn in mf1:
1198 if fn in mf1:
1198 if (mf1.flags(fn) != mf2.flags(fn) or
1199 if (mf1.flags(fn) != mf2.flags(fn) or
1199 (mf1[fn] != mf2[fn] and
1200 (mf1[fn] != mf2[fn] and
1200 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1201 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1201 modified.append(fn)
1202 modified.append(fn)
1202 elif listclean:
1203 elif listclean:
1203 clean.append(fn)
1204 clean.append(fn)
1204 del mf1[fn]
1205 del mf1[fn]
1205 else:
1206 else:
1206 added.append(fn)
1207 added.append(fn)
1207 removed = mf1.keys()
1208 removed = mf1.keys()
1208
1209
1209 r = modified, added, removed, deleted, unknown, ignored, clean
1210 r = modified, added, removed, deleted, unknown, ignored, clean
1210
1211
1211 if listsubrepos:
1212 if listsubrepos:
1212 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1213 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1213 if working:
1214 if working:
1214 rev2 = None
1215 rev2 = None
1215 else:
1216 else:
1216 rev2 = ctx2.substate[subpath][1]
1217 rev2 = ctx2.substate[subpath][1]
1217 try:
1218 try:
1218 submatch = matchmod.narrowmatcher(subpath, match)
1219 submatch = matchmod.narrowmatcher(subpath, match)
1219 s = sub.status(rev2, match=submatch, ignored=listignored,
1220 s = sub.status(rev2, match=submatch, ignored=listignored,
1220 clean=listclean, unknown=listunknown,
1221 clean=listclean, unknown=listunknown,
1221 listsubrepos=True)
1222 listsubrepos=True)
1222 for rfiles, sfiles in zip(r, s):
1223 for rfiles, sfiles in zip(r, s):
1223 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1224 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1224 except error.LookupError:
1225 except error.LookupError:
1225 self.ui.status(_("skipping missing subrepository: %s\n")
1226 self.ui.status(_("skipping missing subrepository: %s\n")
1226 % subpath)
1227 % subpath)
1227
1228
1228 [l.sort() for l in r]
1229 [l.sort() for l in r]
1229 return r
1230 return r
1230
1231
def heads(self, start=None):
    """Return the repository's head nodes, newest revision first.

    If start is given, only heads reachable from start are returned.
    """
    cl = self.changelog
    found = cl.heads(start)
    # present the highest (newest) revisions first
    return sorted(found, key=cl.rev, reverse=True)
1235
1236
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    bmap = self.branchmap()
    if branch not in bmap:
        return []
    # the branchmap cache stores heads lowest-rev-first; flip to
    # newest-first for callers
    candidates = list(reversed(bmap[branch]))
    if start is not None:
        # keep only the heads that are descendants of start
        reachable = set(self.changelog.nodesbetween([start], candidates)[2])
        candidates = [h for h in candidates if h in reachable]
    if not closed:
        # drop heads whose changeset extras mark the branch as closed
        candidates = [h for h in candidates
                      if 'close' not in self.changelog.read(h)[5]]
    return candidates
1259
1260
def branches(self, nodes):
    """For each starting node, walk first parents back to the nearest
    branch point (a merge or a root) and report the span.

    Returns a list of (start, branchpoint, p1, p2) tuples.  With no
    nodes given, the walk starts from the changelog tip.
    """
    if not nodes:
        nodes = [self.changelog.tip()]
    spans = []
    for start in nodes:
        node = start
        while True:
            parents = self.changelog.parents(node)
            # stop at a merge (real second parent) or at a root
            if parents[1] != nullid or parents[0] == nullid:
                spans.append((start, node, parents[0], parents[1]))
                break
            node = parents[0]
    return spans
1273
1274
def between(self, pairs):
    """For each (top, bottom) pair, walk first parents from top toward
    bottom and sample the nodes seen at exponentially growing
    distances (1, 2, 4, ...).

    Returns one list of sampled nodes per input pair.
    """
    results = []
    for top, bottom in pairs:
        sample = []
        node = top
        nextmark = 1
        distance = 0
        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if distance == nextmark:
                sample.append(node)
                nextmark *= 2
            node = parent
            distance += 1
        results.append(sample)
    return results
1292
1293
def pull(self, remote, heads=None, force=False):
    """Pull missing changesets from remote into this repository.

    Returns 0 when nothing was fetched, otherwise the return value of
    addchangegroup().  The local repo lock is held for the duration.
    """
    lock = self.lock()
    try:
        common, fetch, rheads = discovery.findcommonincoming(
            self, remote, heads=heads, force=force)
        if not fetch:
            self.ui.status(_("no changes found\n"))
            return 0

        if heads is None and fetch == [nullid]:
            self.ui.status(_("requesting all changes\n"))
        elif heads is None and remote.capable('changegroupsubset'):
            # issue1320, avoid a race if remote changed after discovery
            heads = rheads

        if heads is None:
            cg = remote.changegroup(fetch, 'pull')
        elif not remote.capable('changegroupsubset'):
            raise util.Abort(_("partial pull cannot be done because "
                               "other repository doesn't support "
                               "changegroupsubset."))
        else:
            cg = remote.changegroupsubset(fetch, heads, 'pull')
        return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
    finally:
        lock.release()
1320
1321
def checkpush(self, force, revs):
    """Extensions can override this function if additional checks have
    to be performed before pushing, or call it if they override push
    command.
    """
    # Intentionally a no-op in core: this is purely a hook point for
    # extensions (e.g. mq, bookmarks) to veto or validate a push before
    # any discovery or data transfer starts.
    pass
1327
1328
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
    - 0 means HTTP error *or* nothing to push
    - 1 means we pushed and remote head count is unchanged *or*
      we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    # Two transports exist: addchangegroup() assumes the local user can
    # lock the remote repo (local filesystem, old ssh servers), while
    # unbundle assumes it cannot (new ssh servers, http servers).
    self.checkpush(force, revs)
    canunbundle = remote.capable('unbundle')
    lock = None if canunbundle else remote.lock()
    try:
        ret = discovery.prepush(self, remote, force, revs, newbranch)
        if ret[0] is None:
            # 0 for "nothing to push", 1 for "something to push but
            # I refuse"
            return ret[1]

        cg, remote_heads = ret
        if not canunbundle:
            # we return an integer indicating remote head count change
            return remote.addchangegroup(cg, 'push', self.url(), lock=lock)

        # local repo finds heads on server, finds out what revs it must
        # push. once revs transferred, if server finds it has different
        # heads (someone else won commit/push race), server aborts.
        if force:
            remote_heads = ['force']
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        return remote.unbundle(cg, remote_heads, 'push')
    finally:
        if lock is not None:
            lock.release()
1373
1374
def changegroupinfo(self, nodes, source):
    """Report the number (and, when debugging, the list) of changesets
    about to be bundled.  Purely informational output; returns None.
    """
    ui = self.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
1381
1382
def changegroupsubset(self, bases, heads, source, extranodes=None):
    """Compute a changegroup consisting of all the nodes that are
    descendents of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.

    The caller can specify some nodes that must be included in the
    changegroup using the extranodes argument.  It should be a dict
    where the keys are the filenames (or 1 for the manifest), and the
    values are lists of (node, linknode) tuples, where node is a wanted
    node and linknode is the changelog node that should be transmitted as
    the linkrev.
    """

    # Set up some initial variables
    # Make it easy to refer to self.changelog
    cl = self.changelog
    # Compute the list of changesets in this changegroup.
    # Some bases may turn out to be superfluous, and some heads may be
    # too.  nodesbetween will return the minimal set of bases and heads
    # necessary to re-create the changegroup.
    if not bases:
        bases = [nullid]
    msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

    if extranodes is None:
        # can we go through the fast path ?
        heads.sort()
        allheads = self.heads()
        allheads.sort()
        if heads == allheads:
            return self._changegroup(msng_cl_lst, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)

    self.changegroupinfo(msng_cl_lst, source)

    # We assume that all ancestors of bases are known
    commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

    # Make it easy to refer to self.manifest
    mnfst = self.manifest
    # We don't know which manifests are missing yet
    msng_mnfst_set = {}
    # Nor do we know which filenodes are missing.
    msng_filenode_set = {}

    # A changeset always belongs to itself, so the changenode lookup
    # function for a changenode is identity.
    def identity(x):
        return x

    # A function generating function that sets up the initial environment
    # the inner function.
    def filenode_collector(changedfiles):
        # This gathers information from each manifestnode included in the
        # changegroup about which filenodes the manifest node references
        # so we can include those in the changegroup too.
        #
        # It also remembers which changenode each filenode belongs to.  It
        # does this by assuming the a filenode belongs to the changenode
        # the first manifest that references it belongs to.
        def collect_msng_filenodes(mnfstnode):
            r = mnfst.rev(mnfstnode)
            if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                # If the previous rev is one of the parents,
                # we only need to see a diff.
                deltamf = mnfst.readdelta(mnfstnode)
                # For each line in the delta
                for f, fnode in deltamf.iteritems():
                    # And if the file is in the list of files we care
                    # about.
                    if f in changedfiles:
                        # Get the changenode this manifest belongs to
                        clnode = msng_mnfst_set[mnfstnode]
                        # Create the set of filenodes for the file if
                        # there isn't one already.
                        ndset = msng_filenode_set.setdefault(f, {})
                        # And set the filenode's changelog node to the
                        # manifest's if it hasn't been set already.
                        ndset.setdefault(fnode, clnode)
            else:
                # Otherwise we need a full manifest.
                m = mnfst.read(mnfstnode)
                # For every file in we care about.
                for f in changedfiles:
                    fnode = m.get(f, None)
                    # If it's in the manifest
                    if fnode is not None:
                        # See comments above.
                        clnode = msng_mnfst_set[mnfstnode]
                        ndset = msng_filenode_set.setdefault(f, {})
                        ndset.setdefault(fnode, clnode)
        return collect_msng_filenodes

    # If we determine that a particular file or manifest node must be a
    # node that the recipient of the changegroup will already have, we can
    # also assume the recipient will have all the parents.  This function
    # prunes them from the set of missing nodes.
    def prune(revlog, missingnodes):
        hasset = set()
        # If a 'missing' filenode thinks it belongs to a changenode we
        # assume the recipient must have, then the recipient must have
        # that filenode.
        for n in missingnodes:
            clrev = revlog.linkrev(revlog.rev(n))
            if clrev in commonrevs:
                hasset.add(n)
        for n in hasset:
            missingnodes.pop(n, None)
        # NOTE: everything an already-present node descends from is also
        # present on the recipient, so prune the ancestors as well.
        for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
            missingnodes.pop(revlog.node(r), None)

    # Add the nodes that were explicitly requested.
    def add_extra_nodes(name, nodes):
        if not extranodes or name not in extranodes:
            return

        for node, linknode in extranodes[name]:
            if node not in nodes:
                nodes[node] = linknode

    # Now that we have all theses utility functions to help out and
    # logically divide up the task, generate the group.
    def gengroup():
        # The set of changed files starts empty.
        changedfiles = set()
        # collect() fills msng_mnfst_set and changedfiles as the
        # changelog group below is iterated.
        collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        group = cl.group(msng_cl_lst, identity, collect)
        for cnt, chnk in enumerate(group):
            yield chnk
            # revlog.group yields three entries per node, so
            # dividing by 3 gives an approximation of how many
            # nodes have been processed.
            self.ui.progress(_('bundling'), cnt / 3,
                             unit=_('changesets'))
        # NOTE: Python 2 integer division; cnt survives the loop above.
        changecount = cnt / 3
        self.ui.progress(_('bundling'), None)

        prune(mnfst, msng_mnfst_set)
        add_extra_nodes(1, msng_mnfst_set)
        msng_mnfst_lst = msng_mnfst_set.keys()
        # Sort the manifestnodes by revision number.
        msng_mnfst_lst.sort(key=mnfst.rev)
        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        group = mnfst.group(msng_mnfst_lst,
                            lambda mnode: msng_mnfst_set[mnode],
                            filenode_collector(changedfiles))
        efiles = {}
        for cnt, chnk in enumerate(group):
            # every third chunk starts a new node; its first 20 bytes
            # are the manifest node id
            if cnt % 3 == 1:
                mnode = chnk[:20]
                efiles.update(mnfst.readdelta(mnode))
            yield chnk
            # see above comment for why we divide by 3
            self.ui.progress(_('bundling'), cnt / 3,
                             unit=_('manifests'), total=changecount)
        self.ui.progress(_('bundling'), None)
        # rebound from dict to count: only the total is needed below
        efiles = len(efiles)

        # These are no longer needed, dereference and toss the memory for
        # them.
        msng_mnfst_lst = None
        msng_mnfst_set.clear()

        if extranodes:
            for fname in extranodes:
                # key 1 (the manifest) was already handled above
                if isinstance(fname, int):
                    continue
                msng_filenode_set.setdefault(fname, {})
                changedfiles.add(fname)
        # Go through all our files in order sorted by name.
        for idx, fname in enumerate(sorted(changedfiles)):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            # Toss out the filenodes that the recipient isn't really
            # missing.
            missingfnodes = msng_filenode_set.pop(fname, {})
            prune(filerevlog, missingfnodes)
            add_extra_nodes(fname, missingfnodes)
            # If any filenodes are left, generate the group for them,
            # otherwise don't bother.
            if missingfnodes:
                yield changegroup.chunkheader(len(fname))
                yield fname
                # Sort the filenodes by their revision # (topological order)
                nodeiter = list(missingfnodes)
                nodeiter.sort(key=filerevlog.rev)
                # Create a group generator and only pass in a changenode
                # lookup function as we need to collect no information
                # from filenodes.
                group = filerevlog.group(nodeiter,
                                         lambda fnode: missingfnodes[fnode])
                for chnk in group:
                    # even though we print the same progress on
                    # most loop iterations, put the progress call
                    # here so that time estimates (if any) can be updated
                    self.ui.progress(
                        _('bundling'), idx, item=fname,
                        unit=_('files'), total=efiles)
                    yield chnk
        # Signal that no more groups are left.
        yield changegroup.closechunk()
        self.ui.progress(_('bundling'), None)

    if msng_cl_lst:
        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1604
1605
def changegroup(self, basenodes, source):
    """Build a changegroup relative to *basenodes* and the repository's
    current heads, delegating to changegroupsubset()."""
    # to avoid a race we use changegroupsubset() (issue1320)
    currentheads = self.heads()
    return self.changegroupsubset(basenodes, currentheads, source)
1608
1609
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    # Give hooks a chance to veto the operation before any work is done.
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    # Changelog revision numbers of every outgoing node; used below to
    # decide which manifest/file revisions belong in the bundle.
    revset = set([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def identity(x):
        # Lookup function for the changelog group: a changelog node is
        # its own link node.
        return x

    def gennodelst(log):
        # Yield the nodes of *log* whose linked changelog revision is in
        # the outgoing revset.
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def lookuplinkrev_func(revlog):
        # Build a lookup that maps a node of *revlog* to the changelog
        # node it is linked to.
        def lookuplinkrev(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuplinkrev

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files
        changedfiles = set()
        mmfs = {}
        # collector populates changedfiles/mmfs as the changelog group
        # is generated below.
        collect = changegroup.collector(cl, mmfs, changedfiles)

        for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
            # revlog.group yields three entries per node, so
            # dividing by 3 gives an approximation of how many
            # nodes have been processed.
            self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
            yield chnk
        changecount = cnt / 3
        self.ui.progress(_('bundling'), None)

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        efiles = {}
        for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                               lookuplinkrev_func(mnfst))):
            if cnt % 3 == 1:
                # Every third chunk (offset 1) starts with the manifest
                # node; use its delta to accumulate the affected files.
                mnode = chnk[:20]
                efiles.update(mnfst.readdelta(mnode))
            # see above comment for why we divide by 3
            self.ui.progress(_('bundling'), cnt / 3,
                             unit=_('manifests'), total=changecount)
            yield chnk
        # Repurpose efiles as the total file count for progress reporting.
        efiles = len(efiles)
        self.ui.progress(_('bundling'), None)

        for idx, fname in enumerate(sorted(changedfiles)):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            nodeiter = gennodelst(filerevlog)
            nodeiter = list(nodeiter)
            if nodeiter:
                # Emit the filename header only when this file actually
                # contributes revisions to the bundle.
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuplinkrev_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    self.ui.progress(
                        _('bundling'), idx, item=fname,
                        total=efiles, unit=_('files'))
                    yield chnk
            self.ui.progress(_('bundling'), None)

        # Terminating chunk: signals the end of the changegroup stream.
        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    # Wrap the lazy chunk generator so callers can read() it like a
    # stream; 'UN' marks the bundle as uncompressed.
    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1692
1693
def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.
    If lock is not None, the function takes ownership of the lock
    and releases it after the changegroup is added.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        # Lookup for changelog.addgroup: log each incoming changeset and
        # return the revision number it will be assigned.
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        # Map a changelog node to its revision number (linkrev lookup
        # for manifest/file groups).
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    # Set of files touched by the incoming changesets (becomes a count
    # further down, for progress totals).
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
    try:
        # Weak proxy avoids a reference cycle between the transaction
        # and the revlogs that hold on to it.
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # Callable progress reporter handed to the bundle source;
            # step/count/total are mutated as each phase begins.
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        if (cl.addgroup(source, csmap, trp) is None
            and not emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        # Repurpose efiles as the file-count total for progress below.
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = 'files'
        pr.count = 1
        pr.total = efiles
        # Files are pulled explicitly below, so no per-chunk callback.
        source.callback = None

        while 1:
            # Each file group is preceded by a chunk holding the
            # filename; an empty chunk terminates the stream.
            f = source.chunk()
            if not f:
                break
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if fl.addgroup(source, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                # Tick off the file nodes we were told to expect.
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        # Anything left in needfiles must already exist locally,
        # otherwise the incoming data is incomplete.
        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        newheads = len(cl.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # 'pending' lets hooks see the not-yet-committed changelog.
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        tr.release()
        if lock:
            lock.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.updatebranchcache()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1856
1857
1857
1858
def stream_in(self, remote, requirements):
    """Clone by copying raw store files streamed from *remote*.

    Reads the stream_out wire protocol: a status code line, a
    '<files> <bytes>' summary line, then for each file a
    'name\\0size' header followed by *size* bytes of data.
    Returns len(self.heads()) + 1, mirroring a pull result.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    # Non-zero status codes signal server-side refusal.
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # for backwards compat, name was partially encoded
        ofp = self.sopener(store.decodedir(name), 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        # Guard against a zero/negative delta so the rate division below
        # cannot fail.
        elapsed = 0.001
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))

    # new requirements = old non-format requirements + new format-related
    # requirements from the streamed-in repository
    requirements.update(set(self.requirements) - self.supportedformats)
    self._applyrequirements(requirements)
    self._writerequirements()

    # Drop caches so the freshly written store files are picked up.
    self.invalidate()
    return len(self.heads()) + 1
1912
1913
def clone(self, remote, heads=[], stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull)
    stream: use streaming clone if possible'''

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    # Guard: streaming only applies to full clones; explicit heads
    # always go through the pull path.
    if not stream or heads:
        return self.pull(remote, heads)

    # 'stream' means remote revlog format is revlogv1 only
    if remote.capable('stream'):
        return self.stream_in(remote, set(('revlogv1',)))

    # otherwise, 'streamreqs' contains the remote revlog format
    streamreqs = remote.capable('streamreqs')
    if streamreqs:
        remoteformats = set(streamreqs.split(','))
        # if we support it, stream in and adjust our requirements
        if not remoteformats - self.supportedformats:
            return self.stream_in(remote, remoteformats)

    # Remote formats we cannot read: fall back to a regular pull.
    return self.pull(remote, heads)
1940
1941
def pushkey(self, namespace, key, old, new):
    # Update *key* in *namespace* from *old* to *new* by delegating to
    # the pushkey subsystem; returns pushkey.push's result.
    return pushkey.push(self, namespace, key, old, new)
1943
1944
def listkeys(self, namespace):
    # Return the key/value listing for *namespace* from the pushkey
    # subsystem.
    return pushkey.list(self, namespace)
1946
1947
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the (src, dest) renames in *files*.

    The pairs are snapshotted as tuples now, so later mutation of the
    caller's list cannot affect the deferred renames.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for src_path, dest_path in pending:
            util.rename(src_path, dest_path)
    return run_renames
1954
1955
def instance(ui, path, create):
    """Instantiate a localrepository for *path* (a leading 'file:'
    scheme is removed via util.drop_scheme first)."""
    repopath = util.drop_scheme('file', path)
    return localrepository(ui, repopath, create)
1957
1958
def islocal(path):
    # This module implements the local repository type, so every path it
    # is asked about is local.
    return True
General Comments 0
You need to be logged in to leave comments. Login now