pushkey: splits obsolete marker exchange into multiple keys...
Pierre-Yves David
r17295:1f08ecc7 stable
@@ -1,2590 +1,2597 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

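# Editor's note (illustrative, not part of this commit): a localpeer built
# with MODERNCAPS answers e.g. peer.capable('getbundle') but not
# peer.capable('changegroupsubset'); the legacy test peer below additionally
# advertises the latter so old-protocol code paths stay exercised.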
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

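# Editor's sketch (not part of this commit): how the peer classes above are
# usually reached from client code. '_demo_local_peer' and 'path' are
# hypothetical names for illustration only; the function is defined but
# never called here.
def _demo_local_peer(path):
    # deferred import keeps this sketch self-contained
    from mercurial import ui as uimod, hg
    repo = hg.repository(uimod.ui(), path)
    # localrepository.peer() wraps the repo in the restricted peer API
    p = repo.peer()
    # only wire-protocol-like calls are available on a peer
    return p.listkeys('bookmarks')
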
class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phases if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

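    # Editor's note (illustrative): the resulting .hg/requires is a plain
    # text file with one requirement per line, e.g.:
    #
    #   revlogv1
    #   store
    #   fncache
    #   dotencode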
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

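    # Editor's note (illustrative): with a subrepository checked out at
    # 'sub', _checknested accepts '<root>/sub' itself, delegates
    # '<root>/sub/deeper' to the subrepo's own checknested(), and rejects
    # any other nested path under the root.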
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

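    # Editor's note (illustrative): bookmarkheads('foo') also collects
    # variants such as 'foo@remote', since only the part of the name
    # before '@' is compared.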
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialisation and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly new extinct changeset)
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
            hidden.update(self.revs(hiddenquery))
        return hidden

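    # Editor's note (illustrative): the revset above keeps an extinct
    # changeset visible whenever it is an ancestor of the working directory
    # parent, of a bookmark, or of a tag; only unreachable extinct
    # changesets end up hidden.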
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

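    # Editor's note (illustrative): formatspec quotes its arguments, so
    # callers can safely interpolate values, e.g.
    # self.set('heads(branch(%s))', branchname) instead of building the
    # revset string by hand.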
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

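    # Editor's note (illustrative): both .hgtags and .hg/localtags use one
    # '<node-hex> <tag>' line per entry; when an existing tag is moved,
    # writetags() above first records the old node so the tag's history
    # is preserved.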
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

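    # Editor's note (illustrative): cache/branchheads starts with a
    # '<tip-hex> <tip-rev>' validity header, followed by one
    # '<node-hex> <branch>' line per branch head, as written above.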
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

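    # Editor's note (illustrative): in the '1 (a) -> 2 (b) -> 3 (a)' example
    # above, head 1 is pruned because it is an ancestor of the new head 3 on
    # the same branch, even though it is not its direct parent.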
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

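    # Editor's note (illustrative): known() backs discovery; secret
    # changesets are deliberately reported as unknown so they are never
    # exchanged with a peer.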
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

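    # Editor's note (illustrative): filter patterns come from the [encode]
    # and [decode] hgrc sections, each mapping a file pattern to a command
    # or a registered data filter; wread() runs the [encode] filters and
    # wwrite()/wwritedata() run the [decode] filters.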
890 def transaction(self, desc):
890 def transaction(self, desc):
891 tr = self._transref and self._transref() or None
891 tr = self._transref and self._transref() or None
892 if tr and tr.running():
892 if tr and tr.running():
893 return tr.nest()
893 return tr.nest()
894
894
895 # abort here if the journal already exists
895 # abort here if the journal already exists
896 if os.path.exists(self.sjoin("journal")):
896 if os.path.exists(self.sjoin("journal")):
897 raise error.RepoError(
897 raise error.RepoError(
898 _("abandoned transaction found - run hg recover"))
898 _("abandoned transaction found - run hg recover"))
899
899
900 self._writejournal(desc)
900 self._writejournal(desc)
901 renames = [(x, undoname(x)) for x in self._journalfiles()]
901 renames = [(x, undoname(x)) for x in self._journalfiles()]
902
902
903 tr = transaction.transaction(self.ui.warn, self.sopener,
903 tr = transaction.transaction(self.ui.warn, self.sopener,
904 self.sjoin("journal"),
904 self.sjoin("journal"),
905 aftertrans(renames),
905 aftertrans(renames),
906 self.store.createmode)
906 self.store.createmode)
907 self._transref = weakref.ref(tr)
907 self._transref = weakref.ref(tr)
908 return tr
908 return tr
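
# Editor's note: an illustrative sketch (not part of localrepo.py) of the
# calling convention for transaction(). 'repo' stands for an existing
# localrepo instance; close() commits, while release() in the finally block
# rolls back from the journal if close() was never reached.

def writewithtransaction(repo):
    lock = repo.lock()          # transactions require the store lock
    try:
        tr = repo.transaction('example')
        try:
            # ... append to revlogs and other journaled store files here ...
            tr.close()          # commit: journal files become undo files
        finally:
            tr.release()        # no-op after close(), rollback otherwise
    finally:
        lock.release()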

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
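
# Editor's note: an illustrative sketch (not part of localrepo.py) of the
# rollback() entry point. A dry run reports what would be undone without
# touching the store; 'repo' is assumed to be an existing localrepo instance.

def previewrollback(repo):
    if repo.rollback(dryrun=True) == 0:
        # a real rollback would also need force=True when undoing a commit
        # while a different revision is checked out (see _rollback() below)
        repo.ui.status('undo information is available\n')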

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This differs from dirstate.invalidate() in that it does not
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
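
# Editor's note: an illustrative sketch (not part of localrepo.py). Work that
# must not run while the store lock is held can be deferred via _afterlock();
# if no lock is currently held, the callback fires immediately (commit()
# below uses exactly this pattern for the "commit" hook). The hook name here
# is hypothetical.

def notifyafterpush(repo):
    def runhook():
        repo.hook('afterpush-example')  # hypothetical hook name
    repo._afterlock(runhook)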

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
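
# Editor's note: an illustrative sketch (not part of localrepo.py) of the
# customary lock ordering used throughout this file (see commit() and
# rollback()): wlock before lock, released in reverse order via try/finally.

def lockedoperation(repo):
    wlock = repo.wlock()    # working-directory lock first
    try:
        lock = repo.lock()  # then the store lock
        try:
            pass            # ... modify dirstate and store here ...
        finally:
            lock.release()
    finally:
        wlock.release()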

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
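
# Editor's note: an illustrative sketch (not part of localrepo.py). In the
# rename branch of _filecommit() above, the new filelog revision records the
# copy source in its metadata and uses nullid as its first parent, so readers
# seeing a nullid parent know to "look up the copy data". The node hash below
# is hypothetical.

examplemeta = {
    'copy': 'foo',            # path this revision was renamed/copied from
    'copyrev': '6f4310b00b9a147241b071a60c28a650827fb03d',  # hex filenode
}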

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
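
# Editor's note: an illustrative sketch (not part of localrepo.py) of driving
# commit() programmatically with a matcher, in the spirit of what commands.py
# does. The file name, message, and user are hypothetical.

def commitsinglefile(repo):
    match = matchmod.match(repo.root, repo.getcwd(), ['relpath:hello.txt'])
    node = repo.commit(text='example: touch hello.txt',
                       user='editor <editor@example.com>',
                       match=match)
    if node is None:
        repo.ui.status('nothing changed\n')
    return node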

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
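
# Editor's note: an illustrative sketch (not part of localrepo.py) of walk()
# over the working directory (node=None) versus a specific changeset. The
# glob pattern is hypothetical.

def listpyfiles(repo):
    match = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    for f in repo.walk(match):              # files in the working directory
        repo.ui.write('%s\n' % f)
    for f in repo.walk(match, node='tip'):  # files as of the tip changeset
        repo.ui.write('%s\n' % f)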

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
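
# Editor's note: an illustrative sketch (not part of localrepo.py) of
# consuming the seven-element tuple returned by status(). The unknown,
# ignored, and clean lists stay empty unless explicitly requested.

def summarizestatus(repo):
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True)
    for label, files in [('M', modified), ('A', added), ('R', removed),
                         ('!', deleted), ('?', unknown)]:
        for f in files:
            repo.ui.write('%s %s\n' % (label, f))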

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
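
# Editor's note: an illustrative sketch (not part of localrepo.py). between()
# walks first parents from each 'top' and samples nodes at exponentially
# growing distances (steps 1, 2, 4, 8, ...), the spacing the legacy wire
# protocol's discovery expects. This helper reproduces the i/f counters to
# show which steps get sampled.

def samplesteps(limit):
    steps, i, f = [], 0, 1
    while i < limit:
        if i == f:
            steps.append(i)
            f *= 2
        i += 1
    return steps  # samplesteps(20) == [1, 2, 4, 8, 16]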
1723
1723
1724 def pull(self, remote, heads=None, force=False):
1724 def pull(self, remote, heads=None, force=False):
1725 # don't open transaction for nothing or you break future useful
1725 # don't open transaction for nothing or you break future useful
1726 # rollback call
1726 # rollback call
1727 tr = None
1727 tr = None
1728 trname = 'pull\n' + util.hidepassword(remote.url())
1728 trname = 'pull\n' + util.hidepassword(remote.url())
1729 lock = self.lock()
1729 lock = self.lock()
1730 try:
1730 try:
1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1732 force=force)
1732 force=force)
1733 common, fetch, rheads = tmp
1733 common, fetch, rheads = tmp
1734 if not fetch:
1734 if not fetch:
1735 self.ui.status(_("no changes found\n"))
1735 self.ui.status(_("no changes found\n"))
1736 added = []
1736 added = []
1737 result = 0
1737 result = 0
1738 else:
1738 else:
1739 tr = self.transaction(trname)
1739 tr = self.transaction(trname)
1740 if heads is None and list(common) == [nullid]:
1740 if heads is None and list(common) == [nullid]:
1741 self.ui.status(_("requesting all changes\n"))
1741 self.ui.status(_("requesting all changes\n"))
1742 elif heads is None and remote.capable('changegroupsubset'):
1742 elif heads is None and remote.capable('changegroupsubset'):
1743 # issue1320, avoid a race if remote changed after discovery
1743 # issue1320, avoid a race if remote changed after discovery
1744 heads = rheads
1744 heads = rheads
1745
1745
1746 if remote.capable('getbundle'):
1746 if remote.capable('getbundle'):
1747 cg = remote.getbundle('pull', common=common,
1747 cg = remote.getbundle('pull', common=common,
1748 heads=heads or rheads)
1748 heads=heads or rheads)
1749 elif heads is None:
1749 elif heads is None:
1750 cg = remote.changegroup(fetch, 'pull')
1750 cg = remote.changegroup(fetch, 'pull')
1751 elif not remote.capable('changegroupsubset'):
1751 elif not remote.capable('changegroupsubset'):
1752 raise util.Abort(_("partial pull cannot be done because "
1752 raise util.Abort(_("partial pull cannot be done because "
1753 "other repository doesn't support "
1753 "other repository doesn't support "
1754 "changegroupsubset."))
1754 "changegroupsubset."))
1755 else:
1755 else:
1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1757 clstart = len(self.changelog)
1757 clstart = len(self.changelog)
1758 result = self.addchangegroup(cg, 'pull', remote.url())
1758 result = self.addchangegroup(cg, 'pull', remote.url())
1759 clend = len(self.changelog)
1759 clend = len(self.changelog)
1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1761
1761
1762 # compute target subset
1762 # compute target subset
1763 if heads is None:
1763 if heads is None:
1764 # We pulled every thing possible
1764 # We pulled every thing possible
1765 # sync on everything common
1765 # sync on everything common
1766 subset = common + added
1766 subset = common + added
1767 else:
1767 else:
1768 # We pulled a specific subset
1768 # We pulled a specific subset
1769 # sync on this subset
1769 # sync on this subset
1770 subset = heads
1770 subset = heads
1771
1771
1772 # Get remote phases data from remote
1772 # Get remote phases data from remote
1773 remotephases = remote.listkeys('phases')
1773 remotephases = remote.listkeys('phases')
1774 publishing = bool(remotephases.get('publishing', False))
1774 publishing = bool(remotephases.get('publishing', False))
1775 if remotephases and not publishing:
1775 if remotephases and not publishing:
1776 # remote is new and unpublishing
1776 # remote is new and unpublishing
1777 pheads, _dr = phases.analyzeremotephases(self, subset,
1777 pheads, _dr = phases.analyzeremotephases(self, subset,
1778 remotephases)
1778 remotephases)
1779 phases.advanceboundary(self, phases.public, pheads)
1779 phases.advanceboundary(self, phases.public, pheads)
1780 phases.advanceboundary(self, phases.draft, subset)
1780 phases.advanceboundary(self, phases.draft, subset)
1781 else:
1781 else:
1782 # Remote is old or publishing all common changesets
1782 # Remote is old or publishing all common changesets
1783 # should be seen as public
1783 # should be seen as public
1784 phases.advanceboundary(self, phases.public, subset)
1784 phases.advanceboundary(self, phases.public, subset)
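# Editor's note: a hedged sketch of what listkeys('phases') may return;
# the hex prefix below is made up for illustration:
#
#     {'publishing': 'True'}            # publishing server, all public
#     {'d047485b3896': '1'}             # a draft root, phase given as str
#
# values arrive as strings, hence the bool()/get() handling above.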
1785
1785
1786 self.ui.debug('fetching remote obsolete markers\n')
1786 self.ui.debug('fetching remote obsolete markers\n')
1787 remoteobs = remote.listkeys('obsolete')
1787 remoteobs = remote.listkeys('obsolete')
1788 if 'dump' in remoteobs:
1788 if 'dump0' in remoteobs:
1789 if tr is None:
1789 if tr is None:
1790 tr = self.transaction(trname)
1790 tr = self.transaction(trname)
1791 data = base85.b85decode(remoteobs['dump'])
1792 self.obsstore.mergemarkers(tr, data)
1791 for key in sorted(remoteobs, reverse=True):
1792 if key.startswith('dump'):
1793 data = base85.b85decode(remoteobs[key])
1794 self.obsstore.mergemarkers(tr, data)
1793 if tr is not None:
1795 if tr is not None:
1794 tr.close()
1796 tr.close()
1795 finally:
1797 finally:
1796 if tr is not None:
1798 if tr is not None:
1797 tr.release()
1799 tr.release()
1798 lock.release()
1800 lock.release()
1799
1801
1800 return result
1802 return result
1801
1803
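# Editor's note: a minimal sketch, assuming the multi-key scheme above, of
# how the base85-encoded 'dumpN' parts could be reassembled in one place;
# '_gathermarkers' is a hypothetical helper, not actual Mercurial API:
#
#     def _gathermarkers(self, remoteobs):
#         # concatenate decoded parts; obsstore.mergemarkers() does not
#         # depend on the order in which the parts are merged
#         parts = [base85.b85decode(remoteobs[key])
#                  for key in sorted(remoteobs)
#                  if key.startswith('dump')]
#         return ''.join(parts)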
1802 def checkpush(self, force, revs):
1804 def checkpush(self, force, revs):
1803 """Extensions can override this function if additional checks have
1805 """Extensions can override this function if additional checks have
1804 to be performed before pushing, or call it if they override push
1806 to be performed before pushing, or call it if they override push
1805 command.
1807 command.
1806 """
1808 """
1807 pass
1809 pass
1808
1810
1809 def push(self, remote, force=False, revs=None, newbranch=False):
1811 def push(self, remote, force=False, revs=None, newbranch=False):
1810 '''Push outgoing changesets (limited by revs) from the current
1812 '''Push outgoing changesets (limited by revs) from the current
1811 repository to remote. Return an integer:
1813 repository to remote. Return an integer:
1812 - None means nothing to push
1814 - None means nothing to push
1813 - 0 means HTTP error
1815 - 0 means HTTP error
1814 - 1 means we pushed and remote head count is unchanged *or*
1816 - 1 means we pushed and remote head count is unchanged *or*
1815 we have outgoing changesets but refused to push
1817 we have outgoing changesets but refused to push
1816 - other values as described by addchangegroup()
1818 - other values as described by addchangegroup()
1817 '''
1819 '''
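# Editor's note: a hedged sketch of consuming these return values at a
# call site ('repo' and 'remote' are assumed peers, illustrative only):
#
#     ret = repo.push(remote)
#     if ret is None:
#         pass                  # nothing to push
#     elif ret == 0:
#         pass                  # HTTP error
#     else:
#         pass                  # pushed; see addchangegroup() for codes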
1818 # there are two ways to push to remote repo:
1820 # there are two ways to push to remote repo:
1819 #
1821 #
1820 # addchangegroup assumes local user can lock remote
1822 # addchangegroup assumes local user can lock remote
1821 # repo (local filesystem, old ssh servers).
1823 # repo (local filesystem, old ssh servers).
1822 #
1824 #
1823 # unbundle assumes local user cannot lock remote repo (new ssh
1825 # unbundle assumes local user cannot lock remote repo (new ssh
1824 # servers, http servers).
1826 # servers, http servers).
1825
1827
1826 if not remote.canpush():
1828 if not remote.canpush():
1827 raise util.Abort(_("destination does not support push"))
1829 raise util.Abort(_("destination does not support push"))
1828 # get local lock as we might write phase data
1830 # get local lock as we might write phase data
1829 locallock = self.lock()
1831 locallock = self.lock()
1830 try:
1832 try:
1831 self.checkpush(force, revs)
1833 self.checkpush(force, revs)
1832 lock = None
1834 lock = None
1833 unbundle = remote.capable('unbundle')
1835 unbundle = remote.capable('unbundle')
1834 if not unbundle:
1836 if not unbundle:
1835 lock = remote.lock()
1837 lock = remote.lock()
1836 try:
1838 try:
1837 # discovery
1839 # discovery
1838 fci = discovery.findcommonincoming
1840 fci = discovery.findcommonincoming
1839 commoninc = fci(self, remote, force=force)
1841 commoninc = fci(self, remote, force=force)
1840 common, inc, remoteheads = commoninc
1842 common, inc, remoteheads = commoninc
1841 fco = discovery.findcommonoutgoing
1843 fco = discovery.findcommonoutgoing
1842 outgoing = fco(self, remote, onlyheads=revs,
1844 outgoing = fco(self, remote, onlyheads=revs,
1843 commoninc=commoninc, force=force)
1845 commoninc=commoninc, force=force)
1844
1846
1845
1847
1846 if not outgoing.missing:
1848 if not outgoing.missing:
1847 # nothing to push
1849 # nothing to push
1848 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1850 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1849 ret = None
1851 ret = None
1850 else:
1852 else:
1851 # something to push
1853 # something to push
1852 if not force:
1854 if not force:
1853 # if self.obsstore is empty --> no obsolete markers,
1855 # if self.obsstore is empty --> no obsolete markers,
1854 # so we can skip the iteration
1856 # so we can skip the iteration
1855 if self.obsstore:
1857 if self.obsstore:
1856 # these messages are defined here to stay within the 80-char limit
1858 # these messages are defined here to stay within the 80-char limit
1857 mso = _("push includes an obsolete changeset: %s!")
1859 mso = _("push includes an obsolete changeset: %s!")
1858 msu = _("push includes an unstable changeset: %s!")
1860 msu = _("push includes an unstable changeset: %s!")
1859 # If there is at least one obsolete or unstable
1861 # If there is at least one obsolete or unstable
1860 # changeset in missing, at least one of the missing
1862 # changeset in missing, at least one of the missing
1861 # heads will be obsolete or unstable. So checking
1863 # heads will be obsolete or unstable. So checking
1862 # the heads only is enough.
1864 # the heads only is enough.
1863 for node in outgoing.missingheads:
1865 for node in outgoing.missingheads:
1864 ctx = self[node]
1866 ctx = self[node]
1865 if ctx.obsolete():
1867 if ctx.obsolete():
1866 raise util.Abort(_(mso) % ctx)
1868 raise util.Abort(_(mso) % ctx)
1867 elif ctx.unstable():
1869 elif ctx.unstable():
1868 raise util.Abort(_(msu) % ctx)
1870 raise util.Abort(_(msu) % ctx)
1869 discovery.checkheads(self, remote, outgoing,
1871 discovery.checkheads(self, remote, outgoing,
1870 remoteheads, newbranch,
1872 remoteheads, newbranch,
1871 bool(inc))
1873 bool(inc))
1872
1874
1873 # create a changegroup from local
1875 # create a changegroup from local
1874 if revs is None and not outgoing.excluded:
1876 if revs is None and not outgoing.excluded:
1875 # push everything,
1877 # push everything,
1876 # use the fast path, no race possible on push
1878 # use the fast path, no race possible on push
1877 cg = self._changegroup(outgoing.missing, 'push')
1879 cg = self._changegroup(outgoing.missing, 'push')
1878 else:
1880 else:
1879 cg = self.getlocalbundle('push', outgoing)
1881 cg = self.getlocalbundle('push', outgoing)
1880
1882
1881 # apply changegroup to remote
1883 # apply changegroup to remote
1882 if unbundle:
1884 if unbundle:
1883 # local repo finds heads on server, finds out what
1885 # local repo finds heads on server, finds out what
1884 # revs it must push. once revs transferred, if server
1886 # revs it must push. once revs transferred, if server
1885 # finds it has different heads (someone else won
1887 # finds it has different heads (someone else won
1886 # commit/push race), server aborts.
1888 # commit/push race), server aborts.
1887 if force:
1889 if force:
1888 remoteheads = ['force']
1890 remoteheads = ['force']
1889 # ssh: return remote's addchangegroup()
1891 # ssh: return remote's addchangegroup()
1890 # http: return remote's addchangegroup() or 0 for error
1892 # http: return remote's addchangegroup() or 0 for error
1891 ret = remote.unbundle(cg, remoteheads, 'push')
1893 ret = remote.unbundle(cg, remoteheads, 'push')
1892 else:
1894 else:
1893 # we return an integer indicating remote head count
1895 # we return an integer indicating remote head count
1894 # change
1896 # change
1895 ret = remote.addchangegroup(cg, 'push', self.url())
1897 ret = remote.addchangegroup(cg, 'push', self.url())
1896
1898
1897 if ret:
1899 if ret:
1898 # push succeeded, synchronize the target of the push
1900 # push succeeded, synchronize the target of the push
1899 cheads = outgoing.missingheads
1901 cheads = outgoing.missingheads
1900 elif revs is None:
1902 elif revs is None:
1901 # The whole push failed; synchronize on all common
1903 # The whole push failed; synchronize on all common
1902 cheads = outgoing.commonheads
1904 cheads = outgoing.commonheads
1903 else:
1905 else:
1904 # I want cheads = heads(::missingheads and ::commonheads)
1906 # I want cheads = heads(::missingheads and ::commonheads)
1905 # (missingheads is revs with secret changeset filtered out)
1907 # (missingheads is revs with secret changeset filtered out)
1906 #
1908 #
1907 # This can be expressed as:
1909 # This can be expressed as:
1908 # cheads = ( (missingheads and ::commonheads)
1910 # cheads = ( (missingheads and ::commonheads)
1909 # + (commonheads and ::missingheads)
1911 # + (commonheads and ::missingheads)
1910 # )
1912 # )
1911 #
1913 #
1912 # while trying to push we already computed the following:
1914 # while trying to push we already computed the following:
1913 # common = (::commonheads)
1915 # common = (::commonheads)
1914 # missing = ((commonheads::missingheads) - commonheads)
1916 # missing = ((commonheads::missingheads) - commonheads)
1915 #
1917 #
1916 # We can pick:
1918 # We can pick:
1917 # * missingheads part of common (::commonheads)
1919 # * missingheads part of common (::commonheads)
1918 common = set(outgoing.common)
1920 common = set(outgoing.common)
1919 cheads = [node for node in revs if node in common]
1921 cheads = [node for node in revs if node in common]
1920 # and
1922 # and
1921 # * commonheads parents on missing
1923 # * commonheads parents on missing
1922 revset = self.set('%ln and parents(roots(%ln))',
1924 revset = self.set('%ln and parents(roots(%ln))',
1923 outgoing.commonheads,
1925 outgoing.commonheads,
1924 outgoing.missing)
1926 outgoing.missing)
1925 cheads.extend(c.node() for c in revset)
1927 cheads.extend(c.node() for c in revset)
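# Editor's note: the two steps above on a toy linear history A-B-C-D
# (commonheads = {B}, revs = missingheads = [D]), with plain sets
# standing in for revsets; the names are illustrative:
#
#     common = set('AB'); missing = set('CD'); revs = ['D']
#     cheads = [n for n in revs if n in common]     # step 1 -> []
#     # step 2: roots(missing) = {'C'}; parents('C') = {'B'} is a
#     # common head, so cheads -> ['B'] == heads(::D and ::B)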
1926 # even when we don't push, exchanging phase data is useful
1928 # even when we don't push, exchanging phase data is useful
1927 remotephases = remote.listkeys('phases')
1929 remotephases = remote.listkeys('phases')
1928 if not remotephases: # old server or public only repo
1930 if not remotephases: # old server or public only repo
1929 phases.advanceboundary(self, phases.public, cheads)
1931 phases.advanceboundary(self, phases.public, cheads)
1930 # don't push any phase data as there is nothing to push
1932 # don't push any phase data as there is nothing to push
1931 else:
1933 else:
1932 ana = phases.analyzeremotephases(self, cheads, remotephases)
1934 ana = phases.analyzeremotephases(self, cheads, remotephases)
1933 pheads, droots = ana
1935 pheads, droots = ana
1934 ### Apply remote phase on local
1936 ### Apply remote phase on local
1935 if remotephases.get('publishing', False):
1937 if remotephases.get('publishing', False):
1936 phases.advanceboundary(self, phases.public, cheads)
1938 phases.advanceboundary(self, phases.public, cheads)
1937 else: # publish = False
1939 else: # publish = False
1938 phases.advanceboundary(self, phases.public, pheads)
1940 phases.advanceboundary(self, phases.public, pheads)
1939 phases.advanceboundary(self, phases.draft, cheads)
1941 phases.advanceboundary(self, phases.draft, cheads)
1940 ### Apply local phase on remote
1942 ### Apply local phase on remote
1941
1943
1942 # Get the list of all revs draft on remote but public here.
1944 # Get the list of all revs draft on remote but public here.
1943 # XXX Beware that the revset breaks if droots is not strictly
1945 # XXX Beware that the revset breaks if droots is not strictly
1944 # XXX made of roots; we may want to ensure it is, but that is costly
1946 # XXX made of roots; we may want to ensure it is, but that is costly
1945 outdated = self.set('heads((%ln::%ln) and public())',
1947 outdated = self.set('heads((%ln::%ln) and public())',
1946 droots, cheads)
1948 droots, cheads)
1947 for newremotehead in outdated:
1949 for newremotehead in outdated:
1948 r = remote.pushkey('phases',
1950 r = remote.pushkey('phases',
1949 newremotehead.hex(),
1951 newremotehead.hex(),
1950 str(phases.draft),
1952 str(phases.draft),
1951 str(phases.public))
1953 str(phases.public))
1952 if not r:
1954 if not r:
1953 self.ui.warn(_('updating %s to public failed!\n')
1955 self.ui.warn(_('updating %s to public failed!\n')
1954 % newremotehead)
1956 % newremotehead)
1955 self.ui.debug('try to push obsolete markers to remote\n')
1957 self.ui.debug('try to push obsolete markers to remote\n')
1956 if (self.obsstore and
1958 if (self.obsstore and
1957 'obsolete' in remote.listkeys('namespaces')):
1959 'obsolete' in remote.listkeys('namespaces')):
1958 data = self.listkeys('obsolete')['dump']
1959 r = remote.pushkey('obsolete', 'dump', '', data)
1960 if not r:
1961 self.ui.warn(_('failed to push obsolete markers!\n'))
1960 rslts = []
1961 remotedata = self.listkeys('obsolete')
1962 for key in sorted(remotedata, reverse=True):
1963 # reverse sort to ensure we end with dump0
1964 data = remotedata[key]
1965 rslts.append(remote.pushkey('obsolete', key, '', data))
1966 if [r for r in rslts if not r]:
1967 msg = _('failed to push some obsolete markers!\n')
1968 self.ui.warn(msg)
1962 finally:
1969 finally:
1963 if lock is not None:
1970 if lock is not None:
1964 lock.release()
1971 lock.release()
1965 finally:
1972 finally:
1966 locallock.release()
1973 locallock.release()
1967
1974
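# Editor's note: a hypothetical sketch of the sending side that would
# produce the 'dumpN' keys pushed above; '_splitmarkers' is not actual
# Mercurial API, and the chunk size is made up:
#
#     def _splitmarkers(data, chunksize=8192):
#         # cut raw marker data into base85-encoded parts dump0..dumpN
#         keys = {}
#         for i in xrange(0, len(data), chunksize):
#             part = base85.b85encode(data[i:i + chunksize])
#             keys['dump%d' % (i // chunksize)] = part
#         return keys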
1968 self.ui.debug("checking for updated bookmarks\n")
1975 self.ui.debug("checking for updated bookmarks\n")
1969 rb = remote.listkeys('bookmarks')
1976 rb = remote.listkeys('bookmarks')
1970 for k in rb.keys():
1977 for k in rb.keys():
1971 if k in self._bookmarks:
1978 if k in self._bookmarks:
1972 nr, nl = rb[k], hex(self._bookmarks[k])
1979 nr, nl = rb[k], hex(self._bookmarks[k])
1973 if nr in self:
1980 if nr in self:
1974 cr = self[nr]
1981 cr = self[nr]
1975 cl = self[nl]
1982 cl = self[nl]
1976 if cl in cr.descendants():
1983 if cl in cr.descendants():
1977 r = remote.pushkey('bookmarks', k, nr, nl)
1984 r = remote.pushkey('bookmarks', k, nr, nl)
1978 if r:
1985 if r:
1979 self.ui.status(_("updating bookmark %s\n") % k)
1986 self.ui.status(_("updating bookmark %s\n") % k)
1980 else:
1987 else:
1981 self.ui.warn(_('updating bookmark %s'
1988 self.ui.warn(_('updating bookmark %s'
1982 ' failed!\n') % k)
1989 ' failed!\n') % k)
1983
1990
1984 return ret
1991 return ret
1985
1992
1986 def changegroupinfo(self, nodes, source):
1993 def changegroupinfo(self, nodes, source):
1987 if self.ui.verbose or source == 'bundle':
1994 if self.ui.verbose or source == 'bundle':
1988 self.ui.status(_("%d changesets found\n") % len(nodes))
1995 self.ui.status(_("%d changesets found\n") % len(nodes))
1989 if self.ui.debugflag:
1996 if self.ui.debugflag:
1990 self.ui.debug("list of changesets:\n")
1997 self.ui.debug("list of changesets:\n")
1991 for node in nodes:
1998 for node in nodes:
1992 self.ui.debug("%s\n" % hex(node))
1999 self.ui.debug("%s\n" % hex(node))
1993
2000
1994 def changegroupsubset(self, bases, heads, source):
2001 def changegroupsubset(self, bases, heads, source):
1995 """Compute a changegroup consisting of all the nodes that are
2002 """Compute a changegroup consisting of all the nodes that are
1996 descendants of any of the bases and ancestors of any of the heads.
2003 descendants of any of the bases and ancestors of any of the heads.
1997 Return a chunkbuffer object whose read() method will return
2004 Return a chunkbuffer object whose read() method will return
1998 successive changegroup chunks.
2005 successive changegroup chunks.
1999
2006
2000 It is fairly complex as determining which filenodes and which
2007 It is fairly complex as determining which filenodes and which
2001 manifest nodes need to be included for the changeset to be complete
2008 manifest nodes need to be included for the changeset to be complete
2002 is non-trivial.
2009 is non-trivial.
2003
2010
2004 Another wrinkle is doing the reverse, figuring out which changeset in
2011 Another wrinkle is doing the reverse, figuring out which changeset in
2005 the changegroup a particular filenode or manifestnode belongs to.
2012 the changegroup a particular filenode or manifestnode belongs to.
2006 """
2013 """
2007 cl = self.changelog
2014 cl = self.changelog
2008 if not bases:
2015 if not bases:
2009 bases = [nullid]
2016 bases = [nullid]
2010 csets, bases, heads = cl.nodesbetween(bases, heads)
2017 csets, bases, heads = cl.nodesbetween(bases, heads)
2011 # We assume that all ancestors of bases are known
2018 # We assume that all ancestors of bases are known
2012 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2019 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2013 return self._changegroupsubset(common, csets, heads, source)
2020 return self._changegroupsubset(common, csets, heads, source)
2014
2021
2015 def getlocalbundle(self, source, outgoing):
2022 def getlocalbundle(self, source, outgoing):
2016 """Like getbundle, but taking a discovery.outgoing as an argument.
2023 """Like getbundle, but taking a discovery.outgoing as an argument.
2017
2024
2018 This is only implemented for local repos and reuses potentially
2025 This is only implemented for local repos and reuses potentially
2019 precomputed sets in outgoing."""
2026 precomputed sets in outgoing."""
2020 if not outgoing.missing:
2027 if not outgoing.missing:
2021 return None
2028 return None
2022 return self._changegroupsubset(outgoing.common,
2029 return self._changegroupsubset(outgoing.common,
2023 outgoing.missing,
2030 outgoing.missing,
2024 outgoing.missingheads,
2031 outgoing.missingheads,
2025 source)
2032 source)
2026
2033
2027 def getbundle(self, source, heads=None, common=None):
2034 def getbundle(self, source, heads=None, common=None):
2028 """Like changegroupsubset, but returns the set difference between the
2035 """Like changegroupsubset, but returns the set difference between the
2029 ancestors of heads and the ancestors common.
2036 ancestors of heads and the ancestors common.
2030
2037
2031 If heads is None, use the local heads. If common is None, use [nullid].
2038 If heads is None, use the local heads. If common is None, use [nullid].
2032
2039
2033 The nodes in common might not all be known locally due to the way the
2040 The nodes in common might not all be known locally due to the way the
2034 current discovery protocol works.
2041 current discovery protocol works.
2035 """
2042 """
2036 cl = self.changelog
2043 cl = self.changelog
2037 if common:
2044 if common:
2038 nm = cl.nodemap
2045 nm = cl.nodemap
2039 common = [n for n in common if n in nm]
2046 common = [n for n in common if n in nm]
2040 else:
2047 else:
2041 common = [nullid]
2048 common = [nullid]
2042 if not heads:
2049 if not heads:
2043 heads = cl.heads()
2050 heads = cl.heads()
2044 return self.getlocalbundle(source,
2051 return self.getlocalbundle(source,
2045 discovery.outgoing(cl, common, heads))
2052 discovery.outgoing(cl, common, heads))
2046
2053
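# Editor's note: getbundle's set difference on a toy linear history
# A-B-C-D, with plain sets standing in for changelog ancestors
# (values are made up):
#
#     ancestors = {'B': set('AB'), 'D': set('ABCD')}
#     bundled = ancestors['D'] - ancestors['B']     # -> {'C', 'D'}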
2047 def _changegroupsubset(self, commonrevs, csets, heads, source):
2054 def _changegroupsubset(self, commonrevs, csets, heads, source):
2048
2055
2049 cl = self.changelog
2056 cl = self.changelog
2050 mf = self.manifest
2057 mf = self.manifest
2051 mfs = {} # needed manifests
2058 mfs = {} # needed manifests
2052 fnodes = {} # needed file nodes
2059 fnodes = {} # needed file nodes
2053 changedfiles = set()
2060 changedfiles = set()
2054 fstate = ['', {}]
2061 fstate = ['', {}]
2055 count = [0, 0]
2062 count = [0, 0]
2056
2063
2057 # can we go through the fast path?
2064 # can we go through the fast path?
2058 heads.sort()
2065 heads.sort()
2059 if heads == sorted(self.heads()):
2066 if heads == sorted(self.heads()):
2060 return self._changegroup(csets, source)
2067 return self._changegroup(csets, source)
2061
2068
2062 # slow path
2069 # slow path
2063 self.hook('preoutgoing', throw=True, source=source)
2070 self.hook('preoutgoing', throw=True, source=source)
2064 self.changegroupinfo(csets, source)
2071 self.changegroupinfo(csets, source)
2065
2072
2066 # filter any nodes that claim to be part of the known set
2073 # filter any nodes that claim to be part of the known set
2067 def prune(revlog, missing):
2074 def prune(revlog, missing):
2068 rr, rl = revlog.rev, revlog.linkrev
2075 rr, rl = revlog.rev, revlog.linkrev
2069 return [n for n in missing
2076 return [n for n in missing
2070 if rl(rr(n)) not in commonrevs]
2077 if rl(rr(n)) not in commonrevs]
2071
2078
2072 progress = self.ui.progress
2079 progress = self.ui.progress
2073 _bundling = _('bundling')
2080 _bundling = _('bundling')
2074 _changesets = _('changesets')
2081 _changesets = _('changesets')
2075 _manifests = _('manifests')
2082 _manifests = _('manifests')
2076 _files = _('files')
2083 _files = _('files')
2077
2084
2078 def lookup(revlog, x):
2085 def lookup(revlog, x):
2079 if revlog == cl:
2086 if revlog == cl:
2080 c = cl.read(x)
2087 c = cl.read(x)
2081 changedfiles.update(c[3])
2088 changedfiles.update(c[3])
2082 mfs.setdefault(c[0], x)
2089 mfs.setdefault(c[0], x)
2083 count[0] += 1
2090 count[0] += 1
2084 progress(_bundling, count[0],
2091 progress(_bundling, count[0],
2085 unit=_changesets, total=count[1])
2092 unit=_changesets, total=count[1])
2086 return x
2093 return x
2087 elif revlog == mf:
2094 elif revlog == mf:
2088 clnode = mfs[x]
2095 clnode = mfs[x]
2089 mdata = mf.readfast(x)
2096 mdata = mf.readfast(x)
2090 for f, n in mdata.iteritems():
2097 for f, n in mdata.iteritems():
2091 if f in changedfiles:
2098 if f in changedfiles:
2092 fnodes[f].setdefault(n, clnode)
2099 fnodes[f].setdefault(n, clnode)
2093 count[0] += 1
2100 count[0] += 1
2094 progress(_bundling, count[0],
2101 progress(_bundling, count[0],
2095 unit=_manifests, total=count[1])
2102 unit=_manifests, total=count[1])
2096 return clnode
2103 return clnode
2097 else:
2104 else:
2098 progress(_bundling, count[0], item=fstate[0],
2105 progress(_bundling, count[0], item=fstate[0],
2099 unit=_files, total=count[1])
2106 unit=_files, total=count[1])
2100 return fstate[1][x]
2107 return fstate[1][x]
2101
2108
2102 bundler = changegroup.bundle10(lookup)
2109 bundler = changegroup.bundle10(lookup)
2103 reorder = self.ui.config('bundle', 'reorder', 'auto')
2110 reorder = self.ui.config('bundle', 'reorder', 'auto')
2104 if reorder == 'auto':
2111 if reorder == 'auto':
2105 reorder = None
2112 reorder = None
2106 else:
2113 else:
2107 reorder = util.parsebool(reorder)
2114 reorder = util.parsebool(reorder)
2108
2115
2109 def gengroup():
2116 def gengroup():
2110 # Create a changenode group generator that will call our functions
2117 # Create a changenode group generator that will call our functions
2111 # back to lookup the owning changenode and collect information.
2118 # back to lookup the owning changenode and collect information.
2112 count[:] = [0, len(csets)]
2119 count[:] = [0, len(csets)]
2113 for chunk in cl.group(csets, bundler, reorder=reorder):
2120 for chunk in cl.group(csets, bundler, reorder=reorder):
2114 yield chunk
2121 yield chunk
2115 progress(_bundling, None)
2122 progress(_bundling, None)
2116
2123
2117 # Create a generator for the manifestnodes that calls our lookup
2124 # Create a generator for the manifestnodes that calls our lookup
2118 # and data collection functions back.
2125 # and data collection functions back.
2119 for f in changedfiles:
2126 for f in changedfiles:
2120 fnodes[f] = {}
2127 fnodes[f] = {}
2121 count[:] = [0, len(mfs)]
2128 count[:] = [0, len(mfs)]
2122 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2129 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2123 yield chunk
2130 yield chunk
2124 progress(_bundling, None)
2131 progress(_bundling, None)
2125
2132
2126 mfs.clear()
2133 mfs.clear()
2127
2134
2128 # Go through all our files in order sorted by name.
2135 # Go through all our files in order sorted by name.
2129 count[:] = [0, len(changedfiles)]
2136 count[:] = [0, len(changedfiles)]
2130 for fname in sorted(changedfiles):
2137 for fname in sorted(changedfiles):
2131 filerevlog = self.file(fname)
2138 filerevlog = self.file(fname)
2132 if not len(filerevlog):
2139 if not len(filerevlog):
2133 raise util.Abort(_("empty or missing revlog for %s")
2140 raise util.Abort(_("empty or missing revlog for %s")
2134 % fname)
2141 % fname)
2135 fstate[0] = fname
2142 fstate[0] = fname
2136 fstate[1] = fnodes.pop(fname, {})
2143 fstate[1] = fnodes.pop(fname, {})
2137
2144
2138 nodelist = prune(filerevlog, fstate[1])
2145 nodelist = prune(filerevlog, fstate[1])
2139 if nodelist:
2146 if nodelist:
2140 count[0] += 1
2147 count[0] += 1
2141 yield bundler.fileheader(fname)
2148 yield bundler.fileheader(fname)
2142 for chunk in filerevlog.group(nodelist, bundler, reorder):
2149 for chunk in filerevlog.group(nodelist, bundler, reorder):
2143 yield chunk
2150 yield chunk
2144
2151
2145 # Signal that no more groups are left.
2152 # Signal that no more groups are left.
2146 yield bundler.close()
2153 yield bundler.close()
2147 progress(_bundling, None)
2154 progress(_bundling, None)
2148
2155
2149 if csets:
2156 if csets:
2150 self.hook('outgoing', node=hex(csets[0]), source=source)
2157 self.hook('outgoing', node=hex(csets[0]), source=source)
2151
2158
2152 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2159 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2153
2160
2154 def changegroup(self, basenodes, source):
2161 def changegroup(self, basenodes, source):
2155 # to avoid a race we use changegroupsubset() (issue1320)
2162 # to avoid a race we use changegroupsubset() (issue1320)
2156 return self.changegroupsubset(basenodes, self.heads(), source)
2163 return self.changegroupsubset(basenodes, self.heads(), source)
2157
2164
2158 def _changegroup(self, nodes, source):
2165 def _changegroup(self, nodes, source):
2159 """Compute the changegroup of all nodes that we have that a recipient
2166 """Compute the changegroup of all nodes that we have that a recipient
2160 doesn't. Return a chunkbuffer object whose read() method will return
2167 doesn't. Return a chunkbuffer object whose read() method will return
2161 successive changegroup chunks.
2168 successive changegroup chunks.
2162
2169
2163 This is much easier than the previous function as we can assume that
2170 This is much easier than the previous function as we can assume that
2164 the recipient has any changenode we aren't sending them.
2171 the recipient has any changenode we aren't sending them.
2165
2172
2166 nodes is the set of nodes to send"""
2173 nodes is the set of nodes to send"""
2167
2174
2168 cl = self.changelog
2175 cl = self.changelog
2169 mf = self.manifest
2176 mf = self.manifest
2170 mfs = {}
2177 mfs = {}
2171 changedfiles = set()
2178 changedfiles = set()
2172 fstate = ['']
2179 fstate = ['']
2173 count = [0, 0]
2180 count = [0, 0]
2174
2181
2175 self.hook('preoutgoing', throw=True, source=source)
2182 self.hook('preoutgoing', throw=True, source=source)
2176 self.changegroupinfo(nodes, source)
2183 self.changegroupinfo(nodes, source)
2177
2184
2178 revset = set([cl.rev(n) for n in nodes])
2185 revset = set([cl.rev(n) for n in nodes])
2179
2186
2180 def gennodelst(log):
2187 def gennodelst(log):
2181 ln, llr = log.node, log.linkrev
2188 ln, llr = log.node, log.linkrev
2182 return [ln(r) for r in log if llr(r) in revset]
2189 return [ln(r) for r in log if llr(r) in revset]
2183
2190
2184 progress = self.ui.progress
2191 progress = self.ui.progress
2185 _bundling = _('bundling')
2192 _bundling = _('bundling')
2186 _changesets = _('changesets')
2193 _changesets = _('changesets')
2187 _manifests = _('manifests')
2194 _manifests = _('manifests')
2188 _files = _('files')
2195 _files = _('files')
2189
2196
2190 def lookup(revlog, x):
2197 def lookup(revlog, x):
2191 if revlog == cl:
2198 if revlog == cl:
2192 c = cl.read(x)
2199 c = cl.read(x)
2193 changedfiles.update(c[3])
2200 changedfiles.update(c[3])
2194 mfs.setdefault(c[0], x)
2201 mfs.setdefault(c[0], x)
2195 count[0] += 1
2202 count[0] += 1
2196 progress(_bundling, count[0],
2203 progress(_bundling, count[0],
2197 unit=_changesets, total=count[1])
2204 unit=_changesets, total=count[1])
2198 return x
2205 return x
2199 elif revlog == mf:
2206 elif revlog == mf:
2200 count[0] += 1
2207 count[0] += 1
2201 progress(_bundling, count[0],
2208 progress(_bundling, count[0],
2202 unit=_manifests, total=count[1])
2209 unit=_manifests, total=count[1])
2203 return cl.node(revlog.linkrev(revlog.rev(x)))
2210 return cl.node(revlog.linkrev(revlog.rev(x)))
2204 else:
2211 else:
2205 progress(_bundling, count[0], item=fstate[0],
2212 progress(_bundling, count[0], item=fstate[0],
2206 total=count[1], unit=_files)
2213 total=count[1], unit=_files)
2207 return cl.node(revlog.linkrev(revlog.rev(x)))
2214 return cl.node(revlog.linkrev(revlog.rev(x)))
2208
2215
2209 bundler = changegroup.bundle10(lookup)
2216 bundler = changegroup.bundle10(lookup)
2210 reorder = self.ui.config('bundle', 'reorder', 'auto')
2217 reorder = self.ui.config('bundle', 'reorder', 'auto')
2211 if reorder == 'auto':
2218 if reorder == 'auto':
2212 reorder = None
2219 reorder = None
2213 else:
2220 else:
2214 reorder = util.parsebool(reorder)
2221 reorder = util.parsebool(reorder)
2215
2222
2216 def gengroup():
2223 def gengroup():
2217 '''yield a sequence of changegroup chunks (strings)'''
2224 '''yield a sequence of changegroup chunks (strings)'''
2218 # construct a list of all changed files
2225 # construct a list of all changed files
2219
2226
2220 count[:] = [0, len(nodes)]
2227 count[:] = [0, len(nodes)]
2221 for chunk in cl.group(nodes, bundler, reorder=reorder):
2228 for chunk in cl.group(nodes, bundler, reorder=reorder):
2222 yield chunk
2229 yield chunk
2223 progress(_bundling, None)
2230 progress(_bundling, None)
2224
2231
2225 count[:] = [0, len(mfs)]
2232 count[:] = [0, len(mfs)]
2226 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2233 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2227 yield chunk
2234 yield chunk
2228 progress(_bundling, None)
2235 progress(_bundling, None)
2229
2236
2230 count[:] = [0, len(changedfiles)]
2237 count[:] = [0, len(changedfiles)]
2231 for fname in sorted(changedfiles):
2238 for fname in sorted(changedfiles):
2232 filerevlog = self.file(fname)
2239 filerevlog = self.file(fname)
2233 if not len(filerevlog):
2240 if not len(filerevlog):
2234 raise util.Abort(_("empty or missing revlog for %s")
2241 raise util.Abort(_("empty or missing revlog for %s")
2235 % fname)
2242 % fname)
2236 fstate[0] = fname
2243 fstate[0] = fname
2237 nodelist = gennodelst(filerevlog)
2244 nodelist = gennodelst(filerevlog)
2238 if nodelist:
2245 if nodelist:
2239 count[0] += 1
2246 count[0] += 1
2240 yield bundler.fileheader(fname)
2247 yield bundler.fileheader(fname)
2241 for chunk in filerevlog.group(nodelist, bundler, reorder):
2248 for chunk in filerevlog.group(nodelist, bundler, reorder):
2242 yield chunk
2249 yield chunk
2243 yield bundler.close()
2250 yield bundler.close()
2244 progress(_bundling, None)
2251 progress(_bundling, None)
2245
2252
2246 if nodes:
2253 if nodes:
2247 self.hook('outgoing', node=hex(nodes[0]), source=source)
2254 self.hook('outgoing', node=hex(nodes[0]), source=source)
2248
2255
2249 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2256 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2250
2257
2251 def addchangegroup(self, source, srctype, url, emptyok=False):
2258 def addchangegroup(self, source, srctype, url, emptyok=False):
2252 """Add the changegroup returned by source.read() to this repo.
2259 """Add the changegroup returned by source.read() to this repo.
2253 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2260 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2254 the URL of the repo where this changegroup is coming from.
2261 the URL of the repo where this changegroup is coming from.
2255
2262
2256 Return an integer summarizing the change to this repo:
2263 Return an integer summarizing the change to this repo:
2257 - nothing changed or no source: 0
2264 - nothing changed or no source: 0
2258 - more heads than before: 1+added heads (2..n)
2265 - more heads than before: 1+added heads (2..n)
2259 - fewer heads than before: -1-removed heads (-2..-n)
2266 - fewer heads than before: -1-removed heads (-2..-n)
2260 - number of heads stays the same: 1
2267 - number of heads stays the same: 1
2261 """
2268 """
2262 def csmap(x):
2269 def csmap(x):
2263 self.ui.debug("add changeset %s\n" % short(x))
2270 self.ui.debug("add changeset %s\n" % short(x))
2264 return len(cl)
2271 return len(cl)
2265
2272
2266 def revmap(x):
2273 def revmap(x):
2267 return cl.rev(x)
2274 return cl.rev(x)
2268
2275
2269 if not source:
2276 if not source:
2270 return 0
2277 return 0
2271
2278
2272 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2279 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2273
2280
2274 changesets = files = revisions = 0
2281 changesets = files = revisions = 0
2275 efiles = set()
2282 efiles = set()
2276
2283
2277 # write changelog data to temp files so concurrent readers will not see
2284 # write changelog data to temp files so concurrent readers will not see
2278 # an inconsistent view
2285 # an inconsistent view
2279 cl = self.changelog
2286 cl = self.changelog
2280 cl.delayupdate()
2287 cl.delayupdate()
2281 oldheads = cl.heads()
2288 oldheads = cl.heads()
2282
2289
2283 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2290 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2284 try:
2291 try:
2285 trp = weakref.proxy(tr)
2292 trp = weakref.proxy(tr)
2286 # pull off the changeset group
2293 # pull off the changeset group
2287 self.ui.status(_("adding changesets\n"))
2294 self.ui.status(_("adding changesets\n"))
2288 clstart = len(cl)
2295 clstart = len(cl)
2289 class prog(object):
2296 class prog(object):
2290 step = _('changesets')
2297 step = _('changesets')
2291 count = 1
2298 count = 1
2292 ui = self.ui
2299 ui = self.ui
2293 total = None
2300 total = None
2294 def __call__(self):
2301 def __call__(self):
2295 self.ui.progress(self.step, self.count, unit=_('chunks'),
2302 self.ui.progress(self.step, self.count, unit=_('chunks'),
2296 total=self.total)
2303 total=self.total)
2297 self.count += 1
2304 self.count += 1
2298 pr = prog()
2305 pr = prog()
2299 source.callback = pr
2306 source.callback = pr
2300
2307
2301 source.changelogheader()
2308 source.changelogheader()
2302 srccontent = cl.addgroup(source, csmap, trp)
2309 srccontent = cl.addgroup(source, csmap, trp)
2303 if not (srccontent or emptyok):
2310 if not (srccontent or emptyok):
2304 raise util.Abort(_("received changelog group is empty"))
2311 raise util.Abort(_("received changelog group is empty"))
2305 clend = len(cl)
2312 clend = len(cl)
2306 changesets = clend - clstart
2313 changesets = clend - clstart
2307 for c in xrange(clstart, clend):
2314 for c in xrange(clstart, clend):
2308 efiles.update(self[c].files())
2315 efiles.update(self[c].files())
2309 efiles = len(efiles)
2316 efiles = len(efiles)
2310 self.ui.progress(_('changesets'), None)
2317 self.ui.progress(_('changesets'), None)
2311
2318
2312 # pull off the manifest group
2319 # pull off the manifest group
2313 self.ui.status(_("adding manifests\n"))
2320 self.ui.status(_("adding manifests\n"))
2314 pr.step = _('manifests')
2321 pr.step = _('manifests')
2315 pr.count = 1
2322 pr.count = 1
2316 pr.total = changesets # manifests <= changesets
2323 pr.total = changesets # manifests <= changesets
2317 # no need to check for empty manifest group here:
2324 # no need to check for empty manifest group here:
2318 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2325 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2319 # no new manifest will be created and the manifest group will
2326 # no new manifest will be created and the manifest group will
2320 # be empty during the pull
2327 # be empty during the pull
2321 source.manifestheader()
2328 source.manifestheader()
2322 self.manifest.addgroup(source, revmap, trp)
2329 self.manifest.addgroup(source, revmap, trp)
2323 self.ui.progress(_('manifests'), None)
2330 self.ui.progress(_('manifests'), None)
2324
2331
2325 needfiles = {}
2332 needfiles = {}
2326 if self.ui.configbool('server', 'validate', default=False):
2333 if self.ui.configbool('server', 'validate', default=False):
2327 # validate incoming csets have their manifests
2334 # validate incoming csets have their manifests
2328 for cset in xrange(clstart, clend):
2335 for cset in xrange(clstart, clend):
2329 mfest = self.changelog.read(self.changelog.node(cset))[0]
2336 mfest = self.changelog.read(self.changelog.node(cset))[0]
2330 mfest = self.manifest.readdelta(mfest)
2337 mfest = self.manifest.readdelta(mfest)
2331 # store file nodes we must see
2338 # store file nodes we must see
2332 for f, n in mfest.iteritems():
2339 for f, n in mfest.iteritems():
2333 needfiles.setdefault(f, set()).add(n)
2340 needfiles.setdefault(f, set()).add(n)
2334
2341
2335 # process the files
2342 # process the files
2336 self.ui.status(_("adding file changes\n"))
2343 self.ui.status(_("adding file changes\n"))
2337 pr.step = _('files')
2344 pr.step = _('files')
2338 pr.count = 1
2345 pr.count = 1
2339 pr.total = efiles
2346 pr.total = efiles
2340 source.callback = None
2347 source.callback = None
2341
2348
2342 while True:
2349 while True:
2343 chunkdata = source.filelogheader()
2350 chunkdata = source.filelogheader()
2344 if not chunkdata:
2351 if not chunkdata:
2345 break
2352 break
2346 f = chunkdata["filename"]
2353 f = chunkdata["filename"]
2347 self.ui.debug("adding %s revisions\n" % f)
2354 self.ui.debug("adding %s revisions\n" % f)
2348 pr()
2355 pr()
2349 fl = self.file(f)
2356 fl = self.file(f)
2350 o = len(fl)
2357 o = len(fl)
2351 if not fl.addgroup(source, revmap, trp):
2358 if not fl.addgroup(source, revmap, trp):
2352 raise util.Abort(_("received file revlog group is empty"))
2359 raise util.Abort(_("received file revlog group is empty"))
2353 revisions += len(fl) - o
2360 revisions += len(fl) - o
2354 files += 1
2361 files += 1
2355 if f in needfiles:
2362 if f in needfiles:
2356 needs = needfiles[f]
2363 needs = needfiles[f]
2357 for new in xrange(o, len(fl)):
2364 for new in xrange(o, len(fl)):
2358 n = fl.node(new)
2365 n = fl.node(new)
2359 if n in needs:
2366 if n in needs:
2360 needs.remove(n)
2367 needs.remove(n)
2361 if not needs:
2368 if not needs:
2362 del needfiles[f]
2369 del needfiles[f]
2363 self.ui.progress(_('files'), None)
2370 self.ui.progress(_('files'), None)
2364
2371
2365 for f, needs in needfiles.iteritems():
2372 for f, needs in needfiles.iteritems():
2366 fl = self.file(f)
2373 fl = self.file(f)
2367 for n in needs:
2374 for n in needs:
2368 try:
2375 try:
2369 fl.rev(n)
2376 fl.rev(n)
2370 except error.LookupError:
2377 except error.LookupError:
2371 raise util.Abort(
2378 raise util.Abort(
2372 _('missing file data for %s:%s - run hg verify') %
2379 _('missing file data for %s:%s - run hg verify') %
2373 (f, hex(n)))
2380 (f, hex(n)))
2374
2381
2375 dh = 0
2382 dh = 0
2376 if oldheads:
2383 if oldheads:
2377 heads = cl.heads()
2384 heads = cl.heads()
2378 dh = len(heads) - len(oldheads)
2385 dh = len(heads) - len(oldheads)
2379 for h in heads:
2386 for h in heads:
2380 if h not in oldheads and self[h].closesbranch():
2387 if h not in oldheads and self[h].closesbranch():
2381 dh -= 1
2388 dh -= 1
2382 htext = ""
2389 htext = ""
2383 if dh:
2390 if dh:
2384 htext = _(" (%+d heads)") % dh
2391 htext = _(" (%+d heads)") % dh
2385
2392
2386 self.ui.status(_("added %d changesets"
2393 self.ui.status(_("added %d changesets"
2387 " with %d changes to %d files%s\n")
2394 " with %d changes to %d files%s\n")
2388 % (changesets, revisions, files, htext))
2395 % (changesets, revisions, files, htext))
2389
2396
2390 if changesets > 0:
2397 if changesets > 0:
2391 p = lambda: cl.writepending() and self.root or ""
2398 p = lambda: cl.writepending() and self.root or ""
2392 self.hook('pretxnchangegroup', throw=True,
2399 self.hook('pretxnchangegroup', throw=True,
2393 node=hex(cl.node(clstart)), source=srctype,
2400 node=hex(cl.node(clstart)), source=srctype,
2394 url=url, pending=p)
2401 url=url, pending=p)
2395
2402
2396 added = [cl.node(r) for r in xrange(clstart, clend)]
2403 added = [cl.node(r) for r in xrange(clstart, clend)]
2397 publishing = self.ui.configbool('phases', 'publish', True)
2404 publishing = self.ui.configbool('phases', 'publish', True)
2398 if srctype == 'push':
2405 if srctype == 'push':
2399 # Old servers cannot push the boundary themselves.
2406 # Old servers cannot push the boundary themselves.
2400 # New server won't push the boundary if changeset already
2407 # New server won't push the boundary if changeset already
2401 # existed locally as secret
2408 # existed locally as secret
2402 #
2409 #
2403 # We should not use added here but the list of all changes in
2410 # We should not use added here but the list of all changes in
2404 # the bundle
2411 # the bundle
2405 if publishing:
2412 if publishing:
2406 phases.advanceboundary(self, phases.public, srccontent)
2413 phases.advanceboundary(self, phases.public, srccontent)
2407 else:
2414 else:
2408 phases.advanceboundary(self, phases.draft, srccontent)
2415 phases.advanceboundary(self, phases.draft, srccontent)
2409 phases.retractboundary(self, phases.draft, added)
2416 phases.retractboundary(self, phases.draft, added)
2410 elif srctype != 'strip':
2417 elif srctype != 'strip':
2411 # publishing only alters behavior during push
2418 # publishing only alters behavior during push
2412 #
2419 #
2413 # strip should not touch boundary at all
2420 # strip should not touch boundary at all
2414 phases.retractboundary(self, phases.draft, added)
2421 phases.retractboundary(self, phases.draft, added)
2415
2422
2416 # make changelog see real files again
2423 # make changelog see real files again
2417 cl.finalize(trp)
2424 cl.finalize(trp)
2418
2425
2419 tr.close()
2426 tr.close()
2420
2427
2421 if changesets > 0:
2428 if changesets > 0:
2422 def runhooks():
2429 def runhooks():
2423 # forcefully update the on-disk branch cache
2430 # forcefully update the on-disk branch cache
2424 self.ui.debug("updating the branch cache\n")
2431 self.ui.debug("updating the branch cache\n")
2425 self.updatebranchcache()
2432 self.updatebranchcache()
2426 self.hook("changegroup", node=hex(cl.node(clstart)),
2433 self.hook("changegroup", node=hex(cl.node(clstart)),
2427 source=srctype, url=url)
2434 source=srctype, url=url)
2428
2435
2429 for n in added:
2436 for n in added:
2430 self.hook("incoming", node=hex(n), source=srctype,
2437 self.hook("incoming", node=hex(n), source=srctype,
2431 url=url)
2438 url=url)
2432 self._afterlock(runhooks)
2439 self._afterlock(runhooks)
2433
2440
2434 finally:
2441 finally:
2435 tr.release()
2442 tr.release()
2436 # never return 0 here:
2443 # never return 0 here:
2437 if dh < 0:
2444 if dh < 0:
2438 return dh - 1
2445 return dh - 1
2439 else:
2446 else:
2440 return dh + 1
2447 return dh + 1
2441
2448
2442 def stream_in(self, remote, requirements):
2449 def stream_in(self, remote, requirements):
2443 lock = self.lock()
2450 lock = self.lock()
2444 try:
2451 try:
2445 fp = remote.stream_out()
2452 fp = remote.stream_out()
2446 l = fp.readline()
2453 l = fp.readline()
2447 try:
2454 try:
2448 resp = int(l)
2455 resp = int(l)
2449 except ValueError:
2456 except ValueError:
2450 raise error.ResponseError(
2457 raise error.ResponseError(
2451 _('unexpected response from remote server:'), l)
2458 _('unexpected response from remote server:'), l)
2452 if resp == 1:
2459 if resp == 1:
2453 raise util.Abort(_('operation forbidden by server'))
2460 raise util.Abort(_('operation forbidden by server'))
2454 elif resp == 2:
2461 elif resp == 2:
2455 raise util.Abort(_('locking the remote repository failed'))
2462 raise util.Abort(_('locking the remote repository failed'))
2456 elif resp != 0:
2463 elif resp != 0:
2457 raise util.Abort(_('the server sent an unknown error code'))
2464 raise util.Abort(_('the server sent an unknown error code'))
2458 self.ui.status(_('streaming all changes\n'))
2465 self.ui.status(_('streaming all changes\n'))
2459 l = fp.readline()
2466 l = fp.readline()
2460 try:
2467 try:
2461 total_files, total_bytes = map(int, l.split(' ', 1))
2468 total_files, total_bytes = map(int, l.split(' ', 1))
2462 except (ValueError, TypeError):
2469 except (ValueError, TypeError):
2463 raise error.ResponseError(
2470 raise error.ResponseError(
2464 _('unexpected response from remote server:'), l)
2471 _('unexpected response from remote server:'), l)
2465 self.ui.status(_('%d files to transfer, %s of data\n') %
2472 self.ui.status(_('%d files to transfer, %s of data\n') %
2466 (total_files, util.bytecount(total_bytes)))
2473 (total_files, util.bytecount(total_bytes)))
2467 handled_bytes = 0
2474 handled_bytes = 0
2468 self.ui.progress(_('clone'), 0, total=total_bytes)
2475 self.ui.progress(_('clone'), 0, total=total_bytes)
2469 start = time.time()
2476 start = time.time()
2470 for i in xrange(total_files):
2477 for i in xrange(total_files):
2471 # XXX doesn't support '\n' or '\r' in filenames
2478 # XXX doesn't support '\n' or '\r' in filenames
2472 l = fp.readline()
2479 l = fp.readline()
2473 try:
2480 try:
2474 name, size = l.split('\0', 1)
2481 name, size = l.split('\0', 1)
2475 size = int(size)
2482 size = int(size)
2476 except (ValueError, TypeError):
2483 except (ValueError, TypeError):
2477 raise error.ResponseError(
2484 raise error.ResponseError(
2478 _('unexpected response from remote server:'), l)
2485 _('unexpected response from remote server:'), l)
2479 if self.ui.debugflag:
2486 if self.ui.debugflag:
2480 self.ui.debug('adding %s (%s)\n' %
2487 self.ui.debug('adding %s (%s)\n' %
2481 (name, util.bytecount(size)))
2488 (name, util.bytecount(size)))
2482 # for backwards compat, name was partially encoded
2489 # for backwards compat, name was partially encoded
2483 ofp = self.sopener(store.decodedir(name), 'w')
2490 ofp = self.sopener(store.decodedir(name), 'w')
2484 for chunk in util.filechunkiter(fp, limit=size):
2491 for chunk in util.filechunkiter(fp, limit=size):
2485 handled_bytes += len(chunk)
2492 handled_bytes += len(chunk)
2486 self.ui.progress(_('clone'), handled_bytes,
2493 self.ui.progress(_('clone'), handled_bytes,
2487 total=total_bytes)
2494 total=total_bytes)
2488 ofp.write(chunk)
2495 ofp.write(chunk)
2489 ofp.close()
2496 ofp.close()
2490 elapsed = time.time() - start
2497 elapsed = time.time() - start
2491 if elapsed <= 0:
2498 if elapsed <= 0:
2492 elapsed = 0.001
2499 elapsed = 0.001
2493 self.ui.progress(_('clone'), None)
2500 self.ui.progress(_('clone'), None)
2494 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2501 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2495 (util.bytecount(total_bytes), elapsed,
2502 (util.bytecount(total_bytes), elapsed,
2496 util.bytecount(total_bytes / elapsed)))
2503 util.bytecount(total_bytes / elapsed)))
2497
2504
2498 # new requirements = old non-format requirements +
2505 # new requirements = old non-format requirements +
2499 # new format-related
2506 # new format-related
2500 # requirements from the streamed-in repository
2507 # requirements from the streamed-in repository
2501 requirements.update(set(self.requirements) - self.supportedformats)
2508 requirements.update(set(self.requirements) - self.supportedformats)
2502 self._applyrequirements(requirements)
2509 self._applyrequirements(requirements)
2503 self._writerequirements()
2510 self._writerequirements()
2504
2511
2505 self.invalidate()
2512 self.invalidate()
2506 return len(self.heads()) + 1
2513 return len(self.heads()) + 1
2507 finally:
2514 finally:
2508 lock.release()
2515 lock.release()
2509
2516
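# Editor's note: the stream_out wire format consumed by stream_in above,
# with made-up values:
#
#     '0\n'                           # status: 0 ok, 1 forbidden, 2 lock failed
#     '2 8192\n'                      # total_files, total_bytes
#     'data/foo.i' + '\0' + '4096\n'  # per file: name, NUL, size...
#     # ...followed by exactly 4096 raw bytes of file data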
2510 def clone(self, remote, heads=[], stream=False):
2517 def clone(self, remote, heads=[], stream=False):
2511 '''clone remote repository.
2518 '''clone remote repository.
2512
2519
2513 keyword arguments:
2520 keyword arguments:
2514 heads: list of revs to clone (forces use of pull)
2521 heads: list of revs to clone (forces use of pull)
2515 stream: use streaming clone if possible'''
2522 stream: use streaming clone if possible'''
2516
2523
2517 # now, all clients that can request uncompressed clones can
2524 # now, all clients that can request uncompressed clones can
2518 # read repo formats supported by all servers that can serve
2525 # read repo formats supported by all servers that can serve
2519 # them.
2526 # them.
2520
2527
2521 # if revlog format changes, client will have to check version
2528 # if revlog format changes, client will have to check version
2522 # and format flags on "stream" capability, and use
2529 # and format flags on "stream" capability, and use
2523 # uncompressed only if compatible.
2530 # uncompressed only if compatible.
2524
2531
2525 if not stream:
2532 if not stream:
2526 # if the server explicitly prefers to stream (for fast LANs)
2533 # if the server explicitly prefers to stream (for fast LANs)
2527 stream = remote.capable('stream-preferred')
2534 stream = remote.capable('stream-preferred')
2528
2535
2529 if stream and not heads:
2536 if stream and not heads:
2530 # 'stream' means remote revlog format is revlogv1 only
2537 # 'stream' means remote revlog format is revlogv1 only
2531 if remote.capable('stream'):
2538 if remote.capable('stream'):
2532 return self.stream_in(remote, set(('revlogv1',)))
2539 return self.stream_in(remote, set(('revlogv1',)))
2533 # otherwise, 'streamreqs' contains the remote revlog format
2540 # otherwise, 'streamreqs' contains the remote revlog format
2534 streamreqs = remote.capable('streamreqs')
2541 streamreqs = remote.capable('streamreqs')
2535 if streamreqs:
2542 if streamreqs:
2536 streamreqs = set(streamreqs.split(','))
2543 streamreqs = set(streamreqs.split(','))
2537 # if we support it, stream in and adjust our requirements
2544 # if we support it, stream in and adjust our requirements
2538 if not streamreqs - self.supportedformats:
2545 if not streamreqs - self.supportedformats:
2539 return self.stream_in(remote, streamreqs)
2546 return self.stream_in(remote, streamreqs)
2540 return self.pull(remote, heads)
2547 return self.pull(remote, heads)
2541
2548
2542 def pushkey(self, namespace, key, old, new):
2549 def pushkey(self, namespace, key, old, new):
2543 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2550 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2544 old=old, new=new)
2551 old=old, new=new)
2545 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2552 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2546 ret = pushkey.push(self, namespace, key, old, new)
2553 ret = pushkey.push(self, namespace, key, old, new)
2547 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2554 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2548 ret=ret)
2555 ret=ret)
2549 return ret
2556 return ret
2550
2557
2551 def listkeys(self, namespace):
2558 def listkeys(self, namespace):
2552 self.hook('prelistkeys', throw=True, namespace=namespace)
2559 self.hook('prelistkeys', throw=True, namespace=namespace)
2553 self.ui.debug('listing keys for "%s"\n' % namespace)
2560 self.ui.debug('listing keys for "%s"\n' % namespace)
2554 values = pushkey.list(self, namespace)
2561 values = pushkey.list(self, namespace)
2555 self.hook('listkeys', namespace=namespace, values=values)
2562 self.hook('listkeys', namespace=namespace, values=values)
2556 return values
2563 return values
2557
2564
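# Editor's note: pushkey namespaces used elsewhere in this file are
# 'bookmarks', 'phases', 'obsolete' and 'namespaces'; a hedged example
# with a made-up node:
#
#     repo.listkeys('bookmarks')      # -> {'stable': '0123456789ab'}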
2558 def debugwireargs(self, one, two, three=None, four=None, five=None):
2565 def debugwireargs(self, one, two, three=None, four=None, five=None):
2559 '''used to test argument passing over the wire'''
2566 '''used to test argument passing over the wire'''
2560 return "%s %s %s %s %s" % (one, two, three, four, five)
2567 return "%s %s %s %s %s" % (one, two, three, four, five)
2561
2568
2562 def savecommitmessage(self, text):
2569 def savecommitmessage(self, text):
2563 fp = self.opener('last-message.txt', 'wb')
2570 fp = self.opener('last-message.txt', 'wb')
2564 try:
2571 try:
2565 fp.write(text)
2572 fp.write(text)
2566 finally:
2573 finally:
2567 fp.close()
2574 fp.close()
2568 return self.pathto(fp.name[len(self.root)+1:])
2575 return self.pathto(fp.name[len(self.root)+1:])
2569
2576
2570 # used to avoid circular references so destructors work
2577 # used to avoid circular references so destructors work
2571 def aftertrans(files):
2578 def aftertrans(files):
2572 renamefiles = [tuple(t) for t in files]
2579 renamefiles = [tuple(t) for t in files]
2573 def a():
2580 def a():
2574 for src, dest in renamefiles:
2581 for src, dest in renamefiles:
2575 try:
2582 try:
2576 util.rename(src, dest)
2583 util.rename(src, dest)
2577 except OSError: # journal file does not yet exist
2584 except OSError: # journal file does not yet exist
2578 pass
2585 pass
2579 return a
2586 return a
2580
2587
2581 def undoname(fn):
2588 def undoname(fn):
2582 base, name = os.path.split(fn)
2589 base, name = os.path.split(fn)
2583 assert name.startswith('journal')
2590 assert name.startswith('journal')
2584 return os.path.join(base, name.replace('journal', 'undo', 1))
2591 return os.path.join(base, name.replace('journal', 'undo', 1))
2585
2592
2586 def instance(ui, path, create):
2593 def instance(ui, path, create):
2587 return localrepository(ui, util.urllocalpath(path), create)
2594 return localrepository(ui, util.urllocalpath(path), create)
2588
2595
2589 def islocal(path):
2596 def islocal(path):
2590 return True
2597 return True
@@ -1,301 +1,322
1 # obsolete.py - obsolete markers handling
1 # obsolete.py - obsolete markers handling
2 #
2 #
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Logilab SA <contact@logilab.fr>
4 # Logilab SA <contact@logilab.fr>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 """Obsolete markers handling
9 """Obsolete markers handling
10
10
11 An obsolete marker maps an old changeset to a list of new
11 An obsolete marker maps an old changeset to a list of new
12 changesets. If the list of new changesets is empty, the old changeset
12 changesets. If the list of new changesets is empty, the old changeset
13 is said to be "killed". Otherwise, the old changeset is being
13 is said to be "killed". Otherwise, the old changeset is being
14 "replaced" by the new changesets.
14 "replaced" by the new changesets.
15
15
16 Obsolete markers can be used to record and distribute changeset graph
16 Obsolete markers can be used to record and distribute changeset graph
17 transformations performed by history rewriting operations, and help
17 transformations performed by history rewriting operations, and help
18 build new tools to reconcile conflicting rewriting actions. To
18 build new tools to reconcile conflicting rewriting actions. To
19 facilitate conflicts resolution, markers include various annotations
19 facilitate conflicts resolution, markers include various annotations
20 besides old and new changeset identifiers, such as creation date or
20 besides old and new changeset identifiers, such as creation date or
21 author name.
21 author name.
22
22
23
23
24 Format
24 Format
25 ------
25 ------
26
26
27 Markers are stored in an append-only file stored in
27 Markers are stored in an append-only file stored in
28 '.hg/store/obsstore'.
28 '.hg/store/obsstore'.
29
29
30 The file starts with a version header:
30 The file starts with a version header:
31
31
32 - 1 unsigned byte: version number, starting at zero.
32 - 1 unsigned byte: version number, starting at zero.
33
33
34
34
35 The header is followed by the markers. Each marker is made of:
35 The header is followed by the markers. Each marker is made of:
36
36
37 - 1 unsigned byte: number of new changesets "N", can be zero.
37 - 1 unsigned byte: number of new changesets "N", can be zero.
38
38
39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40
40
41 - 1 byte: a bit field. It is reserved for flags used in obsolete
41 - 1 byte: a bit field. It is reserved for flags used in obsolete
42 markers common operations, to avoid repeated decoding of metadata
42 markers common operations, to avoid repeated decoding of metadata
43 entries.
43 entries.
44
44
45 - 20 bytes: obsoleted changeset identifier.
45 - 20 bytes: obsoleted changeset identifier.
46
46
47 - N*20 bytes: new changeset identifiers.
47 - N*20 bytes: new changeset identifiers.
48
48
49 - M bytes: metadata as a sequence of nul-separated strings. Each
49 - M bytes: metadata as a sequence of nul-separated strings. Each
50 string contains a key and a value, separated by a colon ':', without
50 string contains a key and a value, separated by a colon ':', without
51 additional encoding. Keys cannot contain '\0' or ':' and values
51 additional encoding. Keys cannot contain '\0' or ':' and values
52 cannot contain '\0'.
52 cannot contain '\0'.
53 """
53 """
54 import struct
54 import struct
55 from mercurial import util, base85
55 from mercurial import util, base85
56 from i18n import _
56 from i18n import _
57
57
58 _pack = struct.pack
58 _pack = struct.pack
59 _unpack = struct.unpack
59 _unpack = struct.unpack
60
60
61
61
62
62
63 # data used for parsing and writing
63 # data used for parsing and writing
64 _fmversion = 0
64 _fmversion = 0
65 _fmfixed = '>BIB20s'
65 _fmfixed = '>BIB20s'
66 _fmnode = '20s'
66 _fmnode = '20s'
67 _fmfsize = struct.calcsize(_fmfixed)
67 _fmfsize = struct.calcsize(_fmfixed)
68 _fnodesize = struct.calcsize(_fmnode)
68 _fnodesize = struct.calcsize(_fmnode)
69
69
70 def _readmarkers(data):
70 def _readmarkers(data):
71 """Read and enumerate markers from raw data"""
71 """Read and enumerate markers from raw data"""
72 off = 0
72 off = 0
73 diskversion = _unpack('>B', data[off:off + 1])[0]
73 diskversion = _unpack('>B', data[off:off + 1])[0]
74 off += 1
74 off += 1
75 if diskversion != _fmversion:
75 if diskversion != _fmversion:
76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
77 % diskversion)
77 % diskversion)
78
78
79 # Loop on markers
79 # Loop on markers
80 l = len(data)
80 l = len(data)
81 while off + _fmfsize <= l:
81 while off + _fmfsize <= l:
82 # read fixed part
82 # read fixed part
83 cur = data[off:off + _fmfsize]
83 cur = data[off:off + _fmfsize]
84 off += _fmfsize
84 off += _fmfsize
85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
86 # read replacement
86 # read replacement
87 sucs = ()
87 sucs = ()
88 if nbsuc:
88 if nbsuc:
89 s = (_fnodesize * nbsuc)
89 s = (_fnodesize * nbsuc)
90 cur = data[off:off + s]
90 cur = data[off:off + s]
91 sucs = _unpack(_fmnode * nbsuc, cur)
91 sucs = _unpack(_fmnode * nbsuc, cur)
92 off += s
92 off += s
93 # read metadata
93 # read metadata
94 # (metadata will be decoded on demand)
94 # (metadata will be decoded on demand)
95 metadata = data[off:off + mdsize]
95 metadata = data[off:off + mdsize]
96 if len(metadata) != mdsize:
96 if len(metadata) != mdsize:
97 raise util.Abort(_('parsing obsolete marker: metadata is too '
97 raise util.Abort(_('parsing obsolete marker: metadata is too '
98 'short, %d bytes expected, got %d')
98 'short, %d bytes expected, got %d')
99 % (mdsize, len(metadata)))
99 % (mdsize, len(metadata)))
100 off += mdsize
100 off += mdsize
101 yield (pre, sucs, flags, metadata)
101 yield (pre, sucs, flags, metadata)
102
102
103 def encodemeta(meta):
103 def encodemeta(meta):
104 """Return encoded metadata string to string mapping.
104 """Return encoded metadata string to string mapping.
105
105
106 Keys must not contain ':' or '\0'; values must not contain '\0'."""
106 Keys must not contain ':' or '\0'; values must not contain '\0'."""
107 for key, value in meta.iteritems():
107 for key, value in meta.iteritems():
108 if ':' in key or '\0' in key:
108 if ':' in key or '\0' in key:
109 raise ValueError("':' and '\0' are forbidden in metadata key'")
109 raise ValueError("':' and '\0' are forbidden in metadata key'")
110 if '\0' in value:
110 if '\0' in value:
111 raise ValueError("':' are forbidden in metadata value'")
111 raise ValueError("':' are forbidden in metadata value'")
112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
113
113
114 def decodemeta(data):
114 def decodemeta(data):
115 """Return string to string dictionary from encoded version."""
115 """Return string to string dictionary from encoded version."""
116 d = {}
116 d = {}
117 for l in data.split('\0'):
117 for l in data.split('\0'):
118 if l:
118 if l:
119 key, value = l.split(':', 1)
119 key, value = l.split(':', 1)
120 d[key] = value
120 d[key] = value
121 return d
121 return d
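
A quick round trip of these two helpers (a sketch; the dictionary
content is invented):

    meta = {'date': '1342000000 0', 'user': 'alice'}
    data = encodemeta(meta)     # 'date:1342000000 0\x00user:alice'
    assert decodemeta(data) == meta
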
122
122
123 class marker(object):
123 class marker(object):
124 """Wrap obsolete marker raw data"""
124 """Wrap obsolete marker raw data"""
125
125
126 def __init__(self, repo, data):
126 def __init__(self, repo, data):
127 # the repo argument will be used to create changectx in later version
127 # the repo argument will be used to create changectx in later version
128 self._repo = repo
128 self._repo = repo
129 self._data = data
129 self._data = data
130 self._decodedmeta = None
130 self._decodedmeta = None
131
131
132 def precnode(self):
132 def precnode(self):
133 """Precursor changeset node identifier"""
133 """Precursor changeset node identifier"""
134 return self._data[0]
134 return self._data[0]
135
135
136 def succnodes(self):
136 def succnodes(self):
137 """List of successor changesets node identifiers"""
137 """List of successor changesets node identifiers"""
138 return self._data[1]
138 return self._data[1]
139
139
140 def metadata(self):
140 def metadata(self):
141 """Decoded metadata dictionary"""
141 """Decoded metadata dictionary"""
142 if self._decodedmeta is None:
142 if self._decodedmeta is None:
143 self._decodedmeta = decodemeta(self._data[3])
143 self._decodedmeta = decodemeta(self._data[3])
144 return self._decodedmeta
144 return self._decodedmeta
145
145
146 def date(self):
146 def date(self):
147 """Creation date as (unixtime, offset)"""
147 """Creation date as (unixtime, offset)"""
148 parts = self.metadata()['date'].split(' ')
148 parts = self.metadata()['date'].split(' ')
149 return (float(parts[0]), int(parts[1]))
149 return (float(parts[0]), int(parts[1]))
150
150
151 class obsstore(object):
151 class obsstore(object):
152 """Store obsolete markers
152 """Store obsolete markers
153
153
154 Markers can be accessed with two mappings:
154 Markers can be accessed with two mappings:
155 - precursors: old node -> set(markers referencing it as precursor)
155 - precursors: old node -> set(markers referencing it as precursor)
156 - successors: new node -> set(markers referencing it as successor)
156 - successors: new node -> set(markers referencing it as successor)
157 """
157 """
158
158
159 def __init__(self, sopener):
159 def __init__(self, sopener):
160 self._all = []
160 self._all = []
161 # new markers to serialize
161 # new markers to serialize
162 self.precursors = {}
162 self.precursors = {}
163 self.successors = {}
163 self.successors = {}
164 self.sopener = sopener
164 self.sopener = sopener
165 data = sopener.tryread('obsstore')
165 data = sopener.tryread('obsstore')
166 if data:
166 if data:
167 self._load(_readmarkers(data))
167 self._load(_readmarkers(data))
168
168
169 def __iter__(self):
169 def __iter__(self):
170 return iter(self._all)
170 return iter(self._all)
171
171
172 def __nonzero__(self):
172 def __nonzero__(self):
173 return bool(self._all)
173 return bool(self._all)
174
174
175 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
175 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
176 """obsolete: add a new obsolete marker
176 """obsolete: add a new obsolete marker
177
177
178 * ensure it is hashable
178 * ensure it is hashable
179 * check mandatory metadata
179 * check mandatory metadata
180 * encode metadata
180 * encode metadata
181 """
181 """
182 if metadata is None:
182 if metadata is None:
183 metadata = {}
183 metadata = {}
184 if len(prec) != 20:
184 if len(prec) != 20:
185 raise ValueError(prec)
185 raise ValueError(prec)
186 for succ in succs:
186 for succ in succs:
187 if len(succ) != 20:
187 if len(succ) != 20:
188 raise ValueError(succ)
188 raise ValueError(succ)
189 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
189 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
190 self.add(transaction, [marker])
190 self.add(transaction, [marker])
191
191
192 def add(self, transaction, markers):
192 def add(self, transaction, markers):
193 """Add new markers to the store
193 """Add new markers to the store
194
194
195 Take care of filtering out duplicates.
195 Take care of filtering out duplicates.
196 Return the number of new markers."""
196 Return the number of new markers."""
197 new = [m for m in markers if m not in self._all]
197 new = [m for m in markers if m not in self._all]
198 if new:
198 if new:
199 f = self.sopener('obsstore', 'ab')
199 f = self.sopener('obsstore', 'ab')
200 try:
200 try:
201 # Whether the file's current position is at the beginning or at
201 # Whether the file's current position is at the beginning or at
202 # the end after opening a file for appending is implementation
202 # the end after opening a file for appending is implementation
203 # defined. So we must seek to the end before calling tell(),
203 # defined. So we must seek to the end before calling tell(),
204 # or we may get a zero offset for non-zero sized files on
204 # or we may get a zero offset for non-zero sized files on
205 # some platforms (issue3543).
205 # some platforms (issue3543).
206 f.seek(0, 2) # os.SEEK_END
206 f.seek(0, 2) # os.SEEK_END
207 offset = f.tell()
207 offset = f.tell()
208 transaction.add('obsstore', offset)
208 transaction.add('obsstore', offset)
209 # offset == 0: new file - add the version header
209 # offset == 0: new file - add the version header
210 for bytes in _encodemarkers(new, offset == 0):
210 for bytes in _encodemarkers(new, offset == 0):
211 f.write(bytes)
211 f.write(bytes)
212 finally:
212 finally:
213 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
213 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
214 # call 'filecacheentry.refresh()' here
214 # call 'filecacheentry.refresh()' here
215 f.close()
215 f.close()
216 self._load(new)
216 self._load(new)
217 return len(new)
217 return len(new)
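
For reference, a hedged sketch of recording one rewrite through this
API; 'repo', 'oldnode' and 'newnode' are assumed to exist (the nodes
being 20-byte binary identifiers) and the metadata keys are invented:

    tr = repo.transaction('add obsolete marker')
    try:
        repo.obsstore.create(tr, oldnode, [newnode],
                             metadata={'user': 'alice', 'date': '0 0'})
        tr.close()
    finally:
        tr.release()
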
218
218
219 def mergemarkers(self, transaction, data):
219 def mergemarkers(self, transaction, data):
220 markers = _readmarkers(data)
220 markers = _readmarkers(data)
221 self.add(transaction, markers)
221 self.add(transaction, markers)
222
222
223 def _load(self, markers):
223 def _load(self, markers):
224 for mark in markers:
224 for mark in markers:
225 self._all.append(mark)
225 self._all.append(mark)
226 pre, sucs = mark[:2]
226 pre, sucs = mark[:2]
227 self.precursors.setdefault(pre, set()).add(mark)
227 self.precursors.setdefault(pre, set()).add(mark)
228 for suc in sucs:
228 for suc in sucs:
229 self.successors.setdefault(suc, set()).add(mark)
229 self.successors.setdefault(suc, set()).add(mark)
230
230
231 def _encodemarkers(markers, addheader=False):
231 def _encodemarkers(markers, addheader=False):
232 # Kept separate from flushmarkers(), it will be reused for
232 # Kept separate from flushmarkers(), it will be reused for
233 # marker exchange.
233 # marker exchange.
234 if addheader:
234 if addheader:
235 yield _pack('>B', _fmversion)
235 yield _pack('>B', _fmversion)
236 for marker in markers:
236 for marker in markers:
237 pre, sucs, flags, metadata = marker
237 yield _encodeonemarker(marker)
238 nbsuc = len(sucs)
238
239 format = _fmfixed + (_fmnode * nbsuc)
239
240 data = [nbsuc, len(metadata), flags, pre]
240 def _encodeonemarker(marker):
241 data.extend(sucs)
241 pre, sucs, flags, metadata = marker
242 yield _pack(format, *data)
242 nbsuc = len(sucs)
243 yield metadata
243 format = _fmfixed + (_fmnode * nbsuc)
244 data = [nbsuc, len(metadata), flags, pre]
245 data.extend(sucs)
246 return _pack(format, *data) + metadata
247
248 # arbitrarily picked to fit into the 8K limit from HTTP servers
249 # you have to take into account:
250 # - the version header
251 # - the base85 encoding
252 _maxpayload = 5300
244
253
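A quick check of that figure: base85 expands every 4 bytes of input
into 5 output characters, so 5300 bytes of raw marker data encode to
about 5300 * 5/4 = 6625 characters; with the version header and the
key name added, that stays comfortably under the 8K limit mentioned
above.
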
245 def listmarkers(repo):
254 def listmarkers(repo):
246 """List markers over pushkey"""
255 """List markers over pushkey"""
247 if not repo.obsstore:
256 if not repo.obsstore:
248 return {}
257 return {}
249 markers = _encodemarkers(repo.obsstore, True)
258 keys = {}
250 return {'dump': base85.b85encode(''.join(markers))}
259 parts = []
260 currentlen = _maxpayload * 2 # ensure we create a new part
261 for marker in repo.obsstore:
262 nextdata = _encodeonemarker(marker)
263 if (len(nextdata) + currentlen > _maxpayload):
264 currentpart = []
265 currentlen = 0
266 parts.append(currentpart)
267 currentpart.append(nextdata)
currentlen += len(nextdata)
268 for idx, part in enumerate(reversed(parts)):
269 data = ''.join([_pack('>B', _fmversion)] + part)
270 keys['dump%i' % idx] = base85.b85encode(data)
271 return keys
251
272
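Each resulting 'dump<N>' value carries its own version header, so a
receiver can decode every key independently; a sketch, assuming an
existing 'repo' object:

    for name, encoded in sorted(listmarkers(repo).items()):
        raw = base85.b85decode(encoded)
        print name, len(list(_readmarkers(raw)))   # markers per part
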
252 def pushmarker(repo, key, old, new):
273 def pushmarker(repo, key, old, new):
253 """Push markers over pushkey"""
274 """Push markers over pushkey"""
254 if key != 'dump':
275 if not key.startswith('dump'):
255 repo.ui.warn(_('unknown key: %r') % key)
276 repo.ui.warn(_('unknown key: %r') % key)
256 return 0
277 return 0
257 if old:
278 if old:
258 repo.ui.warn(_('unexpected old value for %r') % key)
279 repo.ui.warn(_('unexpected old value for %r') % key)
259 return 0
280 return 0
260 data = base85.b85decode(new)
281 data = base85.b85decode(new)
261 lock = repo.lock()
282 lock = repo.lock()
262 try:
283 try:
263 tr = repo.transaction('pushkey: obsolete markers')
284 tr = repo.transaction('pushkey: obsolete markers')
264 try:
285 try:
265 repo.obsstore.mergemarkers(tr, data)
286 repo.obsstore.mergemarkers(tr, data)
266 tr.close()
287 tr.close()
267 return 1
288 return 1
268 finally:
289 finally:
269 tr.release()
290 tr.release()
270 finally:
291 finally:
271 lock.release()
292 lock.release()
272
293
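listmarkers() and pushmarker() are meant to be wired up as the two
sides of a pushkey namespace. Assuming an 'obsolete' namespace is
registered elsewhere in this series, pushing all local markers to a
peer amounts roughly to this hypothetical driver loop:

    for key, encoded in listmarkers(localrepo).items():
        remote.pushkey('obsolete', key, '', encoded)  # old value empty
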
273 def allmarkers(repo):
294 def allmarkers(repo):
274 """all obsolete markers known in a repository"""
295 """all obsolete markers known in a repository"""
275 for markerdata in repo.obsstore:
296 for markerdata in repo.obsstore:
276 yield marker(repo, markerdata)
297 yield marker(repo, markerdata)
277
298
278 def precursormarkers(ctx):
299 def precursormarkers(ctx):
279 """obsolete marker making this changeset obsolete"""
300 """obsolete marker making this changeset obsolete"""
280 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
301 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
281 yield marker(ctx._repo, data)
302 yield marker(ctx._repo, data)
282
303
283 def successormarkers(ctx):
304 def successormarkers(ctx):
284 """obsolete marker marking this changeset as a successors"""
305 """obsolete marker marking this changeset as a successors"""
285 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
306 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
286 yield marker(ctx._repo, data)
307 yield marker(ctx._repo, data)
287
308
288 def anysuccessors(obsstore, node):
309 def anysuccessors(obsstore, node):
289 """Yield every successor of <node>
310 """Yield every successor of <node>
290
311
291 This is a linear yield, unsuitable for detecting split changesets."""
312 This is a linear yield, unsuitable for detecting split changesets."""
292 remaining = set([node])
313 remaining = set([node])
293 seen = set(remaining)
314 seen = set(remaining)
294 while remaining:
315 while remaining:
295 current = remaining.pop()
316 current = remaining.pop()
296 yield current
317 yield current
297 for mark in obsstore.precursors.get(current, ()):
318 for mark in obsstore.precursors.get(current, ()):
298 for suc in mark[1]:
319 for suc in mark[1]:
299 if suc not in seen:
320 if suc not in seen:
300 seen.add(suc)
321 seen.add(suc)
301 remaining.add(suc)
322 remaining.add(suc)
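
For illustration, a traversal sketch (assuming an existing repository
and a 20-byte binary 'startnode'); because nodes are yielded one at a
time, the successors of a split changeset arrive as unrelated entries:

    from mercurial.node import hex    # binary node -> hex string
    for node in anysuccessors(repo.obsstore, startnode):
        print hex(node)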