_updatebranchcache: prevent deleting dict key during iteration...
Pierre-Yves David
r17210:ec80ae98 default
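
The change below fixes the branch-pruning loop at the end of _updatebranchcache: it iterated directly over the dict partial while deleting keys from it, which CPython rejects with "RuntimeError: dictionary changed size during iteration". Iterating over partial.keys() instead is safe in Python 2 (this codebase's target), because keys() returns a list snapshot that is decoupled from the dict being mutated. A minimal sketch of the failure mode and the fix, using made-up branch names rather than data from this repository:

    # Python 2 sketch of the bug this commit fixes (hypothetical data).
    partial = {'default': ['n1'], 'stable': [], 'feature': []}

    # Old pattern (unsafe): deleting a key while iterating over the dict
    # itself raises RuntimeError once the dict's size changes.
    #     for branch in partial:
    #         if not partial[branch]:
    #             del partial[branch]

    # New pattern (safe in Python 2): partial.keys() builds a list copy,
    # so mutating the dict no longer disturbs the iteration.
    for branch in partial.keys():
        if not partial[branch]:
            del partial[branch]

    assert partial == {'default': ['n1']}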
@@ -1,2586 +1,2586 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialisation and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly new extinct changeset)
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
-        for branch in partial:
+        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
        if 'x' in flags:
            util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

890 def transaction(self, desc):
890 def transaction(self, desc):
891 tr = self._transref and self._transref() or None
891 tr = self._transref and self._transref() or None
892 if tr and tr.running():
892 if tr and tr.running():
893 return tr.nest()
893 return tr.nest()
894
894
895 # abort here if the journal already exists
895 # abort here if the journal already exists
896 if os.path.exists(self.sjoin("journal")):
896 if os.path.exists(self.sjoin("journal")):
897 raise error.RepoError(
897 raise error.RepoError(
898 _("abandoned transaction found - run hg recover"))
898 _("abandoned transaction found - run hg recover"))
899
899
900 self._writejournal(desc)
900 self._writejournal(desc)
901 renames = [(x, undoname(x)) for x in self._journalfiles()]
901 renames = [(x, undoname(x)) for x in self._journalfiles()]
902
902
903 tr = transaction.transaction(self.ui.warn, self.sopener,
903 tr = transaction.transaction(self.ui.warn, self.sopener,
904 self.sjoin("journal"),
904 self.sjoin("journal"),
905 aftertrans(renames),
905 aftertrans(renames),
906 self.store.createmode)
906 self.store.createmode)
907 self._transref = weakref.ref(tr)
907 self._transref = weakref.ref(tr)
908 return tr
908 return tr
909
909
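    # When a transaction closes successfully, aftertrans(renames) renames
    # each journal.* file to its undo.* counterpart (journal.dirstate ->
    # undo.dirstate and so on); those undo.* files are what rollback()
    # later restores from.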
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

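    # recover() above backs out an *interrupted* transaction from its
    # journal; rollback() below undoes the last *completed* transaction
    # using the undo.* files that aftertrans() left behind.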
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

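    # undo.desc is the small file written by _writejournal(): the old
    # changelog length on one line and the transaction description on the
    # next (for instance "1024\ncommit"); _rollback() parses it to build
    # its status message.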
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

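    # _afterlock() schedules work for the moment the current store lock is
    # released; commit() relies on this so the 'commit' hook fires only
    # after any enclosing lock holder (an extension, say) lets go.  When no
    # lock is held the callback simply runs immediately.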
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

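    # _filecommit() returns the filelog node the manifest should record for
    # fctx: a freshly added revision when the content or copy metadata
    # changed, otherwise the existing first-parent node (a pure flags
    # change only marks the file in changelist).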
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
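            # status() returns seven lists in this order: modified, added,
            # removed, deleted, unknown, ignored, clean; the changes[n]
            # indexing below follows that order.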
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
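            # A sketch of what the next line does: writepending() flushes
            # the changelog data buffered by delayupdate() to a pending file
            # so the pretxncommit hook can see the new revision; the lambda
            # hands the hook machinery self.root when pending data exists,
            # and '' otherwise.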
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
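            # Classify every file in mf2 against mf1: present in both ->
            # modified or clean, present only in mf2 -> added.  Matched
            # entries are deleted from mf1 as we go, so whatever is left
            # in mf1 afterwards is the removed set.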
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

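    # branches() serves the legacy 'branches' wire-protocol command: each
    # node is followed back along first parents until a merge or a root,
    # and reported as a (starting node, stop node, p1, p2) tuple.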
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

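    # between() serves the legacy discovery protocol: for each (top, bottom)
    # pair it samples the first-parent chain at exponentially growing
    # distances below top (1, 2, 4, 8, ... steps), giving the client the
    # landmarks for a binary-search style scan of the graph.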
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # we pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # we pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

1786 remoteobs = remote.listkeys('obsolete')
1786 remoteobs = remote.listkeys('obsolete')
1787 if 'dump' in remoteobs:
1787 if 'dump' in remoteobs:
1788 if tr is None:
1788 if tr is None:
1789 tr = self.transaction(trname)
1789 tr = self.transaction(trname)
1790 data = base85.b85decode(remoteobs['dump'])
1790 data = base85.b85decode(remoteobs['dump'])
1791 self.obsstore.mergemarkers(tr, data)
1791 self.obsstore.mergemarkers(tr, data)
1792 if tr is not None:
1792 if tr is not None:
1793 tr.close()
1793 tr.close()
1794 finally:
1794 finally:
1795 if tr is not None:
1795 if tr is not None:
1796 tr.release()
1796 tr.release()
1797 lock.release()
1797 lock.release()
1798
1798
1799 return result
1799 return result
1800
1800
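    # The lazy-transaction pattern used above, in miniature (an
    # illustrative sketch, not part of the original code; `repo` and
    # `trname` stand in for the surrounding names):
    #
    #   tr = None
    #   try:
    #       tr = repo.transaction(trname)
    #       ...          # write pulled data under the transaction
    #       tr.close()   # commit on success
    #   finally:
    #       if tr is not None:
    #           tr.release()  # roll back if close() was never reached
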
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

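    # A hypothetical extension subclass overriding checkpush() (sketch
    # only; `strictrepo` and its policy are invented for illustration,
    # `localrepository` is the class defined in this module):
    #
    #   class strictrepo(localrepository):
    #       def checkpush(self, force, revs):
    #           super(strictrepo, self).checkpush(force, revs)
    #           if not force and revs is None:
    #               raise util.Abort('this repository requires '
    #                                'explicit revisions on push')
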
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - None means nothing to push
        - 0 means HTTP error
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the iteration
                        if self.obsstore:
                            # these messages are here for the 80-char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If the missing set contains at least one
                            # obsolete or unstable changeset, at least one
                            # of the missing heads will be obsolete or
                            # unstable. So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # pushing everything failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that revsets break if droots is not strictly
                    # XXX roots; we may want to ensure it is, but it is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if ('obsolete' in remote.listkeys('namespaces')
                    and self.obsstore):
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

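    # Interpreting push()'s return value, per the docstring above (an
    # illustrative sketch; `repo` and `otherpeer` are hypothetical):
    #
    #   ret = repo.push(otherpeer)
    #   if ret is None:
    #       pass        # nothing to push
    #   elif ret == 0:
    #       pass        # HTTP error reported by the remote
    #   else:
    #       pass        # pushed; see addchangegroup() for head deltas
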
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

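    # A possible call, based on the signature above (illustrative only;
    # `repo` is hypothetical):
    #
    #   cg = repo.changegroupsubset([nullid], repo.heads(), 'pull')
    #
    # would bundle every changeset in the repository.
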
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

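    # With both defaults this reduces to "bundle everything" (sketch;
    # `repo` is hypothetical):
    #
    #   cg = repo.getbundle('pull')   # common=[nullid], heads=local heads
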
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

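    # Stream layout produced by gengroup() above, informally:
    #   1. changelog chunks for csets
    #   2. manifest chunks for the needed manifests
    #   3. for each changed file: a file header, then its chunks
    #   4. a closing chunk from bundler.close()
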
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

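    # Per the docstrings above, the returned object exposes read(), so a
    # caller can stream it, e.g. (sketch; `cg` and the chunk size are
    # illustrative):
    #
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       # ... write data to a bundle file or a wire-protocol peer
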
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

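    # Decoding the head-delta return value, per the docstring above
    # (sketch; `ret` comes from a hypothetical addchangegroup() call):
    #
    #   if ret > 1:
    #       pass   # ret - 1 new heads appeared
    #   elif ret == 1:
    #       pass   # changesets added, head count unchanged
    #   elif ret < 0:
    #       pass   # -ret - 1 heads were removed
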
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

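    # Decision flow for clone(), informally: prefer a streaming clone
    # when the server allows it and no specific heads were requested,
    # otherwise fall back to pull(). E.g. (hypothetical call):
    #
    #   repo.clone(remote, stream=True)   # tries stream_in() first
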
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

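# For example (illustrative paths), undoname('.hg/store/journal') returns
# '.hg/store/undo', and undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots'.
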
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True